xref: /openbmc/linux/fs/btrfs/ctree.c (revision 089a49b6)
1 /*
2  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
27 
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 		      *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 		      *root, struct btrfs_key *ins_key,
32 		      struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 			  struct btrfs_root *root, struct extent_buffer *dst,
35 			  struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 			      struct btrfs_root *root,
38 			      struct extent_buffer *dst_buf,
39 			      struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 		    int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 				 struct extent_buffer *eb);
44 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
45 
46 struct btrfs_path *btrfs_alloc_path(void)
47 {
48 	struct btrfs_path *path;
49 	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 	return path;
51 }
52 
53 /*
54  * set all locked nodes in the path to blocking locks.  This should
55  * be done before scheduling
56  */
57 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
58 {
59 	int i;
60 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
61 		if (!p->nodes[i] || !p->locks[i])
62 			continue;
63 		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
64 		if (p->locks[i] == BTRFS_READ_LOCK)
65 			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
66 		else if (p->locks[i] == BTRFS_WRITE_LOCK)
67 			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
68 	}
69 }
70 
71 /*
72  * reset all the locked nodes in the path to spinning locks.
73  *
74  * held is used to keep lockdep happy.  When lockdep is enabled,
75  * we set held to a blocking lock before we go around and
76  * retake all the spinlocks in the path.  You can safely use NULL
77  * for held
78  */
79 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
80 					struct extent_buffer *held, int held_rw)
81 {
82 	int i;
83 
84 #ifdef CONFIG_DEBUG_LOCK_ALLOC
85 	/* lockdep really cares that we take all of these spinlocks
86 	 * in the right order.  If any of the locks in the path are not
87 	 * currently blocking, it is going to complain.  So, make really
88 	 * really sure by forcing the path to blocking before we clear
89 	 * the path blocking.
90 	 */
91 	if (held) {
92 		btrfs_set_lock_blocking_rw(held, held_rw);
93 		if (held_rw == BTRFS_WRITE_LOCK)
94 			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
95 		else if (held_rw == BTRFS_READ_LOCK)
96 			held_rw = BTRFS_READ_LOCK_BLOCKING;
97 	}
98 	btrfs_set_path_blocking(p);
99 #endif
100 
101 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
102 		if (p->nodes[i] && p->locks[i]) {
103 			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
104 			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
105 				p->locks[i] = BTRFS_WRITE_LOCK;
106 			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
107 				p->locks[i] = BTRFS_READ_LOCK;
108 		}
109 	}
110 
111 #ifdef CONFIG_DEBUG_LOCK_ALLOC
112 	if (held)
113 		btrfs_clear_lock_blocking_rw(held, held_rw);
114 #endif
115 }
116 
117 /* this also releases the path */
118 void btrfs_free_path(struct btrfs_path *p)
119 {
120 	if (!p)
121 		return;
122 	btrfs_release_path(p);
123 	kmem_cache_free(btrfs_path_cachep, p);
124 }
125 
126 /*
127  * path release drops references on the extent buffers in the path
128  * and it drops any locks held by this path
129  *
130  * It is safe to call this on paths that have no locks or extent buffers held.
131  */
132 noinline void btrfs_release_path(struct btrfs_path *p)
133 {
134 	int i;
135 
136 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
137 		p->slots[i] = 0;
138 		if (!p->nodes[i])
139 			continue;
140 		if (p->locks[i]) {
141 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
142 			p->locks[i] = 0;
143 		}
144 		free_extent_buffer(p->nodes[i]);
145 		p->nodes[i] = NULL;
146 	}
147 }
148 
149 /*
150  * safely gets a reference on the root node of a tree.  A lock
151  * is not taken, so a concurrent writer may put a different node
152  * at the root of the tree.  See btrfs_lock_root_node for the
153  * looping required.
154  *
155  * The extent buffer returned by this has a reference taken, so
156  * it won't disappear.  It may stop being the root of the tree
157  * at any time because there are no locks held.
158  */
159 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
160 {
161 	struct extent_buffer *eb;
162 
163 	while (1) {
164 		rcu_read_lock();
165 		eb = rcu_dereference(root->node);
166 
167 		/*
168 		 * RCU really hurts here: we could free up the root node because
169 		 * it was cow'ed but we may not get the new root node yet, so do
170 		 * the inc_not_zero dance and if it doesn't work then
171 		 * synchronize_rcu and try again.
172 		 */
173 		if (atomic_inc_not_zero(&eb->refs)) {
174 			rcu_read_unlock();
175 			break;
176 		}
177 		rcu_read_unlock();
178 		synchronize_rcu();
179 	}
180 	return eb;
181 }
182 
183 /* loop around taking references on and locking the root node of the
184  * tree until you end up with a lock on the root.  A locked buffer
185  * is returned, with a reference held.
186  */
187 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
188 {
189 	struct extent_buffer *eb;
190 
191 	while (1) {
192 		eb = btrfs_root_node(root);
193 		btrfs_tree_lock(eb);
194 		if (eb == root->node)
195 			break;
196 		btrfs_tree_unlock(eb);
197 		free_extent_buffer(eb);
198 	}
199 	return eb;
200 }
201 
202 /* loop around taking references on and locking the root node of the
203  * tree until you end up with a lock on the root.  A locked buffer
204  * is returned, with a reference held.
205  */
206 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
207 {
208 	struct extent_buffer *eb;
209 
210 	while (1) {
211 		eb = btrfs_root_node(root);
212 		btrfs_tree_read_lock(eb);
213 		if (eb == root->node)
214 			break;
215 		btrfs_tree_read_unlock(eb);
216 		free_extent_buffer(eb);
217 	}
218 	return eb;
219 }
220 
221 /* cowonly roots (everything not a reference counted cow subvolume) just get
222  * put onto a simple dirty list.  transaction.c walks this to make sure they
223  * get properly updated on disk.
224  */
225 static void add_root_to_dirty_list(struct btrfs_root *root)
226 {
227 	spin_lock(&root->fs_info->trans_lock);
228 	if (root->track_dirty && list_empty(&root->dirty_list)) {
229 		list_add(&root->dirty_list,
230 			 &root->fs_info->dirty_cowonly_roots);
231 	}
232 	spin_unlock(&root->fs_info->trans_lock);
233 }
234 
235 /*
236  * used by snapshot creation to make a copy of a root for a tree with
237  * a given objectid.  The buffer with the new root node is returned in
238  * cow_ret, and this func returns zero on success or a negative error code.
239  */
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 		      struct btrfs_root *root,
242 		      struct extent_buffer *buf,
243 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
244 {
245 	struct extent_buffer *cow;
246 	int ret = 0;
247 	int level;
248 	struct btrfs_disk_key disk_key;
249 
250 	WARN_ON(root->ref_cows && trans->transid !=
251 		root->fs_info->running_transaction->transid);
252 	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
253 
254 	level = btrfs_header_level(buf);
255 	if (level == 0)
256 		btrfs_item_key(buf, &disk_key, 0);
257 	else
258 		btrfs_node_key(buf, &disk_key, 0);
259 
260 	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
261 				     new_root_objectid, &disk_key, level,
262 				     buf->start, 0);
263 	if (IS_ERR(cow))
264 		return PTR_ERR(cow);
265 
266 	copy_extent_buffer(cow, buf, 0, 0, cow->len);
267 	btrfs_set_header_bytenr(cow, cow->start);
268 	btrfs_set_header_generation(cow, trans->transid);
269 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
270 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
271 				     BTRFS_HEADER_FLAG_RELOC);
272 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
273 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
274 	else
275 		btrfs_set_header_owner(cow, new_root_objectid);
276 
277 	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(cow),
278 			    BTRFS_FSID_SIZE);
279 
280 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
281 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
282 		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
283 	else
284 		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
285 
286 	if (ret)
287 		return ret;
288 
289 	btrfs_mark_buffer_dirty(cow);
290 	*cow_ret = cow;
291 	return 0;
292 }
293 
294 enum mod_log_op {
295 	MOD_LOG_KEY_REPLACE,
296 	MOD_LOG_KEY_ADD,
297 	MOD_LOG_KEY_REMOVE,
298 	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
299 	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
300 	MOD_LOG_MOVE_KEYS,
301 	MOD_LOG_ROOT_REPLACE,
302 };
303 
304 struct tree_mod_move {
305 	int dst_slot;
306 	int nr_items;
307 };
308 
309 struct tree_mod_root {
310 	u64 logical;
311 	u8 level;
312 };
313 
314 struct tree_mod_elem {
315 	struct rb_node node;
316 	u64 index;		/* shifted logical */
317 	u64 seq;
318 	enum mod_log_op op;
319 
320 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 	int slot;
322 
323 	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
324 	u64 generation;
325 
326 	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
327 	struct btrfs_disk_key key;
328 	u64 blockptr;
329 
330 	/* this is used for op == MOD_LOG_MOVE_KEYS */
331 	struct tree_mod_move move;
332 
333 	/* this is used for op == MOD_LOG_ROOT_REPLACE */
334 	struct tree_mod_root old_root;
335 };
336 
337 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
338 {
339 	read_lock(&fs_info->tree_mod_log_lock);
340 }
341 
342 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
343 {
344 	read_unlock(&fs_info->tree_mod_log_lock);
345 }
346 
347 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
348 {
349 	write_lock(&fs_info->tree_mod_log_lock);
350 }
351 
352 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
353 {
354 	write_unlock(&fs_info->tree_mod_log_lock);
355 }
356 
357 /*
358  * Increment the upper half of tree_mod_seq, set lower half zero.
359  *
360  * Must be called with fs_info->tree_mod_seq_lock held.
361  */
362 static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
363 {
364 	u64 seq = atomic64_read(&fs_info->tree_mod_seq);
365 	seq &= 0xffffffff00000000ull;
366 	seq += 1ull << 32;
367 	atomic64_set(&fs_info->tree_mod_seq, seq);
368 	return seq;
369 }
370 
371 /*
372  * Increment the lower half of tree_mod_seq.
373  *
374  * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
375  * are generated should not technically require a spin lock here. (Rationale:
376  * incrementing the minor while the major seq number increment is between its
377  * atomic64_read and atomic64_set calls doesn't duplicate sequence numbers; it
378  * just returns a unique sequence number as usual.) We have decided to leave
379  * that requirement in here and rethink it once we notice it really imposes a
380  * problem on some workload.
381  */
382 static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
383 {
384 	return atomic64_inc_return(&fs_info->tree_mod_seq);
385 }
386 
387 /*
388  * return the last minor in the previous major tree_mod_seq number
389  */
390 u64 btrfs_tree_mod_seq_prev(u64 seq)
391 {
392 	return (seq & 0xffffffff00000000ull) - 1ull;
393 }
394 
395 /*
396  * This adds a new blocker to the tree mod log's blocker list if the @elem
397  * passed does not already have a sequence number set. So when a caller expects
398  * to record tree modifications, it should ensure that elem->seq is set to zero
399  * before calling btrfs_get_tree_mod_seq.
400  * Returns a fresh, unused tree log modification sequence number, even if no new
401  * blocker was added.
402  */
403 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
404 			   struct seq_list *elem)
405 {
406 	u64 seq;
407 
408 	tree_mod_log_write_lock(fs_info);
409 	spin_lock(&fs_info->tree_mod_seq_lock);
410 	if (!elem->seq) {
411 		elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
412 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
413 	}
414 	seq = btrfs_inc_tree_mod_seq_minor(fs_info);
415 	spin_unlock(&fs_info->tree_mod_seq_lock);
416 	tree_mod_log_write_unlock(fs_info);
417 
418 	return seq;
419 }
420 
421 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
422 			    struct seq_list *elem)
423 {
424 	struct rb_root *tm_root;
425 	struct rb_node *node;
426 	struct rb_node *next;
427 	struct seq_list *cur_elem;
428 	struct tree_mod_elem *tm;
429 	u64 min_seq = (u64)-1;
430 	u64 seq_putting = elem->seq;
431 
432 	if (!seq_putting)
433 		return;
434 
435 	spin_lock(&fs_info->tree_mod_seq_lock);
436 	list_del(&elem->list);
437 	elem->seq = 0;
438 
439 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
440 		if (cur_elem->seq < min_seq) {
441 			if (seq_putting > cur_elem->seq) {
442 				/*
443 				 * blocker with lower sequence number exists, we
444 				 * cannot remove anything from the log
445 				 */
446 				spin_unlock(&fs_info->tree_mod_seq_lock);
447 				return;
448 			}
449 			min_seq = cur_elem->seq;
450 		}
451 	}
452 	spin_unlock(&fs_info->tree_mod_seq_lock);
453 
454 	/*
455 	 * anything that's lower than the lowest existing (read: blocked)
456 	 * sequence number can be removed from the tree.
457 	 */
458 	tree_mod_log_write_lock(fs_info);
459 	tm_root = &fs_info->tree_mod_log;
460 	for (node = rb_first(tm_root); node; node = next) {
461 		next = rb_next(node);
462 		tm = container_of(node, struct tree_mod_elem, node);
463 		if (tm->seq > min_seq)
464 			continue;
465 		rb_erase(node, tm_root);
466 		kfree(tm);
467 	}
468 	tree_mod_log_write_unlock(fs_info);
469 }
470 
471 /*
472  * key order of the log:
473  *       index -> sequence
474  *
475  * the index is the shifted logical of the *new* root node for root replace
476  * operations, or the shifted logical of the affected block for all other
477  * operations.
478  */
479 static noinline int
480 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
481 {
482 	struct rb_root *tm_root;
483 	struct rb_node **new;
484 	struct rb_node *parent = NULL;
485 	struct tree_mod_elem *cur;
486 	int ret = 0;
487 
488 	BUG_ON(!tm);
489 
490 	tree_mod_log_write_lock(fs_info);
491 	if (list_empty(&fs_info->tree_mod_seq_list)) {
492 		tree_mod_log_write_unlock(fs_info);
493 		/*
494 		 * Ok we no longer care about logging modifications, free up tm
495 		 * and return 0.  Any callers shouldn't be using tm after
496 		 * calling tree_mod_log_insert, but if they do we can just
497 		 * change this to return a special error code to let the callers
498 		 * do their own thing.
499 		 */
500 		kfree(tm);
501 		return 0;
502 	}
503 
504 	spin_lock(&fs_info->tree_mod_seq_lock);
505 	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
506 	spin_unlock(&fs_info->tree_mod_seq_lock);
507 
508 	tm_root = &fs_info->tree_mod_log;
509 	new = &tm_root->rb_node;
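	/*
	 * descend the rb-tree ordered by (index, seq); an entry with both
	 * fields equal to an existing node is rejected with -EEXIST
	 */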
510 	while (*new) {
511 		cur = container_of(*new, struct tree_mod_elem, node);
512 		parent = *new;
513 		if (cur->index < tm->index)
514 			new = &((*new)->rb_left);
515 		else if (cur->index > tm->index)
516 			new = &((*new)->rb_right);
517 		else if (cur->seq < tm->seq)
518 			new = &((*new)->rb_left);
519 		else if (cur->seq > tm->seq)
520 			new = &((*new)->rb_right);
521 		else {
522 			ret = -EEXIST;
523 			kfree(tm);
524 			goto out;
525 		}
526 	}
527 
528 	rb_link_node(&tm->node, parent, new);
529 	rb_insert_color(&tm->node, tm_root);
530 out:
531 	tree_mod_log_write_unlock(fs_info);
532 	return ret;
533 }
534 
535 /*
536  * Determines if logging can be omitted. Returns 1 if it can. Otherwise it
537  * returns zero; in that case the caller goes on to record the modification,
538  * and the tree_mod_log_lock is taken and released inside
539  * __tree_mod_log_insert().
540  */
541 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
542 				    struct extent_buffer *eb) {
543 	smp_mb();
544 	if (list_empty(&(fs_info)->tree_mod_seq_list))
545 		return 1;
546 	if (eb && btrfs_header_level(eb) == 0)
547 		return 1;
548 	return 0;
549 }
550 
551 static inline int
552 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
553 			  struct extent_buffer *eb, int slot,
554 			  enum mod_log_op op, gfp_t flags)
555 {
556 	struct tree_mod_elem *tm;
557 
558 	tm = kzalloc(sizeof(*tm), flags);
559 	if (!tm)
560 		return -ENOMEM;
561 
562 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
563 	if (op != MOD_LOG_KEY_ADD) {
564 		btrfs_node_key(eb, &tm->key, slot);
565 		tm->blockptr = btrfs_node_blockptr(eb, slot);
566 	}
567 	tm->op = op;
568 	tm->slot = slot;
569 	tm->generation = btrfs_node_ptr_generation(eb, slot);
570 
571 	return __tree_mod_log_insert(fs_info, tm);
572 }
573 
574 static noinline int
575 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
576 			struct extent_buffer *eb, int slot,
577 			enum mod_log_op op, gfp_t flags)
578 {
579 	if (tree_mod_dont_log(fs_info, eb))
580 		return 0;
581 
582 	return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
583 }
584 
585 static noinline int
586 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
587 			 struct extent_buffer *eb, int dst_slot, int src_slot,
588 			 int nr_items, gfp_t flags)
589 {
590 	struct tree_mod_elem *tm;
591 	int ret;
592 	int i;
593 
594 	if (tree_mod_dont_log(fs_info, eb))
595 		return 0;
596 
597 	/*
598 	 * When we overwrite something during the move, we log these removals.
599 	 * This can only happen when we move towards the beginning of the
600 	 * buffer, i.e. dst_slot < src_slot.
601 	 */
602 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
603 		ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
604 				MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
605 		BUG_ON(ret < 0);
606 	}
607 
608 	tm = kzalloc(sizeof(*tm), flags);
609 	if (!tm)
610 		return -ENOMEM;
611 
612 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
613 	tm->slot = src_slot;
614 	tm->move.dst_slot = dst_slot;
615 	tm->move.nr_items = nr_items;
616 	tm->op = MOD_LOG_MOVE_KEYS;
617 
618 	return __tree_mod_log_insert(fs_info, tm);
619 }
620 
621 static inline void
622 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
623 {
624 	int i;
625 	u32 nritems;
626 	int ret;
627 
628 	if (btrfs_header_level(eb) == 0)
629 		return;
630 
631 	nritems = btrfs_header_nritems(eb);
632 	for (i = nritems - 1; i >= 0; i--) {
633 		ret = __tree_mod_log_insert_key(fs_info, eb, i,
634 				MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
635 		BUG_ON(ret < 0);
636 	}
637 }
638 
639 static noinline int
640 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
641 			 struct extent_buffer *old_root,
642 			 struct extent_buffer *new_root, gfp_t flags,
643 			 int log_removal)
644 {
645 	struct tree_mod_elem *tm;
646 
647 	if (tree_mod_dont_log(fs_info, NULL))
648 		return 0;
649 
650 	if (log_removal)
651 		__tree_mod_log_free_eb(fs_info, old_root);
652 
653 	tm = kzalloc(sizeof(*tm), flags);
654 	if (!tm)
655 		return -ENOMEM;
656 
657 	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
658 	tm->old_root.logical = old_root->start;
659 	tm->old_root.level = btrfs_header_level(old_root);
660 	tm->generation = btrfs_header_generation(old_root);
661 	tm->op = MOD_LOG_ROOT_REPLACE;
662 
663 	return __tree_mod_log_insert(fs_info, tm);
664 }
665 
666 static struct tree_mod_elem *
667 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
668 		      int smallest)
669 {
670 	struct rb_root *tm_root;
671 	struct rb_node *node;
672 	struct tree_mod_elem *cur = NULL;
673 	struct tree_mod_elem *found = NULL;
674 	u64 index = start >> PAGE_CACHE_SHIFT;
675 
676 	tree_mod_log_read_lock(fs_info);
677 	tm_root = &fs_info->tree_mod_log;
678 	node = tm_root->rb_node;
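	/*
	 * binary search for the block's index; among entries with a matching
	 * index, pick either the smallest or the largest seq that is still
	 * >= min_seq, depending on @smallest
	 */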
679 	while (node) {
680 		cur = container_of(node, struct tree_mod_elem, node);
681 		if (cur->index < index) {
682 			node = node->rb_left;
683 		} else if (cur->index > index) {
684 			node = node->rb_right;
685 		} else if (cur->seq < min_seq) {
686 			node = node->rb_left;
687 		} else if (!smallest) {
688 			/* we want the node with the highest seq */
689 			if (found)
690 				BUG_ON(found->seq > cur->seq);
691 			found = cur;
692 			node = node->rb_left;
693 		} else if (cur->seq > min_seq) {
694 			/* we want the node with the smallest seq */
695 			if (found)
696 				BUG_ON(found->seq < cur->seq);
697 			found = cur;
698 			node = node->rb_right;
699 		} else {
700 			found = cur;
701 			break;
702 		}
703 	}
704 	tree_mod_log_read_unlock(fs_info);
705 
706 	return found;
707 }
708 
709 /*
710  * this returns the element from the log with the smallest time sequence
711  * value that's in the log (the oldest log item). any element with a time
712  * sequence lower than min_seq will be ignored.
713  */
714 static struct tree_mod_elem *
715 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
716 			   u64 min_seq)
717 {
718 	return __tree_mod_log_search(fs_info, start, min_seq, 1);
719 }
720 
721 /*
722  * this returns the element from the log with the largest time sequence
723  * value that's in the log (the most recent log item). any element with
724  * a time sequence lower than min_seq will be ignored.
725  */
726 static struct tree_mod_elem *
727 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
728 {
729 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
730 }
731 
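/*
 * Log a copy of nr_items key pointers from @src to @dst: each copied slot is
 * recorded as a KEY_REMOVE from the source followed by a KEY_ADD into the
 * destination.
 */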
732 static noinline void
733 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
734 		     struct extent_buffer *src, unsigned long dst_offset,
735 		     unsigned long src_offset, int nr_items)
736 {
737 	int ret;
738 	int i;
739 
740 	if (tree_mod_dont_log(fs_info, NULL))
741 		return;
742 
743 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
744 		return;
745 
746 	for (i = 0; i < nr_items; i++) {
747 		ret = __tree_mod_log_insert_key(fs_info, src,
748 						i + src_offset,
749 						MOD_LOG_KEY_REMOVE, GFP_NOFS);
750 		BUG_ON(ret < 0);
751 		ret = __tree_mod_log_insert_key(fs_info, dst,
752 						     i + dst_offset,
753 						     MOD_LOG_KEY_ADD,
754 						     GFP_NOFS);
755 		BUG_ON(ret < 0);
756 	}
757 }
758 
759 static inline void
760 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
761 		     int dst_offset, int src_offset, int nr_items)
762 {
763 	int ret;
764 	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
765 				       nr_items, GFP_NOFS);
766 	BUG_ON(ret < 0);
767 }
768 
769 static noinline void
770 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
771 			  struct extent_buffer *eb, int slot, int atomic)
772 {
773 	int ret;
774 
775 	ret = __tree_mod_log_insert_key(fs_info, eb, slot,
776 					MOD_LOG_KEY_REPLACE,
777 					atomic ? GFP_ATOMIC : GFP_NOFS);
778 	BUG_ON(ret < 0);
779 }
780 
781 static noinline void
782 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
783 {
784 	if (tree_mod_dont_log(fs_info, eb))
785 		return;
786 	__tree_mod_log_free_eb(fs_info, eb);
787 }
788 
789 static noinline void
790 tree_mod_log_set_root_pointer(struct btrfs_root *root,
791 			      struct extent_buffer *new_root_node,
792 			      int log_removal)
793 {
794 	int ret;
795 	ret = tree_mod_log_insert_root(root->fs_info, root->node,
796 				       new_root_node, GFP_NOFS, log_removal);
797 	BUG_ON(ret < 0);
798 }
799 
800 /*
801  * check if the tree block can be shared by multiple trees
802  */
803 int btrfs_block_can_be_shared(struct btrfs_root *root,
804 			      struct extent_buffer *buf)
805 {
806 	/*
807 	 * Tree blocks not in reference counted trees and tree roots
808 	 * are never shared. If a block was allocated after the last
809 	 * snapshot and the block was not allocated by tree relocation,
810 	 * we know the block is not shared.
811 	 */
812 	if (root->ref_cows &&
813 	    buf != root->node && buf != root->commit_root &&
814 	    (btrfs_header_generation(buf) <=
815 	     btrfs_root_last_snapshot(&root->root_item) ||
816 	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
817 		return 1;
818 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
819 	if (root->ref_cows &&
820 	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
821 		return 1;
822 #endif
823 	return 0;
824 }
825 
826 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
827 				       struct btrfs_root *root,
828 				       struct extent_buffer *buf,
829 				       struct extent_buffer *cow,
830 				       int *last_ref)
831 {
832 	u64 refs;
833 	u64 owner;
834 	u64 flags;
835 	u64 new_flags = 0;
836 	int ret;
837 
838 	/*
839 	 * Backrefs update rules:
840 	 *
841 	 * Always use full backrefs for extent pointers in tree block
842 	 * allocated by tree relocation.
843 	 *
844 	 * If a shared tree block is no longer referenced by its owner
845 	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
846 	 * use full backrefs for extent pointers in tree block.
847 	 *
848 	 * If a tree block is being relocated
849 	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
850 	 * use full backrefs for extent pointers in tree block.
851 	 * The reason for this is some operations (such as drop tree)
852 	 * are only allowed for blocks that use full backrefs.
853 	 */
854 
855 	if (btrfs_block_can_be_shared(root, buf)) {
856 		ret = btrfs_lookup_extent_info(trans, root, buf->start,
857 					       btrfs_header_level(buf), 1,
858 					       &refs, &flags);
859 		if (ret)
860 			return ret;
861 		if (refs == 0) {
862 			ret = -EROFS;
863 			btrfs_std_error(root->fs_info, ret);
864 			return ret;
865 		}
866 	} else {
867 		refs = 1;
868 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
869 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
870 			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
871 		else
872 			flags = 0;
873 	}
874 
875 	owner = btrfs_header_owner(buf);
876 	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
877 	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
878 
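	/*
	 * If the block is still shared (refs > 1), adjust the backrefs so
	 * both the old block and the new copy stay consistent; if this was
	 * the last reference, clean the old block here and let the caller
	 * free it via *last_ref.
	 */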
879 	if (refs > 1) {
880 		if ((owner == root->root_key.objectid ||
881 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
882 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
883 			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
884 			BUG_ON(ret); /* -ENOMEM */
885 
886 			if (root->root_key.objectid ==
887 			    BTRFS_TREE_RELOC_OBJECTID) {
888 				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
889 				BUG_ON(ret); /* -ENOMEM */
890 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
891 				BUG_ON(ret); /* -ENOMEM */
892 			}
893 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
894 		} else {
895 
896 			if (root->root_key.objectid ==
897 			    BTRFS_TREE_RELOC_OBJECTID)
898 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
899 			else
900 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
901 			BUG_ON(ret); /* -ENOMEM */
902 		}
903 		if (new_flags != 0) {
904 			int level = btrfs_header_level(buf);
905 
906 			ret = btrfs_set_disk_extent_flags(trans, root,
907 							  buf->start,
908 							  buf->len,
909 							  new_flags, level, 0);
910 			if (ret)
911 				return ret;
912 		}
913 	} else {
914 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
915 			if (root->root_key.objectid ==
916 			    BTRFS_TREE_RELOC_OBJECTID)
917 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
918 			else
919 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
920 			BUG_ON(ret); /* -ENOMEM */
921 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
922 			BUG_ON(ret); /* -ENOMEM */
923 		}
924 		clean_tree_block(trans, root, buf);
925 		*last_ref = 1;
926 	}
927 	return 0;
928 }
929 
930 /*
931  * does the dirty work in cow of a single block.  The parent block (if
932  * supplied) is updated to point to the new cow copy.  The new buffer is marked
933  * dirty and returned locked.  If you modify the block it needs to be marked
934  * dirty again.
935  *
936  * search_start -- an allocation hint for the new block
937  *
938  * empty_size -- a hint that you plan on doing more cow.  This is the size in
939  * bytes the allocator should try to find free next to the block it returns.
940  * This is just a hint and may be ignored by the allocator.
941  */
942 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
943 			     struct btrfs_root *root,
944 			     struct extent_buffer *buf,
945 			     struct extent_buffer *parent, int parent_slot,
946 			     struct extent_buffer **cow_ret,
947 			     u64 search_start, u64 empty_size)
948 {
949 	struct btrfs_disk_key disk_key;
950 	struct extent_buffer *cow;
951 	int level, ret;
952 	int last_ref = 0;
953 	int unlock_orig = 0;
954 	u64 parent_start;
955 
956 	if (*cow_ret == buf)
957 		unlock_orig = 1;
958 
959 	btrfs_assert_tree_locked(buf);
960 
961 	WARN_ON(root->ref_cows && trans->transid !=
962 		root->fs_info->running_transaction->transid);
963 	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
964 
965 	level = btrfs_header_level(buf);
966 
967 	if (level == 0)
968 		btrfs_item_key(buf, &disk_key, 0);
969 	else
970 		btrfs_node_key(buf, &disk_key, 0);
971 
972 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
973 		if (parent)
974 			parent_start = parent->start;
975 		else
976 			parent_start = 0;
977 	} else
978 		parent_start = 0;
979 
980 	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
981 				     root->root_key.objectid, &disk_key,
982 				     level, search_start, empty_size);
983 	if (IS_ERR(cow))
984 		return PTR_ERR(cow);
985 
986 	/* cow is set to blocking by btrfs_init_new_buffer */
987 
988 	copy_extent_buffer(cow, buf, 0, 0, cow->len);
989 	btrfs_set_header_bytenr(cow, cow->start);
990 	btrfs_set_header_generation(cow, trans->transid);
991 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
992 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
993 				     BTRFS_HEADER_FLAG_RELOC);
994 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
995 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
996 	else
997 		btrfs_set_header_owner(cow, root->root_key.objectid);
998 
999 	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(cow),
1000 			    BTRFS_FSID_SIZE);
1001 
1002 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1003 	if (ret) {
1004 		btrfs_abort_transaction(trans, root, ret);
1005 		return ret;
1006 	}
1007 
1008 	if (root->ref_cows) {
1009 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1010 		if (ret)
1011 			return ret;
1012 	}
1013 
1014 	if (buf == root->node) {
1015 		WARN_ON(parent && parent != buf);
1016 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1017 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1018 			parent_start = buf->start;
1019 		else
1020 			parent_start = 0;
1021 
1022 		extent_buffer_get(cow);
1023 		tree_mod_log_set_root_pointer(root, cow, 1);
1024 		rcu_assign_pointer(root->node, cow);
1025 
1026 		btrfs_free_tree_block(trans, root, buf, parent_start,
1027 				      last_ref);
1028 		free_extent_buffer(buf);
1029 		add_root_to_dirty_list(root);
1030 	} else {
1031 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1032 			parent_start = parent->start;
1033 		else
1034 			parent_start = 0;
1035 
1036 		WARN_ON(trans->transid != btrfs_header_generation(parent));
1037 		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1038 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1039 		btrfs_set_node_blockptr(parent, parent_slot,
1040 					cow->start);
1041 		btrfs_set_node_ptr_generation(parent, parent_slot,
1042 					      trans->transid);
1043 		btrfs_mark_buffer_dirty(parent);
1044 		if (last_ref)
1045 			tree_mod_log_free_eb(root->fs_info, buf);
1046 		btrfs_free_tree_block(trans, root, buf, parent_start,
1047 				      last_ref);
1048 	}
1049 	if (unlock_orig)
1050 		btrfs_tree_unlock(buf);
1051 	free_extent_buffer_stale(buf);
1052 	btrfs_mark_buffer_dirty(cow);
1053 	*cow_ret = cow;
1054 	return 0;
1055 }
1056 
1057 /*
1058  * returns the logical address of the oldest predecessor of the given root.
1059  * entries older than time_seq are ignored.
1060  */
1061 static struct tree_mod_elem *
1062 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1063 			   struct extent_buffer *eb_root, u64 time_seq)
1064 {
1065 	struct tree_mod_elem *tm;
1066 	struct tree_mod_elem *found = NULL;
1067 	u64 root_logical = eb_root->start;
1068 	int looped = 0;
1069 
1070 	if (!time_seq)
1071 		return NULL;
1072 
1073 	/*
1074 	 * the very last operation that's logged for a root is the replacement
1075 	 * operation (if it is replaced at all). this has the index of the *new*
1076 	 * root, making it the very first operation that's logged for this root.
1077 	 */
1078 	while (1) {
1079 		tm = tree_mod_log_search_oldest(fs_info, root_logical,
1080 						time_seq);
1081 		if (!looped && !tm)
1082 			return NULL;
1083 		/*
1084 		 * if there are no tree operations for the oldest root, we simply
1085 		 * return it. this should only happen if that (old) root is at
1086 		 * level 0.
1087 		 */
1088 		if (!tm)
1089 			break;
1090 
1091 		/*
1092 		 * if there's an operation that's not a root replacement, we
1093 		 * found the oldest version of our root. normally, we'll find a
1094 		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1095 		 */
1096 		if (tm->op != MOD_LOG_ROOT_REPLACE)
1097 			break;
1098 
1099 		found = tm;
1100 		root_logical = tm->old_root.logical;
1101 		looped = 1;
1102 	}
1103 
1104 	/* if there's no old root to return, return what we found instead */
1105 	if (!found)
1106 		found = tm;
1107 
1108 	return found;
1109 }
1110 
1111 /*
1112  * tm is a pointer to the first operation to rewind within eb.  Then, all
1113  * previous operations will be rewound (until we reach something older than
1114  * time_seq).
1115  */
1116 static void
1117 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1118 		      u64 time_seq, struct tree_mod_elem *first_tm)
1119 {
1120 	u32 n;
1121 	struct rb_node *next;
1122 	struct tree_mod_elem *tm = first_tm;
1123 	unsigned long o_dst;
1124 	unsigned long o_src;
1125 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1126 
1127 	n = btrfs_header_nritems(eb);
1128 	tree_mod_log_read_lock(fs_info);
1129 	while (tm && tm->seq >= time_seq) {
1130 		/*
1131 		 * all the operations are recorded with the operator used for
1132 		 * the modification. as we're going backwards, we do the
1133 		 * opposite of each operation here.
1134 		 */
1135 		switch (tm->op) {
1136 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1137 			BUG_ON(tm->slot < n);
1138 			/* Fallthrough */
1139 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1140 		case MOD_LOG_KEY_REMOVE:
1141 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1142 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1143 			btrfs_set_node_ptr_generation(eb, tm->slot,
1144 						      tm->generation);
1145 			n++;
1146 			break;
1147 		case MOD_LOG_KEY_REPLACE:
1148 			BUG_ON(tm->slot >= n);
1149 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1150 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1151 			btrfs_set_node_ptr_generation(eb, tm->slot,
1152 						      tm->generation);
1153 			break;
1154 		case MOD_LOG_KEY_ADD:
1155 			/* if a move operation is needed it's in the log */
1156 			n--;
1157 			break;
1158 		case MOD_LOG_MOVE_KEYS:
1159 			o_dst = btrfs_node_key_ptr_offset(tm->slot);
1160 			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1161 			memmove_extent_buffer(eb, o_dst, o_src,
1162 					      tm->move.nr_items * p_size);
1163 			break;
1164 		case MOD_LOG_ROOT_REPLACE:
1165 			/*
1166 			 * this operation is special. for roots, this must be
1167 			 * handled explicitly before rewinding.
1168 			 * for non-roots, this operation may exist if the node
1169 			 * was a root: root A -> child B; then A gets empty and
1170 			 * B is promoted to the new root. in the mod log, we'll
1171 			 * have a root-replace operation for B, a tree block
1172 			 * that is not a root.  We simply ignore that operation.
1173 			 */
1174 			break;
1175 		}
1176 		next = rb_next(&tm->node);
1177 		if (!next)
1178 			break;
1179 		tm = container_of(next, struct tree_mod_elem, node);
1180 		if (tm->index != first_tm->index)
1181 			break;
1182 	}
1183 	tree_mod_log_read_unlock(fs_info);
1184 	btrfs_set_header_nritems(eb, n);
1185 }
1186 
1187 /*
1188  * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1189  * is returned. If rewind operations happen, a fresh buffer is returned. The
1190  * returned buffer is always read-locked. If the returned buffer is not the
1191  * input buffer, the lock on the input buffer is released and the input buffer
1192  * is freed (its refcount is decremented).
1193  */
1194 static struct extent_buffer *
1195 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1196 		    struct extent_buffer *eb, u64 time_seq)
1197 {
1198 	struct extent_buffer *eb_rewin;
1199 	struct tree_mod_elem *tm;
1200 
1201 	if (!time_seq)
1202 		return eb;
1203 
1204 	if (btrfs_header_level(eb) == 0)
1205 		return eb;
1206 
1207 	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1208 	if (!tm)
1209 		return eb;
1210 
1211 	btrfs_set_path_blocking(path);
1212 	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1213 
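	/*
	 * If the top logged operation is a remove-while-freeing, the buffer's
	 * old contents live entirely in the mod log: start from an empty
	 * dummy buffer and let __tree_mod_log_rewind re-add the items.
	 * Otherwise clone the current buffer and rewind that copy.
	 */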
1214 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1215 		BUG_ON(tm->slot != 0);
1216 		eb_rewin = alloc_dummy_extent_buffer(eb->start,
1217 						fs_info->tree_root->nodesize);
1218 		if (!eb_rewin) {
1219 			btrfs_tree_read_unlock_blocking(eb);
1220 			free_extent_buffer(eb);
1221 			return NULL;
1222 		}
1223 		btrfs_set_header_bytenr(eb_rewin, eb->start);
1224 		btrfs_set_header_backref_rev(eb_rewin,
1225 					     btrfs_header_backref_rev(eb));
1226 		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1227 		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1228 	} else {
1229 		eb_rewin = btrfs_clone_extent_buffer(eb);
1230 		if (!eb_rewin) {
1231 			btrfs_tree_read_unlock_blocking(eb);
1232 			free_extent_buffer(eb);
1233 			return NULL;
1234 		}
1235 	}
1236 
1237 	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1238 	btrfs_tree_read_unlock_blocking(eb);
1239 	free_extent_buffer(eb);
1240 
1241 	extent_buffer_get(eb_rewin);
1242 	btrfs_tree_read_lock(eb_rewin);
1243 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1244 	WARN_ON(btrfs_header_nritems(eb_rewin) >
1245 		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1246 
1247 	return eb_rewin;
1248 }
1249 
1250 /*
1251  * get_old_root() rewinds the state of @root's root node to the given @time_seq
1252  * value. If there are no changes, the current root->root_node is returned. If
1253  * anything changed in between, there's a fresh buffer allocated on which the
1254  * rewind operations are done. In any case, the returned buffer is read locked.
1255  * Returns NULL on error (with no locks held).
1256  */
1257 static inline struct extent_buffer *
1258 get_old_root(struct btrfs_root *root, u64 time_seq)
1259 {
1260 	struct tree_mod_elem *tm;
1261 	struct extent_buffer *eb = NULL;
1262 	struct extent_buffer *eb_root;
1263 	struct extent_buffer *old;
1264 	struct tree_mod_root *old_root = NULL;
1265 	u64 old_generation = 0;
1266 	u64 logical;
1267 	u32 blocksize;
1268 
1269 	eb_root = btrfs_read_lock_root_node(root);
1270 	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1271 	if (!tm)
1272 		return eb_root;
1273 
1274 	if (tm->op == MOD_LOG_ROOT_REPLACE) {
1275 		old_root = &tm->old_root;
1276 		old_generation = tm->generation;
1277 		logical = old_root->logical;
1278 	} else {
1279 		logical = eb_root->start;
1280 	}
1281 
1282 	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
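	/*
	 * Three cases: the old root block still exists on disk, so read and
	 * clone it; its contents were logged when it was freed (or nothing
	 * needs rewinding), so start from an empty dummy buffer; or the root
	 * was never replaced, so just clone the current root node.
	 */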
1283 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1284 		btrfs_tree_read_unlock(eb_root);
1285 		free_extent_buffer(eb_root);
1286 		blocksize = btrfs_level_size(root, old_root->level);
1287 		old = read_tree_block(root, logical, blocksize, 0);
1288 		if (!old || !extent_buffer_uptodate(old)) {
1289 			free_extent_buffer(old);
1290 			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1291 				logical);
1292 			WARN_ON(1);
1293 		} else {
1294 			eb = btrfs_clone_extent_buffer(old);
1295 			free_extent_buffer(old);
1296 		}
1297 	} else if (old_root) {
1298 		btrfs_tree_read_unlock(eb_root);
1299 		free_extent_buffer(eb_root);
1300 		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1301 	} else {
1302 		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1303 		eb = btrfs_clone_extent_buffer(eb_root);
1304 		btrfs_tree_read_unlock_blocking(eb_root);
1305 		free_extent_buffer(eb_root);
1306 	}
1307 
1308 	if (!eb)
1309 		return NULL;
1310 	extent_buffer_get(eb);
1311 	btrfs_tree_read_lock(eb);
1312 	if (old_root) {
1313 		btrfs_set_header_bytenr(eb, eb->start);
1314 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1315 		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1316 		btrfs_set_header_level(eb, old_root->level);
1317 		btrfs_set_header_generation(eb, old_generation);
1318 	}
1319 	if (tm)
1320 		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1321 	else
1322 		WARN_ON(btrfs_header_level(eb) != 0);
1323 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1324 
1325 	return eb;
1326 }
1327 
1328 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1329 {
1330 	struct tree_mod_elem *tm;
1331 	int level;
1332 	struct extent_buffer *eb_root = btrfs_root_node(root);
1333 
1334 	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1335 	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1336 		level = tm->old_root.level;
1337 	} else {
1338 		level = btrfs_header_level(eb_root);
1339 	}
1340 	free_extent_buffer(eb_root);
1341 
1342 	return level;
1343 }
1344 
1345 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1346 				   struct btrfs_root *root,
1347 				   struct extent_buffer *buf)
1348 {
1349 	/* ensure we can see the force_cow */
1350 	smp_rmb();
1351 
1352 	/*
1353 	 * We do not need to cow a block if
1354 	 * 1) this block is not created or changed in this transaction;
1355 	 * 2) this block does not belong to TREE_RELOC tree;
1356 	 * 3) the root is not forced COW.
1357 	 *
1358 	 * What is forced COW:
1359 	 *    when we create a snapshot during committing the transaction,
1360 	 *    after we've finished copying the src root, we must COW the shared
1361 	 *    block to ensure metadata consistency.
1362 	 */
1363 	if (btrfs_header_generation(buf) == trans->transid &&
1364 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1365 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1366 	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1367 	    !root->force_cow)
1368 		return 0;
1369 	return 1;
1370 }
1371 
1372 /*
1373  * cows a single block, see __btrfs_cow_block for the real work.
1374  * This version of it has extra checks so that a block isn't cow'd more than
1375  * once per transaction, as long as it hasn't been written yet
1376  */
1377 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1378 		    struct btrfs_root *root, struct extent_buffer *buf,
1379 		    struct extent_buffer *parent, int parent_slot,
1380 		    struct extent_buffer **cow_ret)
1381 {
1382 	u64 search_start;
1383 	int ret;
1384 
1385 	if (trans->transaction != root->fs_info->running_transaction)
1386 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1387 		       trans->transid,
1388 		       root->fs_info->running_transaction->transid);
1389 
1390 	if (trans->transid != root->fs_info->generation)
1391 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1392 		       trans->transid, root->fs_info->generation);
1393 
1394 	if (!should_cow_block(trans, root, buf)) {
1395 		*cow_ret = buf;
1396 		return 0;
1397 	}
1398 
1399 	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1400 
1401 	if (parent)
1402 		btrfs_set_lock_blocking(parent);
1403 	btrfs_set_lock_blocking(buf);
1404 
1405 	ret = __btrfs_cow_block(trans, root, buf, parent,
1406 				 parent_slot, cow_ret, search_start, 0);
1407 
1408 	trace_btrfs_cow_block(root, buf, *cow_ret);
1409 
1410 	return ret;
1411 }
1412 
1413 /*
1414  * helper function for defrag to decide if two blocks pointed to by a
1415  * node are actually close by
1416  */
1417 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1418 {
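	/* blocks within 32k of each other are considered close */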
1419 	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1420 		return 1;
1421 	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1422 		return 1;
1423 	return 0;
1424 }
1425 
1426 /*
1427  * compare two keys in a memcmp fashion
1428  */
1429 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1430 {
1431 	struct btrfs_key k1;
1432 
1433 	btrfs_disk_key_to_cpu(&k1, disk);
1434 
1435 	return btrfs_comp_cpu_keys(&k1, k2);
1436 }
1437 
1438 /*
1439  * same as comp_keys only with two btrfs_key's
1440  */
1441 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1442 {
1443 	if (k1->objectid > k2->objectid)
1444 		return 1;
1445 	if (k1->objectid < k2->objectid)
1446 		return -1;
1447 	if (k1->type > k2->type)
1448 		return 1;
1449 	if (k1->type < k2->type)
1450 		return -1;
1451 	if (k1->offset > k2->offset)
1452 		return 1;
1453 	if (k1->offset < k2->offset)
1454 		return -1;
1455 	return 0;
1456 }
1457 
1458 /*
1459  * this is used by the defrag code to go through all the
1460  * leaves pointed to by a node and reallocate them so that
1461  * disk order is close to key order
1462  */
1463 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1464 		       struct btrfs_root *root, struct extent_buffer *parent,
1465 		       int start_slot, u64 *last_ret,
1466 		       struct btrfs_key *progress)
1467 {
1468 	struct extent_buffer *cur;
1469 	u64 blocknr;
1470 	u64 gen;
1471 	u64 search_start = *last_ret;
1472 	u64 last_block = 0;
1473 	u64 other;
1474 	u32 parent_nritems;
1475 	int end_slot;
1476 	int i;
1477 	int err = 0;
1478 	int parent_level;
1479 	int uptodate;
1480 	u32 blocksize;
1481 	int progress_passed = 0;
1482 	struct btrfs_disk_key disk_key;
1483 
1484 	parent_level = btrfs_header_level(parent);
1485 
1486 	WARN_ON(trans->transaction != root->fs_info->running_transaction);
1487 	WARN_ON(trans->transid != root->fs_info->generation);
1488 
1489 	parent_nritems = btrfs_header_nritems(parent);
1490 	blocksize = btrfs_level_size(root, parent_level - 1);
1491 	end_slot = parent_nritems;
1492 
1493 	if (parent_nritems == 1)
1494 		return 0;
1495 
1496 	btrfs_set_lock_blocking(parent);
1497 
1498 	for (i = start_slot; i < end_slot; i++) {
1499 		int close = 1;
1500 
1501 		btrfs_node_key(parent, &disk_key, i);
1502 		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1503 			continue;
1504 
1505 		progress_passed = 1;
1506 		blocknr = btrfs_node_blockptr(parent, i);
1507 		gen = btrfs_node_ptr_generation(parent, i);
1508 		if (last_block == 0)
1509 			last_block = blocknr;
1510 
1511 		if (i > 0) {
1512 			other = btrfs_node_blockptr(parent, i - 1);
1513 			close = close_blocks(blocknr, other, blocksize);
1514 		}
1515 		if (!close && i < end_slot - 2) {
1516 			other = btrfs_node_blockptr(parent, i + 1);
1517 			close = close_blocks(blocknr, other, blocksize);
1518 		}
1519 		if (close) {
1520 			last_block = blocknr;
1521 			continue;
1522 		}
1523 
1524 		cur = btrfs_find_tree_block(root, blocknr, blocksize);
1525 		if (cur)
1526 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1527 		else
1528 			uptodate = 0;
1529 		if (!cur || !uptodate) {
1530 			if (!cur) {
1531 				cur = read_tree_block(root, blocknr,
1532 							 blocksize, gen);
1533 				if (!cur || !extent_buffer_uptodate(cur)) {
1534 					free_extent_buffer(cur);
1535 					return -EIO;
1536 				}
1537 			} else if (!uptodate) {
1538 				err = btrfs_read_buffer(cur, gen);
1539 				if (err) {
1540 					free_extent_buffer(cur);
1541 					return err;
1542 				}
1543 			}
1544 		}
1545 		if (search_start == 0)
1546 			search_start = last_block;
1547 
1548 		btrfs_tree_lock(cur);
1549 		btrfs_set_lock_blocking(cur);
1550 		err = __btrfs_cow_block(trans, root, cur, parent, i,
1551 					&cur, search_start,
1552 					min(16 * blocksize,
1553 					    (end_slot - i) * blocksize));
1554 		if (err) {
1555 			btrfs_tree_unlock(cur);
1556 			free_extent_buffer(cur);
1557 			break;
1558 		}
1559 		search_start = cur->start;
1560 		last_block = cur->start;
1561 		*last_ret = search_start;
1562 		btrfs_tree_unlock(cur);
1563 		free_extent_buffer(cur);
1564 	}
1565 	return err;
1566 }
1567 
1568 /*
1569  * The leaf data grows from end-to-front in the node.
1570  * this returns the address of the start of the last item,
1571  * which is the stop of the leaf data stack
1572  */
1573 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1574 					 struct extent_buffer *leaf)
1575 {
1576 	u32 nr = btrfs_header_nritems(leaf);
1577 	if (nr == 0)
1578 		return BTRFS_LEAF_DATA_SIZE(root);
1579 	return btrfs_item_offset_nr(leaf, nr - 1);
1580 }
1581 
1582 
1583 /*
1584  * search for key in the extent_buffer.  The items start at offset p,
1585  * and they are item_size apart.  There are 'max' items in p.
1586  *
1587  * the slot in the array is returned via slot, and it points to
1588  * the place where you would insert key if it is not found in
1589  * the array.
1590  *
1591  * slot may point to max if the key is bigger than all of the keys
1592  */
1593 static noinline int generic_bin_search(struct extent_buffer *eb,
1594 				       unsigned long p,
1595 				       int item_size, struct btrfs_key *key,
1596 				       int max, int *slot)
1597 {
1598 	int low = 0;
1599 	int high = max;
1600 	int mid;
1601 	int ret;
1602 	struct btrfs_disk_key *tmp = NULL;
1603 	struct btrfs_disk_key unaligned;
1604 	unsigned long offset;
1605 	char *kaddr = NULL;
1606 	unsigned long map_start = 0;
1607 	unsigned long map_len = 0;
1608 	int err;
1609 
1610 	while (low < high) {
1611 		mid = (low + high) / 2;
1612 		offset = p + mid * item_size;
1613 
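		/*
		 * use the key in place if it falls inside the currently
		 * mapped area; otherwise remap, falling back to a copy into
		 * the unaligned buffer when the key cannot be mapped
		 * contiguously
		 */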
1614 		if (!kaddr || offset < map_start ||
1615 		    (offset + sizeof(struct btrfs_disk_key)) >
1616 		    map_start + map_len) {
1617 
1618 			err = map_private_extent_buffer(eb, offset,
1619 						sizeof(struct btrfs_disk_key),
1620 						&kaddr, &map_start, &map_len);
1621 
1622 			if (!err) {
1623 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1624 							map_start);
1625 			} else {
1626 				read_extent_buffer(eb, &unaligned,
1627 						   offset, sizeof(unaligned));
1628 				tmp = &unaligned;
1629 			}
1630 
1631 		} else {
1632 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1633 							map_start);
1634 		}
1635 		ret = comp_keys(tmp, key);
1636 
1637 		if (ret < 0)
1638 			low = mid + 1;
1639 		else if (ret > 0)
1640 			high = mid;
1641 		else {
1642 			*slot = mid;
1643 			return 0;
1644 		}
1645 	}
1646 	*slot = low;
1647 	return 1;
1648 }
1649 
1650 /*
1651  * simple bin_search frontend that does the right thing for
1652  * leaves vs nodes
1653  */
1654 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1655 		      int level, int *slot)
1656 {
1657 	if (level == 0)
1658 		return generic_bin_search(eb,
1659 					  offsetof(struct btrfs_leaf, items),
1660 					  sizeof(struct btrfs_item),
1661 					  key, btrfs_header_nritems(eb),
1662 					  slot);
1663 	else
1664 		return generic_bin_search(eb,
1665 					  offsetof(struct btrfs_node, ptrs),
1666 					  sizeof(struct btrfs_key_ptr),
1667 					  key, btrfs_header_nritems(eb),
1668 					  slot);
1669 }
1670 
1671 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1672 		     int level, int *slot)
1673 {
1674 	return bin_search(eb, key, level, slot);
1675 }
1676 
1677 static void root_add_used(struct btrfs_root *root, u32 size)
1678 {
1679 	spin_lock(&root->accounting_lock);
1680 	btrfs_set_root_used(&root->root_item,
1681 			    btrfs_root_used(&root->root_item) + size);
1682 	spin_unlock(&root->accounting_lock);
1683 }
1684 
1685 static void root_sub_used(struct btrfs_root *root, u32 size)
1686 {
1687 	spin_lock(&root->accounting_lock);
1688 	btrfs_set_root_used(&root->root_item,
1689 			    btrfs_root_used(&root->root_item) - size);
1690 	spin_unlock(&root->accounting_lock);
1691 }
1692 
1693 /* given a node and slot number, this reads the block it points to.  The
1694  * extent buffer is returned with a reference taken (but unlocked).
1695  * NULL is returned on error.
1696  */
1697 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1698 				   struct extent_buffer *parent, int slot)
1699 {
1700 	int level = btrfs_header_level(parent);
1701 	struct extent_buffer *eb;
1702 
1703 	if (slot < 0)
1704 		return NULL;
1705 	if (slot >= btrfs_header_nritems(parent))
1706 		return NULL;
1707 
1708 	BUG_ON(level == 0);
1709 
1710 	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1711 			     btrfs_level_size(root, level - 1),
1712 			     btrfs_node_ptr_generation(parent, slot));
1713 	if (eb && !extent_buffer_uptodate(eb)) {
1714 		free_extent_buffer(eb);
1715 		eb = NULL;
1716 	}
1717 
1718 	return eb;
1719 }
1720 
1721 /*
1722  * node level balancing, used to make sure nodes are in proper order for
1723  * item deletion.  We balance from the top down, so we have to make sure
1724  * that a deletion won't leave a node completely empty later on.
1725  */
1726 static noinline int balance_level(struct btrfs_trans_handle *trans,
1727 			 struct btrfs_root *root,
1728 			 struct btrfs_path *path, int level)
1729 {
1730 	struct extent_buffer *right = NULL;
1731 	struct extent_buffer *mid;
1732 	struct extent_buffer *left = NULL;
1733 	struct extent_buffer *parent = NULL;
1734 	int ret = 0;
1735 	int wret;
1736 	int pslot;
1737 	int orig_slot = path->slots[level];
1738 	u64 orig_ptr;
1739 
1740 	if (level == 0)
1741 		return 0;
1742 
1743 	mid = path->nodes[level];
1744 
1745 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1746 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1747 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1748 
1749 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1750 
1751 	if (level < BTRFS_MAX_LEVEL - 1) {
1752 		parent = path->nodes[level + 1];
1753 		pslot = path->slots[level + 1];
1754 	}
1755 
1756 	/*
1757 	 * deal with the case where there is only one pointer in the root
1758 	 * by promoting the node below to a root
1759 	 */
1760 	if (!parent) {
1761 		struct extent_buffer *child;
1762 
1763 		if (btrfs_header_nritems(mid) != 1)
1764 			return 0;
1765 
1766 		/* promote the child to a root */
1767 		child = read_node_slot(root, mid, 0);
1768 		if (!child) {
1769 			ret = -EROFS;
1770 			btrfs_std_error(root->fs_info, ret);
1771 			goto enospc;
1772 		}
1773 
1774 		btrfs_tree_lock(child);
1775 		btrfs_set_lock_blocking(child);
1776 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1777 		if (ret) {
1778 			btrfs_tree_unlock(child);
1779 			free_extent_buffer(child);
1780 			goto enospc;
1781 		}
1782 
1783 		tree_mod_log_set_root_pointer(root, child, 1);
1784 		rcu_assign_pointer(root->node, child);
1785 
1786 		add_root_to_dirty_list(root);
1787 		btrfs_tree_unlock(child);
1788 
1789 		path->locks[level] = 0;
1790 		path->nodes[level] = NULL;
1791 		clean_tree_block(trans, root, mid);
1792 		btrfs_tree_unlock(mid);
1793 		/* once for the path */
1794 		free_extent_buffer(mid);
1795 
1796 		root_sub_used(root, mid->len);
1797 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1798 		/* once for the root ptr */
1799 		free_extent_buffer_stale(mid);
1800 		return 0;
1801 	}
1802 	if (btrfs_header_nritems(mid) >
1803 	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1804 		return 0;
1805 
1806 	left = read_node_slot(root, parent, pslot - 1);
1807 	if (left) {
1808 		btrfs_tree_lock(left);
1809 		btrfs_set_lock_blocking(left);
1810 		wret = btrfs_cow_block(trans, root, left,
1811 				       parent, pslot - 1, &left);
1812 		if (wret) {
1813 			ret = wret;
1814 			goto enospc;
1815 		}
1816 	}
1817 	right = read_node_slot(root, parent, pslot + 1);
1818 	if (right) {
1819 		btrfs_tree_lock(right);
1820 		btrfs_set_lock_blocking(right);
1821 		wret = btrfs_cow_block(trans, root, right,
1822 				       parent, pslot + 1, &right);
1823 		if (wret) {
1824 			ret = wret;
1825 			goto enospc;
1826 		}
1827 	}
1828 
1829 	/* first, try to make some room in the middle buffer */
1830 	if (left) {
1831 		orig_slot += btrfs_header_nritems(left);
1832 		wret = push_node_left(trans, root, left, mid, 1);
1833 		if (wret < 0)
1834 			ret = wret;
1835 	}
1836 
1837 	/*
1838 	 * then try to empty the right buffer into the middle
1839 	 */
1840 	if (right) {
1841 		wret = push_node_left(trans, root, mid, right, 1);
1842 		if (wret < 0 && wret != -ENOSPC)
1843 			ret = wret;
1844 		if (btrfs_header_nritems(right) == 0) {
1845 			clean_tree_block(trans, root, right);
1846 			btrfs_tree_unlock(right);
1847 			del_ptr(root, path, level + 1, pslot + 1);
1848 			root_sub_used(root, right->len);
1849 			btrfs_free_tree_block(trans, root, right, 0, 1);
1850 			free_extent_buffer_stale(right);
1851 			right = NULL;
1852 		} else {
1853 			struct btrfs_disk_key right_key;
1854 			btrfs_node_key(right, &right_key, 0);
1855 			tree_mod_log_set_node_key(root->fs_info, parent,
1856 						  pslot + 1, 0);
1857 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1858 			btrfs_mark_buffer_dirty(parent);
1859 		}
1860 	}
1861 	if (btrfs_header_nritems(mid) == 1) {
1862 		/*
1863 		 * we're not allowed to leave a node with one item in the
1864 		 * tree during a delete.  A deletion from lower in the tree
1865 		 * could try to delete the only pointer in this node.
1866 		 * So, pull some keys from the left.
1867 		 * There has to be a left pointer at this point because
1868 		 * otherwise we would have pulled some pointers from the
1869 		 * right
1870 		 */
1871 		if (!left) {
1872 			ret = -EROFS;
1873 			btrfs_std_error(root->fs_info, ret);
1874 			goto enospc;
1875 		}
1876 		wret = balance_node_right(trans, root, mid, left);
1877 		if (wret < 0) {
1878 			ret = wret;
1879 			goto enospc;
1880 		}
1881 		if (wret == 1) {
1882 			wret = push_node_left(trans, root, left, mid, 1);
1883 			if (wret < 0)
1884 				ret = wret;
1885 		}
1886 		BUG_ON(wret == 1);
1887 	}
1888 	if (btrfs_header_nritems(mid) == 0) {
1889 		clean_tree_block(trans, root, mid);
1890 		btrfs_tree_unlock(mid);
1891 		del_ptr(root, path, level + 1, pslot);
1892 		root_sub_used(root, mid->len);
1893 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1894 		free_extent_buffer_stale(mid);
1895 		mid = NULL;
1896 	} else {
1897 		/* update the parent key to reflect our changes */
1898 		struct btrfs_disk_key mid_key;
1899 		btrfs_node_key(mid, &mid_key, 0);
1900 		tree_mod_log_set_node_key(root->fs_info, parent,
1901 					  pslot, 0);
1902 		btrfs_set_node_key(parent, &mid_key, pslot);
1903 		btrfs_mark_buffer_dirty(parent);
1904 	}
1905 
1906 	/* update the path */
1907 	if (left) {
1908 		if (btrfs_header_nritems(left) > orig_slot) {
1909 			extent_buffer_get(left);
1910 			/* left was locked after cow */
1911 			path->nodes[level] = left;
1912 			path->slots[level + 1] -= 1;
1913 			path->slots[level] = orig_slot;
1914 			if (mid) {
1915 				btrfs_tree_unlock(mid);
1916 				free_extent_buffer(mid);
1917 			}
1918 		} else {
1919 			orig_slot -= btrfs_header_nritems(left);
1920 			path->slots[level] = orig_slot;
1921 		}
1922 	}
1923 	/* double check we haven't messed things up */
1924 	if (orig_ptr !=
1925 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1926 		BUG();
1927 enospc:
1928 	if (right) {
1929 		btrfs_tree_unlock(right);
1930 		free_extent_buffer(right);
1931 	}
1932 	if (left) {
1933 		if (path->nodes[level] != left)
1934 			btrfs_tree_unlock(left);
1935 		free_extent_buffer(left);
1936 	}
1937 	return ret;
1938 }
1939 
1940 /* Node balancing for insertion.  Here we only split or push nodes around
1941  * when they are completely full.  This is also done top down, so we
1942  * have to be pessimistic.
1943  */
1944 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1945 					  struct btrfs_root *root,
1946 					  struct btrfs_path *path, int level)
1947 {
1948 	struct extent_buffer *right = NULL;
1949 	struct extent_buffer *mid;
1950 	struct extent_buffer *left = NULL;
1951 	struct extent_buffer *parent = NULL;
1952 	int ret = 0;
1953 	int wret;
1954 	int pslot;
1955 	int orig_slot = path->slots[level];
1956 
1957 	if (level == 0)
1958 		return 1;
1959 
1960 	mid = path->nodes[level];
1961 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1962 
1963 	if (level < BTRFS_MAX_LEVEL - 1) {
1964 		parent = path->nodes[level + 1];
1965 		pslot = path->slots[level + 1];
1966 	}
1967 
1968 	if (!parent)
1969 		return 1;
1970 
1971 	left = read_node_slot(root, parent, pslot - 1);
1972 
1973 	/* first, try to make some room in the middle buffer */
1974 	if (left) {
1975 		u32 left_nr;
1976 
1977 		btrfs_tree_lock(left);
1978 		btrfs_set_lock_blocking(left);
1979 
1980 		left_nr = btrfs_header_nritems(left);
1981 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1982 			wret = 1;
1983 		} else {
1984 			ret = btrfs_cow_block(trans, root, left, parent,
1985 					      pslot - 1, &left);
1986 			if (ret)
1987 				wret = 1;
1988 			else {
1989 				wret = push_node_left(trans, root,
1990 						      left, mid, 0);
1991 			}
1992 		}
1993 		if (wret < 0)
1994 			ret = wret;
1995 		if (wret == 0) {
1996 			struct btrfs_disk_key disk_key;
1997 			orig_slot += left_nr;
1998 			btrfs_node_key(mid, &disk_key, 0);
1999 			tree_mod_log_set_node_key(root->fs_info, parent,
2000 						  pslot, 0);
2001 			btrfs_set_node_key(parent, &disk_key, pslot);
2002 			btrfs_mark_buffer_dirty(parent);
2003 			if (btrfs_header_nritems(left) > orig_slot) {
2004 				path->nodes[level] = left;
2005 				path->slots[level + 1] -= 1;
2006 				path->slots[level] = orig_slot;
2007 				btrfs_tree_unlock(mid);
2008 				free_extent_buffer(mid);
2009 			} else {
2010 				orig_slot -=
2011 					btrfs_header_nritems(left);
2012 				path->slots[level] = orig_slot;
2013 				btrfs_tree_unlock(left);
2014 				free_extent_buffer(left);
2015 			}
2016 			return 0;
2017 		}
2018 		btrfs_tree_unlock(left);
2019 		free_extent_buffer(left);
2020 	}
2021 	right = read_node_slot(root, parent, pslot + 1);
2022 
2023 	/*
2024 	 * then try to push some items from the middle buffer into the right one
2025 	 */
2026 	if (right) {
2027 		u32 right_nr;
2028 
2029 		btrfs_tree_lock(right);
2030 		btrfs_set_lock_blocking(right);
2031 
2032 		right_nr = btrfs_header_nritems(right);
2033 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2034 			wret = 1;
2035 		} else {
2036 			ret = btrfs_cow_block(trans, root, right,
2037 					      parent, pslot + 1,
2038 					      &right);
2039 			if (ret)
2040 				wret = 1;
2041 			else {
2042 				wret = balance_node_right(trans, root,
2043 							  right, mid);
2044 			}
2045 		}
2046 		if (wret < 0)
2047 			ret = wret;
2048 		if (wret == 0) {
2049 			struct btrfs_disk_key disk_key;
2050 
2051 			btrfs_node_key(right, &disk_key, 0);
2052 			tree_mod_log_set_node_key(root->fs_info, parent,
2053 						  pslot + 1, 0);
2054 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2055 			btrfs_mark_buffer_dirty(parent);
2056 
2057 			if (btrfs_header_nritems(mid) <= orig_slot) {
2058 				path->nodes[level] = right;
2059 				path->slots[level + 1] += 1;
2060 				path->slots[level] = orig_slot -
2061 					btrfs_header_nritems(mid);
2062 				btrfs_tree_unlock(mid);
2063 				free_extent_buffer(mid);
2064 			} else {
2065 				btrfs_tree_unlock(right);
2066 				free_extent_buffer(right);
2067 			}
2068 			return 0;
2069 		}
2070 		btrfs_tree_unlock(right);
2071 		free_extent_buffer(right);
2072 	}
2073 	return 1;
2074 }
2075 
2076 /*
2077  * readahead one full node of leaves, finding things that are close
2078  * to the block in 'slot', and triggering ra on them.
2079  */
2080 static void reada_for_search(struct btrfs_root *root,
2081 			     struct btrfs_path *path,
2082 			     int level, int slot, u64 objectid)
2083 {
2084 	struct extent_buffer *node;
2085 	struct btrfs_disk_key disk_key;
2086 	u32 nritems;
2087 	u64 search;
2088 	u64 target;
2089 	u64 nread = 0;
2090 	u64 gen;
2091 	int direction = path->reada;
2092 	struct extent_buffer *eb;
2093 	u32 nr;
2094 	u32 blocksize;
2095 	u32 nscan = 0;
2096 
2097 	if (level != 1)
2098 		return;
2099 
2100 	if (!path->nodes[level])
2101 		return;
2102 
2103 	node = path->nodes[level];
2104 
2105 	search = btrfs_node_blockptr(node, slot);
2106 	blocksize = btrfs_level_size(root, level - 1);
2107 	eb = btrfs_find_tree_block(root, search, blocksize);
2108 	if (eb) {
2109 		free_extent_buffer(eb);
2110 		return;
2111 	}
2112 
2113 	target = search;
2114 
2115 	nritems = btrfs_header_nritems(node);
2116 	nr = slot;
2117 
2118 	while (1) {
2119 		if (direction < 0) {
2120 			if (nr == 0)
2121 				break;
2122 			nr--;
2123 		} else if (direction > 0) {
2124 			nr++;
2125 			if (nr >= nritems)
2126 				break;
2127 		}
2128 		if (path->reada < 0 && objectid) {
2129 			btrfs_node_key(node, &disk_key, nr);
2130 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2131 				break;
2132 		}
2133 		search = btrfs_node_blockptr(node, nr);
2134 		if ((search <= target && target - search <= 65536) ||
2135 		    (search > target && search - target <= 65536)) {
2136 			gen = btrfs_node_ptr_generation(node, nr);
2137 			readahead_tree_block(root, search, blocksize, gen);
2138 			nread += blocksize;
2139 		}
2140 		nscan++;
2141 		if ((nread > 65536 || nscan > 32))
2142 			break;
2143 	}
2144 }
2145 
2146 static noinline void reada_for_balance(struct btrfs_root *root,
2147 				       struct btrfs_path *path, int level)
2148 {
2149 	int slot;
2150 	int nritems;
2151 	struct extent_buffer *parent;
2152 	struct extent_buffer *eb;
2153 	u64 gen;
2154 	u64 block1 = 0;
2155 	u64 block2 = 0;
2156 	int blocksize;
2157 
2158 	parent = path->nodes[level + 1];
2159 	if (!parent)
2160 		return;
2161 
2162 	nritems = btrfs_header_nritems(parent);
2163 	slot = path->slots[level + 1];
2164 	blocksize = btrfs_level_size(root, level);
2165 
2166 	if (slot > 0) {
2167 		block1 = btrfs_node_blockptr(parent, slot - 1);
2168 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2169 		eb = btrfs_find_tree_block(root, block1, blocksize);
2170 		/*
2171 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2172 		 * don't want to return -EAGAIN here.  That would loop
2173 		 * forever
2174 		 */
2175 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2176 			block1 = 0;
2177 		free_extent_buffer(eb);
2178 	}
2179 	if (slot + 1 < nritems) {
2180 		block2 = btrfs_node_blockptr(parent, slot + 1);
2181 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2182 		eb = btrfs_find_tree_block(root, block2, blocksize);
2183 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2184 			block2 = 0;
2185 		free_extent_buffer(eb);
2186 	}
2187 
2188 	if (block1)
2189 		readahead_tree_block(root, block1, blocksize, 0);
2190 	if (block2)
2191 		readahead_tree_block(root, block2, blocksize, 0);
2192 }
2193 
2194 
2195 /*
2196  * when we walk down the tree, it is usually safe to unlock the higher layers
2197  * in the tree.  The exceptions are when our path goes through slot 0, because
2198  * operations on the tree might require changing key pointers higher up in the
2199  * tree.
2200  *
2201  * callers might also have set path->keep_locks, which tells this code to keep
2202  * the lock if the path points to the last slot in the block.  This is part of
2203  * walking through the tree, and selecting the next slot in the higher block.
2204  *
2205  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
2206  * if lowest_unlock is 1, level 0 won't be unlocked
2207  */
2208 static noinline void unlock_up(struct btrfs_path *path, int level,
2209 			       int lowest_unlock, int min_write_lock_level,
2210 			       int *write_lock_level)
2211 {
2212 	int i;
2213 	int skip_level = level;
2214 	int no_skips = 0;
2215 	struct extent_buffer *t;
2216 
2217 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2218 		if (!path->nodes[i])
2219 			break;
2220 		if (!path->locks[i])
2221 			break;
2222 		if (!no_skips && path->slots[i] == 0) {
2223 			skip_level = i + 1;
2224 			continue;
2225 		}
2226 		if (!no_skips && path->keep_locks) {
2227 			u32 nritems;
2228 			t = path->nodes[i];
2229 			nritems = btrfs_header_nritems(t);
2230 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2231 				skip_level = i + 1;
2232 				continue;
2233 			}
2234 		}
2235 		if (skip_level < i && i >= lowest_unlock)
2236 			no_skips = 1;
2237 
2238 		t = path->nodes[i];
2239 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2240 			btrfs_tree_unlock_rw(t, path->locks[i]);
2241 			path->locks[i] = 0;
2242 			if (write_lock_level &&
2243 			    i > min_write_lock_level &&
2244 			    i <= *write_lock_level) {
2245 				*write_lock_level = i - 1;
2246 			}
2247 		}
2248 	}
2249 }
2250 
2251 /*
2252  * This releases any locks held in the path starting at level and
2253  * going all the way up to the root.
2254  *
2255  * btrfs_search_slot will keep the lock held on higher nodes in a few
2256  * corner cases, such as COW of the block at slot zero in the node.  This
2257  * ignores those rules, and it should only be called when there are no
2258  * more updates to be done higher up in the tree.
2259  */
2260 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2261 {
2262 	int i;
2263 
2264 	if (path->keep_locks)
2265 		return;
2266 
2267 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2268 		if (!path->nodes[i])
2269 			continue;
2270 		if (!path->locks[i])
2271 			continue;
2272 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2273 		path->locks[i] = 0;
2274 	}
2275 }
2276 
2277 /*
2278  * helper function for btrfs_search_slot.  The goal is to find a block
2279  * in cache without setting the path to blocking.  If we find the block
2280  * we return zero and the path is unchanged.
2281  *
2282  * If we can't find the block, we set the path blocking and do some
2283  * reada.  -EAGAIN is returned and the search must be repeated.
2284  */
2285 static int
2286 read_block_for_search(struct btrfs_trans_handle *trans,
2287 		       struct btrfs_root *root, struct btrfs_path *p,
2288 		       struct extent_buffer **eb_ret, int level, int slot,
2289 		       struct btrfs_key *key, u64 time_seq)
2290 {
2291 	u64 blocknr;
2292 	u64 gen;
2293 	u32 blocksize;
2294 	struct extent_buffer *b = *eb_ret;
2295 	struct extent_buffer *tmp;
2296 	int ret;
2297 
2298 	blocknr = btrfs_node_blockptr(b, slot);
2299 	gen = btrfs_node_ptr_generation(b, slot);
2300 	blocksize = btrfs_level_size(root, level - 1);
2301 
2302 	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2303 	if (tmp) {
2304 		/* first we do an atomic uptodate check */
2305 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2306 			*eb_ret = tmp;
2307 			return 0;
2308 		}
2309 
2310 		/* the pages were up to date, but we failed
2311 		 * the generation number check.  Do a full
2312 		 * read for the generation number that is correct.
2313 		 * We must do this without dropping locks so
2314 		 * we can trust our generation number
2315 		 */
2316 		btrfs_set_path_blocking(p);
2317 
2318 		/* now we're allowed to do a blocking uptodate check */
2319 		ret = btrfs_read_buffer(tmp, gen);
2320 		if (!ret) {
2321 			*eb_ret = tmp;
2322 			return 0;
2323 		}
2324 		free_extent_buffer(tmp);
2325 		btrfs_release_path(p);
2326 		return -EIO;
2327 	}
2328 
2329 	/*
2330 	 * reduce lock contention at high levels
2331 	 * of the btree by dropping locks before
2332 	 * we read.  Don't release the lock on the current
2333 	 * level because we need to walk this node to figure
2334 	 * out which blocks to read.
2335 	 */
2336 	btrfs_unlock_up_safe(p, level + 1);
2337 	btrfs_set_path_blocking(p);
2338 
2339 	free_extent_buffer(tmp);
2340 	if (p->reada)
2341 		reada_for_search(root, p, level, slot, key->objectid);
2342 
2343 	btrfs_release_path(p);
2344 
2345 	ret = -EAGAIN;
2346 	tmp = read_tree_block(root, blocknr, blocksize, 0);
2347 	if (tmp) {
2348 		/*
2349 		 * If the read above didn't mark this buffer up to date,
2350 		 * it will never end up being up to date.  Set ret to -EIO now
2351 		 * and give up so that our caller doesn't loop forever
2352 		 * on our EAGAINs.
2353 		 */
2354 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
2355 			ret = -EIO;
2356 		free_extent_buffer(tmp);
2357 	}
2358 	return ret;
2359 }
2360 
2361 /*
2362  * helper function for btrfs_search_slot.  This does all of the checks
2363  * for node-level blocks and does any balancing required based on
2364  * the ins_len.
2365  *
2366  * If no extra work was required, zero is returned.  If we had to
2367  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2368  * start over
2369  */
2370 static int
2371 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2372 		       struct btrfs_root *root, struct btrfs_path *p,
2373 		       struct extent_buffer *b, int level, int ins_len,
2374 		       int *write_lock_level)
2375 {
2376 	int ret;
2377 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2378 	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2379 		int sret;
2380 
2381 		if (*write_lock_level < level + 1) {
2382 			*write_lock_level = level + 1;
2383 			btrfs_release_path(p);
2384 			goto again;
2385 		}
2386 
2387 		btrfs_set_path_blocking(p);
2388 		reada_for_balance(root, p, level);
2389 		sret = split_node(trans, root, p, level);
2390 		btrfs_clear_path_blocking(p, NULL, 0);
2391 
2392 		BUG_ON(sret > 0);
2393 		if (sret) {
2394 			ret = sret;
2395 			goto done;
2396 		}
2397 		b = p->nodes[level];
2398 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2399 		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2400 		int sret;
2401 
2402 		if (*write_lock_level < level + 1) {
2403 			*write_lock_level = level + 1;
2404 			btrfs_release_path(p);
2405 			goto again;
2406 		}
2407 
2408 		btrfs_set_path_blocking(p);
2409 		reada_for_balance(root, p, level);
2410 		sret = balance_level(trans, root, p, level);
2411 		btrfs_clear_path_blocking(p, NULL, 0);
2412 
2413 		if (sret) {
2414 			ret = sret;
2415 			goto done;
2416 		}
2417 		b = p->nodes[level];
2418 		if (!b) {
2419 			btrfs_release_path(p);
2420 			goto again;
2421 		}
2422 		BUG_ON(btrfs_header_nritems(b) == 1);
2423 	}
2424 	return 0;
2425 
2426 again:
2427 	ret = -EAGAIN;
2428 done:
2429 	return ret;
2430 }
2431 
2432 static void key_search_validate(struct extent_buffer *b,
2433 				struct btrfs_key *key,
2434 				int level)
2435 {
2436 #ifdef CONFIG_BTRFS_ASSERT
2437 	struct btrfs_disk_key disk_key;
2438 
2439 	btrfs_cpu_key_to_disk(&disk_key, key);
2440 
2441 	if (level == 0)
2442 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2443 		    offsetof(struct btrfs_leaf, items[0].key),
2444 		    sizeof(disk_key)));
2445 	else
2446 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2447 		    offsetof(struct btrfs_node, ptrs[0].key),
2448 		    sizeof(disk_key)));
2449 #endif
2450 }
2451 
2452 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2453 		      int level, int *prev_cmp, int *slot)
2454 {
2455 	if (*prev_cmp != 0) {
2456 		*prev_cmp = bin_search(b, key, level, slot);
2457 		return *prev_cmp;
2458 	}
2459 
2460 	key_search_validate(b, key, level);
2461 	*slot = 0;
2462 
2463 	return 0;
2464 }
2465 
2466 /*
2467  * look for key in the tree.  path is filled in with nodes along the way.
2468  * if key is found, we return zero and you can find the item in the leaf
2469  * level of the path (level 0)
2470  *
2471  * If the key isn't found, the path points to the slot where it should
2472  * be inserted, and 1 is returned.  If there are other errors during the
2473  * search a negative error number is returned.
2474  *
2475  * if ins_len > 0, nodes and leaves will be split as we walk down the
2476  * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
2477  * possible)
2478  */
2479 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2480 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
2481 		      ins_len, int cow)
2482 {
2483 	struct extent_buffer *b;
2484 	int slot;
2485 	int ret;
2486 	int err;
2487 	int level;
2488 	int lowest_unlock = 1;
2489 	int root_lock;
2490 	/* everything at write_lock_level or lower must be write locked */
2491 	int write_lock_level = 0;
2492 	u8 lowest_level = 0;
2493 	int min_write_lock_level;
2494 	int prev_cmp;
2495 
2496 	lowest_level = p->lowest_level;
2497 	WARN_ON(lowest_level && ins_len > 0);
2498 	WARN_ON(p->nodes[0] != NULL);
2499 
2500 	if (ins_len < 0) {
2501 		lowest_unlock = 2;
2502 
2503 		/* when we are removing items, we might have to go up to level
2504 		 * two as we update tree pointers.  Make sure we keep write
2505 		 * locks on those levels as well
2506 		 */
2507 		write_lock_level = 2;
2508 	} else if (ins_len > 0) {
2509 		/*
2510 		 * for inserting items, make sure we have a write lock on
2511 		 * level 1 so we can update keys
2512 		 */
2513 		write_lock_level = 1;
2514 	}
2515 
2516 	if (!cow)
2517 		write_lock_level = -1;
2518 
2519 	if (cow && (p->keep_locks || p->lowest_level))
2520 		write_lock_level = BTRFS_MAX_LEVEL;
2521 
2522 	min_write_lock_level = write_lock_level;
2523 
2524 again:
2525 	prev_cmp = -1;
2526 	/*
2527 	 * we try very hard to do read locks on the root
2528 	 */
2529 	root_lock = BTRFS_READ_LOCK;
2530 	level = 0;
2531 	if (p->search_commit_root) {
2532 		/*
2533 		 * the commit roots are read only
2534 		 * so we always do read locks
2535 		 */
2536 		b = root->commit_root;
2537 		extent_buffer_get(b);
2538 		level = btrfs_header_level(b);
2539 		if (!p->skip_locking)
2540 			btrfs_tree_read_lock(b);
2541 	} else {
2542 		if (p->skip_locking) {
2543 			b = btrfs_root_node(root);
2544 			level = btrfs_header_level(b);
2545 		} else {
2546 			/* we don't know the level of the root node
2547 			 * until we actually have it read locked
2548 			 */
2549 			b = btrfs_read_lock_root_node(root);
2550 			level = btrfs_header_level(b);
2551 			if (level <= write_lock_level) {
2552 				/* whoops, must trade for write lock */
2553 				btrfs_tree_read_unlock(b);
2554 				free_extent_buffer(b);
2555 				b = btrfs_lock_root_node(root);
2556 				root_lock = BTRFS_WRITE_LOCK;
2557 
2558 				/* the level might have changed, check again */
2559 				level = btrfs_header_level(b);
2560 			}
2561 		}
2562 	}
2563 	p->nodes[level] = b;
2564 	if (!p->skip_locking)
2565 		p->locks[level] = root_lock;
2566 
2567 	while (b) {
2568 		level = btrfs_header_level(b);
2569 
2570 		/*
2571 		 * setup the path here so we can release it under lock
2572 		 * contention with the cow code
2573 		 */
2574 		if (cow) {
2575 			/*
2576 			 * if we don't really need to cow this block
2577 			 * then we don't want to set the path blocking,
2578 			 * so we test it here
2579 			 */
2580 			if (!should_cow_block(trans, root, b))
2581 				goto cow_done;
2582 
2583 			btrfs_set_path_blocking(p);
2584 
2585 			/*
2586 			 * must have write locks on this node and the
2587 			 * parent
2588 			 */
2589 			if (level > write_lock_level ||
2590 			    (level + 1 > write_lock_level &&
2591 			    level + 1 < BTRFS_MAX_LEVEL &&
2592 			    p->nodes[level + 1])) {
2593 				write_lock_level = level + 1;
2594 				btrfs_release_path(p);
2595 				goto again;
2596 			}
2597 
2598 			err = btrfs_cow_block(trans, root, b,
2599 					      p->nodes[level + 1],
2600 					      p->slots[level + 1], &b);
2601 			if (err) {
2602 				ret = err;
2603 				goto done;
2604 			}
2605 		}
2606 cow_done:
2607 		BUG_ON(!cow && ins_len);
2608 
2609 		p->nodes[level] = b;
2610 		btrfs_clear_path_blocking(p, NULL, 0);
2611 
2612 		/*
2613 		 * we have a lock on b and as long as we aren't changing
2614 		 * the tree, there is no way for the items in b to change.
2615 		 * It is safe to drop the lock on our parent before we
2616 		 * go through the expensive btree search on b.
2617 		 *
2618 		 * If cow is true, then we might be changing slot zero,
2619 		 * which may require changing the parent.  So, we can't
2620 		 * drop the lock until after we know which slot we're
2621 		 * operating on.
2622 		 */
2623 		if (!cow)
2624 			btrfs_unlock_up_safe(p, level + 1);
2625 
2626 		ret = key_search(b, key, level, &prev_cmp, &slot);
2627 
2628 		if (level != 0) {
2629 			int dec = 0;
2630 			if (ret && slot > 0) {
2631 				dec = 1;
2632 				slot -= 1;
2633 			}
2634 			p->slots[level] = slot;
2635 			err = setup_nodes_for_search(trans, root, p, b, level,
2636 					     ins_len, &write_lock_level);
2637 			if (err == -EAGAIN)
2638 				goto again;
2639 			if (err) {
2640 				ret = err;
2641 				goto done;
2642 			}
2643 			b = p->nodes[level];
2644 			slot = p->slots[level];
2645 
2646 			/*
2647 			 * slot 0 is special: if we change the key
2648 			 * we have to update the parent pointer
2649 			 * which means we must have a write lock
2650 			 * on the parent
2651 			 */
2652 			if (slot == 0 && cow &&
2653 			    write_lock_level < level + 1) {
2654 				write_lock_level = level + 1;
2655 				btrfs_release_path(p);
2656 				goto again;
2657 			}
2658 
2659 			unlock_up(p, level, lowest_unlock,
2660 				  min_write_lock_level, &write_lock_level);
2661 
2662 			if (level == lowest_level) {
2663 				if (dec)
2664 					p->slots[level]++;
2665 				goto done;
2666 			}
2667 
2668 			err = read_block_for_search(trans, root, p,
2669 						    &b, level, slot, key, 0);
2670 			if (err == -EAGAIN)
2671 				goto again;
2672 			if (err) {
2673 				ret = err;
2674 				goto done;
2675 			}
2676 
2677 			if (!p->skip_locking) {
2678 				level = btrfs_header_level(b);
2679 				if (level <= write_lock_level) {
2680 					err = btrfs_try_tree_write_lock(b);
2681 					if (!err) {
2682 						btrfs_set_path_blocking(p);
2683 						btrfs_tree_lock(b);
2684 						btrfs_clear_path_blocking(p, b,
2685 								  BTRFS_WRITE_LOCK);
2686 					}
2687 					p->locks[level] = BTRFS_WRITE_LOCK;
2688 				} else {
2689 					err = btrfs_try_tree_read_lock(b);
2690 					if (!err) {
2691 						btrfs_set_path_blocking(p);
2692 						btrfs_tree_read_lock(b);
2693 						btrfs_clear_path_blocking(p, b,
2694 								  BTRFS_READ_LOCK);
2695 					}
2696 					p->locks[level] = BTRFS_READ_LOCK;
2697 				}
2698 				p->nodes[level] = b;
2699 			}
2700 		} else {
2701 			p->slots[level] = slot;
2702 			if (ins_len > 0 &&
2703 			    btrfs_leaf_free_space(root, b) < ins_len) {
2704 				if (write_lock_level < 1) {
2705 					write_lock_level = 1;
2706 					btrfs_release_path(p);
2707 					goto again;
2708 				}
2709 
2710 				btrfs_set_path_blocking(p);
2711 				err = split_leaf(trans, root, key,
2712 						 p, ins_len, ret == 0);
2713 				btrfs_clear_path_blocking(p, NULL, 0);
2714 
2715 				BUG_ON(err > 0);
2716 				if (err) {
2717 					ret = err;
2718 					goto done;
2719 				}
2720 			}
2721 			if (!p->search_for_split)
2722 				unlock_up(p, level, lowest_unlock,
2723 					  min_write_lock_level, &write_lock_level);
2724 			goto done;
2725 		}
2726 	}
2727 	ret = 1;
2728 done:
2729 	/*
2730 	 * we don't really know what they plan on doing with the path
2731 	 * from here on, so for now just mark it as blocking
2732 	 */
2733 	if (!p->leave_spinning)
2734 		btrfs_set_path_blocking(p);
2735 	if (ret < 0)
2736 		btrfs_release_path(p);
2737 	return ret;
2738 }
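
/*
 * Illustrative sketch (not part of this file): a minimal read-only lookup
 * with btrfs_search_slot().  No transaction and cow == 0, so nothing in the
 * tree is modified; the key fields are only an example.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		... exact match, item is at path->nodes[0], path->slots[0] ...
 *	} else if (ret > 0) {
 *		... not found, path points at the slot where it would go ...
 *	}
 *	btrfs_free_path(path);
 */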
2739 
2740 /*
2741  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2742  * current state of the tree together with the operations recorded in the tree
2743  * modification log to search for the key in a previous version of this tree, as
2744  * denoted by the time_seq parameter.
2745  *
2746  * Naturally, there is no support for insert, delete or cow operations.
2747  *
2748  * The resulting path and return value will be set up as if we called
2749  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2750  */
2751 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2752 			  struct btrfs_path *p, u64 time_seq)
2753 {
2754 	struct extent_buffer *b;
2755 	int slot;
2756 	int ret;
2757 	int err;
2758 	int level;
2759 	int lowest_unlock = 1;
2760 	u8 lowest_level = 0;
2761 	int prev_cmp;
2762 
2763 	lowest_level = p->lowest_level;
2764 	WARN_ON(p->nodes[0] != NULL);
2765 
2766 	if (p->search_commit_root) {
2767 		BUG_ON(time_seq);
2768 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2769 	}
2770 
2771 again:
2772 	prev_cmp = -1;
2773 	b = get_old_root(root, time_seq);
2774 	level = btrfs_header_level(b);
2775 	p->locks[level] = BTRFS_READ_LOCK;
2776 
2777 	while (b) {
2778 		level = btrfs_header_level(b);
2779 		p->nodes[level] = b;
2780 		btrfs_clear_path_blocking(p, NULL, 0);
2781 
2782 		/*
2783 		 * we have a lock on b and as long as we aren't changing
2784 		 * the tree, there is no way for the items in b to change.
2785 		 * It is safe to drop the lock on our parent before we
2786 		 * go through the expensive btree search on b.
2787 		 */
2788 		btrfs_unlock_up_safe(p, level + 1);
2789 
2790 		ret = key_search(b, key, level, &prev_cmp, &slot);
2791 
2792 		if (level != 0) {
2793 			int dec = 0;
2794 			if (ret && slot > 0) {
2795 				dec = 1;
2796 				slot -= 1;
2797 			}
2798 			p->slots[level] = slot;
2799 			unlock_up(p, level, lowest_unlock, 0, NULL);
2800 
2801 			if (level == lowest_level) {
2802 				if (dec)
2803 					p->slots[level]++;
2804 				goto done;
2805 			}
2806 
2807 			err = read_block_for_search(NULL, root, p, &b, level,
2808 						    slot, key, time_seq);
2809 			if (err == -EAGAIN)
2810 				goto again;
2811 			if (err) {
2812 				ret = err;
2813 				goto done;
2814 			}
2815 
2816 			level = btrfs_header_level(b);
2817 			err = btrfs_try_tree_read_lock(b);
2818 			if (!err) {
2819 				btrfs_set_path_blocking(p);
2820 				btrfs_tree_read_lock(b);
2821 				btrfs_clear_path_blocking(p, b,
2822 							  BTRFS_READ_LOCK);
2823 			}
2824 			b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
2825 			if (!b) {
2826 				ret = -ENOMEM;
2827 				goto done;
2828 			}
2829 			p->locks[level] = BTRFS_READ_LOCK;
2830 			p->nodes[level] = b;
2831 		} else {
2832 			p->slots[level] = slot;
2833 			unlock_up(p, level, lowest_unlock, 0, NULL);
2834 			goto done;
2835 		}
2836 	}
2837 	ret = 1;
2838 done:
2839 	if (!p->leave_spinning)
2840 		btrfs_set_path_blocking(p);
2841 	if (ret < 0)
2842 		btrfs_release_path(p);
2843 
2844 	return ret;
2845 }
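
/*
 * Illustrative sketch (not part of this file): btrfs_search_old_slot() is
 * called like a read-only btrfs_search_slot(), plus a time_seq the caller
 * obtained earlier from the tree mod log; variable names are only an
 * example.
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *
 * 0 means an exact match at path->slots[0], 1 means not found and < 0 is an
 * error, exactly as btrfs_search_slot() would have reported at that point
 * in time.
 */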
2846 
2847 /*
2848  * helper to use instead of btrfs_search_slot if no exact match is needed but
2849  * instead the next or previous item should be returned.
2850  * When find_higher is true, the next higher item is returned, the next lower
2851  * otherwise.
2852  * When return_any and find_higher are both true, and no higher item is found,
2853  * return the next lower instead.
2854  * When return_any is true and find_higher is false, and no lower item is found,
2855  * return the next higher instead.
2856  * It returns 0 if any item is found, 1 if none is found (tree empty), and
2857  * < 0 on error
2858  */
2859 int btrfs_search_slot_for_read(struct btrfs_root *root,
2860 			       struct btrfs_key *key, struct btrfs_path *p,
2861 			       int find_higher, int return_any)
2862 {
2863 	int ret;
2864 	struct extent_buffer *leaf;
2865 
2866 again:
2867 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2868 	if (ret <= 0)
2869 		return ret;
2870 	/*
2871 	 * a return value of 1 means the path is at the position where the
2872 	 * item should be inserted. Normally this is the next bigger item,
2873 	 * but in case the previous item is the last in a leaf, path points
2874 	 * to the first free slot in the previous leaf, i.e. at an invalid
2875 	 * item.
2876 	 */
2877 	leaf = p->nodes[0];
2878 
2879 	if (find_higher) {
2880 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2881 			ret = btrfs_next_leaf(root, p);
2882 			if (ret <= 0)
2883 				return ret;
2884 			if (!return_any)
2885 				return 1;
2886 			/*
2887 			 * no higher item found, return the next
2888 			 * lower instead
2889 			 */
2890 			return_any = 0;
2891 			find_higher = 0;
2892 			btrfs_release_path(p);
2893 			goto again;
2894 		}
2895 	} else {
2896 		if (p->slots[0] == 0) {
2897 			ret = btrfs_prev_leaf(root, p);
2898 			if (ret < 0)
2899 				return ret;
2900 			if (!ret) {
2901 				p->slots[0] = btrfs_header_nritems(leaf) - 1;
2902 				return 0;
2903 			}
2904 			if (!return_any)
2905 				return 1;
2906 			/*
2907 			 * no lower item found, return the next
2908 			 * higher instead
2909 			 */
2910 			return_any = 0;
2911 			find_higher = 1;
2912 			btrfs_release_path(p);
2913 			goto again;
2914 		} else {
2915 			--p->slots[0];
2916 		}
2917 	}
2918 	return 0;
2919 }
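
/*
 * Illustrative sketch (not part of this file): find the first item at or
 * after a key, falling back to the closest lower item when nothing higher
 * exists (find_higher == 1, return_any == 1).  Names are only an example.
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *
 * ret == 1 only when the tree is empty.
 */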
2920 
2921 /*
2922  * adjust the pointers going up the tree, starting at level
2923  * making sure the right key of each node points to 'key'.
2924  * This is used after shifting pointers to the left, so it stops
2925  * fixing up pointers when a given leaf/node is not in slot 0 of the
2926  * higher levels
2927  *
2928  */
2929 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
2930 			   struct btrfs_disk_key *key, int level)
2931 {
2932 	int i;
2933 	struct extent_buffer *t;
2934 
2935 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2936 		int tslot = path->slots[i];
2937 		if (!path->nodes[i])
2938 			break;
2939 		t = path->nodes[i];
2940 		tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2941 		btrfs_set_node_key(t, key, tslot);
2942 		btrfs_mark_buffer_dirty(path->nodes[i]);
2943 		if (tslot != 0)
2944 			break;
2945 	}
2946 }
2947 
2948 /*
2949  * update item key.
2950  *
2951  * This function isn't completely safe. It's the caller's responsibility
2952  * that the new key won't break the order
2953  */
2954 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
2955 			     struct btrfs_key *new_key)
2956 {
2957 	struct btrfs_disk_key disk_key;
2958 	struct extent_buffer *eb;
2959 	int slot;
2960 
2961 	eb = path->nodes[0];
2962 	slot = path->slots[0];
2963 	if (slot > 0) {
2964 		btrfs_item_key(eb, &disk_key, slot - 1);
2965 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2966 	}
2967 	if (slot < btrfs_header_nritems(eb) - 1) {
2968 		btrfs_item_key(eb, &disk_key, slot + 1);
2969 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2970 	}
2971 
2972 	btrfs_cpu_key_to_disk(&disk_key, new_key);
2973 	btrfs_set_item_key(eb, &disk_key, slot);
2974 	btrfs_mark_buffer_dirty(eb);
2975 	if (slot == 0)
2976 		fixup_low_keys(root, path, &disk_key, 1);
2977 }
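
/*
 * Illustrative sketch (not part of this file): bumping an item's key offset
 * in place.  The caller already has the leaf in path->nodes[0] and must know
 * the new key keeps the leaf ordered, since the checks above only BUG() on a
 * violation.  new_offset is a made-up name for this example.
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset = new_offset;
 *	btrfs_set_item_key_safe(root, path, &new_key);
 */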
2978 
2979 /*
2980  * try to push data from one node into the next node left in the
2981  * tree.
2982  *
2983  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2984  * error, and > 0 if there was no room in the left hand block.
2985  */
2986 static int push_node_left(struct btrfs_trans_handle *trans,
2987 			  struct btrfs_root *root, struct extent_buffer *dst,
2988 			  struct extent_buffer *src, int empty)
2989 {
2990 	int push_items = 0;
2991 	int src_nritems;
2992 	int dst_nritems;
2993 	int ret = 0;
2994 
2995 	src_nritems = btrfs_header_nritems(src);
2996 	dst_nritems = btrfs_header_nritems(dst);
2997 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2998 	WARN_ON(btrfs_header_generation(src) != trans->transid);
2999 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3000 
3001 	if (!empty && src_nritems <= 8)
3002 		return 1;
3003 
3004 	if (push_items <= 0)
3005 		return 1;
3006 
3007 	if (empty) {
3008 		push_items = min(src_nritems, push_items);
3009 		if (push_items < src_nritems) {
3010 			/* leave at least 8 pointers in the node if
3011 			 * we aren't going to empty it
3012 			 */
3013 			if (src_nritems - push_items < 8) {
3014 				if (push_items <= 8)
3015 					return 1;
3016 				push_items -= 8;
3017 			}
3018 		}
3019 	} else
3020 		push_items = min(src_nritems - 8, push_items);
3021 
3022 	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3023 			     push_items);
3024 	copy_extent_buffer(dst, src,
3025 			   btrfs_node_key_ptr_offset(dst_nritems),
3026 			   btrfs_node_key_ptr_offset(0),
3027 			   push_items * sizeof(struct btrfs_key_ptr));
3028 
3029 	if (push_items < src_nritems) {
3030 		/*
3031 		 * don't call tree_mod_log_eb_move here, key removal was already
3032 		 * fully logged by tree_mod_log_eb_copy above.
3033 		 */
3034 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3035 				      btrfs_node_key_ptr_offset(push_items),
3036 				      (src_nritems - push_items) *
3037 				      sizeof(struct btrfs_key_ptr));
3038 	}
3039 	btrfs_set_header_nritems(src, src_nritems - push_items);
3040 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3041 	btrfs_mark_buffer_dirty(src);
3042 	btrfs_mark_buffer_dirty(dst);
3043 
3044 	return ret;
3045 }
3046 
3047 /*
3048  * try to push data from one node into the next node right in the
3049  * tree.
3050  *
3051  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3052  * error, and > 0 if there was no room in the right hand block.
3053  *
3054  * this will  only push up to 1/2 the contents of the left node over
3055  */
3056 static int balance_node_right(struct btrfs_trans_handle *trans,
3057 			      struct btrfs_root *root,
3058 			      struct extent_buffer *dst,
3059 			      struct extent_buffer *src)
3060 {
3061 	int push_items = 0;
3062 	int max_push;
3063 	int src_nritems;
3064 	int dst_nritems;
3065 	int ret = 0;
3066 
3067 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3068 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3069 
3070 	src_nritems = btrfs_header_nritems(src);
3071 	dst_nritems = btrfs_header_nritems(dst);
3072 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3073 	if (push_items <= 0)
3074 		return 1;
3075 
3076 	if (src_nritems < 4)
3077 		return 1;
3078 
3079 	max_push = src_nritems / 2 + 1;
3080 	/* don't try to empty the node */
3081 	if (max_push >= src_nritems)
3082 		return 1;
3083 
3084 	if (max_push < push_items)
3085 		push_items = max_push;
3086 
3087 	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3088 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3089 				      btrfs_node_key_ptr_offset(0),
3090 				      (dst_nritems) *
3091 				      sizeof(struct btrfs_key_ptr));
3092 
3093 	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3094 			     src_nritems - push_items, push_items);
3095 	copy_extent_buffer(dst, src,
3096 			   btrfs_node_key_ptr_offset(0),
3097 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3098 			   push_items * sizeof(struct btrfs_key_ptr));
3099 
3100 	btrfs_set_header_nritems(src, src_nritems - push_items);
3101 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3102 
3103 	btrfs_mark_buffer_dirty(src);
3104 	btrfs_mark_buffer_dirty(dst);
3105 
3106 	return ret;
3107 }
3108 
3109 /*
3110  * helper function to insert a new root level in the tree.
3111  * A new node is allocated, and a single item is inserted to
3112  * point to the existing root
3113  *
3114  * returns zero on success or < 0 on failure.
3115  */
3116 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3117 			   struct btrfs_root *root,
3118 			   struct btrfs_path *path, int level)
3119 {
3120 	u64 lower_gen;
3121 	struct extent_buffer *lower;
3122 	struct extent_buffer *c;
3123 	struct extent_buffer *old;
3124 	struct btrfs_disk_key lower_key;
3125 
3126 	BUG_ON(path->nodes[level]);
3127 	BUG_ON(path->nodes[level-1] != root->node);
3128 
3129 	lower = path->nodes[level-1];
3130 	if (level == 1)
3131 		btrfs_item_key(lower, &lower_key, 0);
3132 	else
3133 		btrfs_node_key(lower, &lower_key, 0);
3134 
3135 	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3136 				   root->root_key.objectid, &lower_key,
3137 				   level, root->node->start, 0);
3138 	if (IS_ERR(c))
3139 		return PTR_ERR(c);
3140 
3141 	root_add_used(root, root->nodesize);
3142 
3143 	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3144 	btrfs_set_header_nritems(c, 1);
3145 	btrfs_set_header_level(c, level);
3146 	btrfs_set_header_bytenr(c, c->start);
3147 	btrfs_set_header_generation(c, trans->transid);
3148 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3149 	btrfs_set_header_owner(c, root->root_key.objectid);
3150 
3151 	write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(c),
3152 			    BTRFS_FSID_SIZE);
3153 
3154 	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3155 			    btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3156 
3157 	btrfs_set_node_key(c, &lower_key, 0);
3158 	btrfs_set_node_blockptr(c, 0, lower->start);
3159 	lower_gen = btrfs_header_generation(lower);
3160 	WARN_ON(lower_gen != trans->transid);
3161 
3162 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3163 
3164 	btrfs_mark_buffer_dirty(c);
3165 
3166 	old = root->node;
3167 	tree_mod_log_set_root_pointer(root, c, 0);
3168 	rcu_assign_pointer(root->node, c);
3169 
3170 	/* the super has an extra ref to root->node */
3171 	free_extent_buffer(old);
3172 
3173 	add_root_to_dirty_list(root);
3174 	extent_buffer_get(c);
3175 	path->nodes[level] = c;
3176 	path->locks[level] = BTRFS_WRITE_LOCK;
3177 	path->slots[level] = 0;
3178 	return 0;
3179 }
3180 
3181 /*
3182  * worker function to insert a single pointer in a node.
3183  * the node should have enough room for the pointer already
3184  *
3185  * slot and level indicate where you want the key to go, and
3186  * blocknr is the block the key points to.
3187  */
3188 static void insert_ptr(struct btrfs_trans_handle *trans,
3189 		       struct btrfs_root *root, struct btrfs_path *path,
3190 		       struct btrfs_disk_key *key, u64 bytenr,
3191 		       int slot, int level)
3192 {
3193 	struct extent_buffer *lower;
3194 	int nritems;
3195 	int ret;
3196 
3197 	BUG_ON(!path->nodes[level]);
3198 	btrfs_assert_tree_locked(path->nodes[level]);
3199 	lower = path->nodes[level];
3200 	nritems = btrfs_header_nritems(lower);
3201 	BUG_ON(slot > nritems);
3202 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3203 	if (slot != nritems) {
3204 		if (level)
3205 			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3206 					     slot, nritems - slot);
3207 		memmove_extent_buffer(lower,
3208 			      btrfs_node_key_ptr_offset(slot + 1),
3209 			      btrfs_node_key_ptr_offset(slot),
3210 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3211 	}
3212 	if (level) {
3213 		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3214 					      MOD_LOG_KEY_ADD, GFP_NOFS);
3215 		BUG_ON(ret < 0);
3216 	}
3217 	btrfs_set_node_key(lower, key, slot);
3218 	btrfs_set_node_blockptr(lower, slot, bytenr);
3219 	WARN_ON(trans->transid == 0);
3220 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3221 	btrfs_set_header_nritems(lower, nritems + 1);
3222 	btrfs_mark_buffer_dirty(lower);
3223 }
3224 
3225 /*
3226  * split the node at the specified level in path in two.
3227  * The path is corrected to point to the appropriate node after the split
3228  *
3229  * Before splitting this tries to make some room in the node by pushing
3230  * left and right; if either one works, it returns right away.
3231  *
3232  * returns 0 on success and < 0 on failure
3233  */
3234 static noinline int split_node(struct btrfs_trans_handle *trans,
3235 			       struct btrfs_root *root,
3236 			       struct btrfs_path *path, int level)
3237 {
3238 	struct extent_buffer *c;
3239 	struct extent_buffer *split;
3240 	struct btrfs_disk_key disk_key;
3241 	int mid;
3242 	int ret;
3243 	u32 c_nritems;
3244 
3245 	c = path->nodes[level];
3246 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3247 	if (c == root->node) {
3248 		/*
3249 		 * trying to split the root, let's make a new one
3250 		 *
3251 		 * tree mod log: We don't log removal of the old root in
3252 		 * insert_new_root, because that root buffer will be kept as a
3253 		 * normal node. We are going to log removal of half of the
3254 		 * elements below with tree_mod_log_eb_copy. We're holding a
3255 		 * tree lock on the buffer, which is why we cannot race with
3256 		 * other tree_mod_log users.
3257 		 */
3258 		ret = insert_new_root(trans, root, path, level + 1);
3259 		if (ret)
3260 			return ret;
3261 	} else {
3262 		ret = push_nodes_for_insert(trans, root, path, level);
3263 		c = path->nodes[level];
3264 		if (!ret && btrfs_header_nritems(c) <
3265 		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3266 			return 0;
3267 		if (ret < 0)
3268 			return ret;
3269 	}
3270 
3271 	c_nritems = btrfs_header_nritems(c);
3272 	mid = (c_nritems + 1) / 2;
3273 	btrfs_node_key(c, &disk_key, mid);
3274 
3275 	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3276 					root->root_key.objectid,
3277 					&disk_key, level, c->start, 0);
3278 	if (IS_ERR(split))
3279 		return PTR_ERR(split);
3280 
3281 	root_add_used(root, root->nodesize);
3282 
3283 	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3284 	btrfs_set_header_level(split, btrfs_header_level(c));
3285 	btrfs_set_header_bytenr(split, split->start);
3286 	btrfs_set_header_generation(split, trans->transid);
3287 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3288 	btrfs_set_header_owner(split, root->root_key.objectid);
3289 	write_extent_buffer(split, root->fs_info->fsid,
3290 			    btrfs_header_fsid(split), BTRFS_FSID_SIZE);
3291 	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3292 			    btrfs_header_chunk_tree_uuid(split),
3293 			    BTRFS_UUID_SIZE);
3294 
3295 	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3296 	copy_extent_buffer(split, c,
3297 			   btrfs_node_key_ptr_offset(0),
3298 			   btrfs_node_key_ptr_offset(mid),
3299 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3300 	btrfs_set_header_nritems(split, c_nritems - mid);
3301 	btrfs_set_header_nritems(c, mid);
3302 	ret = 0;
3303 
3304 	btrfs_mark_buffer_dirty(c);
3305 	btrfs_mark_buffer_dirty(split);
3306 
3307 	insert_ptr(trans, root, path, &disk_key, split->start,
3308 		   path->slots[level + 1] + 1, level + 1);
3309 
3310 	if (path->slots[level] >= mid) {
3311 		path->slots[level] -= mid;
3312 		btrfs_tree_unlock(c);
3313 		free_extent_buffer(c);
3314 		path->nodes[level] = split;
3315 		path->slots[level + 1] += 1;
3316 	} else {
3317 		btrfs_tree_unlock(split);
3318 		free_extent_buffer(split);
3319 	}
3320 	return ret;
3321 }
3322 
3323 /*
3324  * how many bytes are required to store the items in a leaf.  start
3325  * and nr indicate which items in the leaf to check.  This totals up the
3326  * space used both by the item structs and the item data
3327  */
3328 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3329 {
3330 	struct btrfs_item *start_item;
3331 	struct btrfs_item *end_item;
3332 	struct btrfs_map_token token;
3333 	int data_len;
3334 	int nritems = btrfs_header_nritems(l);
3335 	int end = min(nritems, start + nr) - 1;
3336 
3337 	if (!nr)
3338 		return 0;
3339 	btrfs_init_map_token(&token);
3340 	start_item = btrfs_item_nr(l, start);
3341 	end_item = btrfs_item_nr(l, end);
3342 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3343 		btrfs_token_item_size(l, start_item, &token);
3344 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3345 	data_len += sizeof(struct btrfs_item) * nr;
3346 	WARN_ON(data_len < 0);
3347 	return data_len;
3348 }
3349 
3350 /*
3351  * The space between the end of the leaf items and
3352  * the start of the leaf data.  IOW, how much room
3353  * the leaf has left for both items and data
3354  */
3355 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3356 				   struct extent_buffer *leaf)
3357 {
3358 	int nritems = btrfs_header_nritems(leaf);
3359 	int ret;
3360 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3361 	if (ret < 0) {
3362 		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3363 		       "used %d nritems %d\n",
3364 		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3365 		       leaf_space_used(leaf, 0, nritems), nritems);
3366 	}
3367 	return ret;
3368 }
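
/*
 * Worked example (illustrative only, assuming a 4K leaf): struct
 * btrfs_header takes 101 bytes, so BTRFS_LEAF_DATA_SIZE(root) is
 * 4096 - 101 = 3995 bytes.  Each item consumes sizeof(struct btrfs_item)
 * (25 bytes) from the front of that area plus its data from the back, so
 * a leaf holding 10 items of 100 data bytes each has
 * leaf_space_used() = 10 * (25 + 100) = 1250 and
 * btrfs_leaf_free_space() = 3995 - 1250 = 2745 bytes left.
 */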
3369 
3370 /*
3371  * min slot controls the lowest index we're willing to push to the
3372  * right.  We'll push up to and including min_slot, but no lower
3373  */
3374 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3375 				      struct btrfs_root *root,
3376 				      struct btrfs_path *path,
3377 				      int data_size, int empty,
3378 				      struct extent_buffer *right,
3379 				      int free_space, u32 left_nritems,
3380 				      u32 min_slot)
3381 {
3382 	struct extent_buffer *left = path->nodes[0];
3383 	struct extent_buffer *upper = path->nodes[1];
3384 	struct btrfs_map_token token;
3385 	struct btrfs_disk_key disk_key;
3386 	int slot;
3387 	u32 i;
3388 	int push_space = 0;
3389 	int push_items = 0;
3390 	struct btrfs_item *item;
3391 	u32 nr;
3392 	u32 right_nritems;
3393 	u32 data_end;
3394 	u32 this_item_size;
3395 
3396 	btrfs_init_map_token(&token);
3397 
3398 	if (empty)
3399 		nr = 0;
3400 	else
3401 		nr = max_t(u32, 1, min_slot);
3402 
3403 	if (path->slots[0] >= left_nritems)
3404 		push_space += data_size;
3405 
3406 	slot = path->slots[1];
3407 	i = left_nritems - 1;
3408 	while (i >= nr) {
3409 		item = btrfs_item_nr(left, i);
3410 
3411 		if (!empty && push_items > 0) {
3412 			if (path->slots[0] > i)
3413 				break;
3414 			if (path->slots[0] == i) {
3415 				int space = btrfs_leaf_free_space(root, left);
3416 				if (space + push_space * 2 > free_space)
3417 					break;
3418 			}
3419 		}
3420 
3421 		if (path->slots[0] == i)
3422 			push_space += data_size;
3423 
3424 		this_item_size = btrfs_item_size(left, item);
3425 		if (this_item_size + sizeof(*item) + push_space > free_space)
3426 			break;
3427 
3428 		push_items++;
3429 		push_space += this_item_size + sizeof(*item);
3430 		if (i == 0)
3431 			break;
3432 		i--;
3433 	}
3434 
3435 	if (push_items == 0)
3436 		goto out_unlock;
3437 
3438 	WARN_ON(!empty && push_items == left_nritems);
3439 
3440 	/* push left to right */
3441 	right_nritems = btrfs_header_nritems(right);
3442 
3443 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3444 	push_space -= leaf_data_end(root, left);
3445 
3446 	/* make room in the right data area */
3447 	data_end = leaf_data_end(root, right);
3448 	memmove_extent_buffer(right,
3449 			      btrfs_leaf_data(right) + data_end - push_space,
3450 			      btrfs_leaf_data(right) + data_end,
3451 			      BTRFS_LEAF_DATA_SIZE(root) - data_end);
3452 
3453 	/* copy from the left data area */
3454 	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3455 		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
3456 		     btrfs_leaf_data(left) + leaf_data_end(root, left),
3457 		     push_space);
3458 
3459 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3460 			      btrfs_item_nr_offset(0),
3461 			      right_nritems * sizeof(struct btrfs_item));
3462 
3463 	/* copy the items from left to right */
3464 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3465 		   btrfs_item_nr_offset(left_nritems - push_items),
3466 		   push_items * sizeof(struct btrfs_item));
3467 
3468 	/* update the item pointers */
3469 	right_nritems += push_items;
3470 	btrfs_set_header_nritems(right, right_nritems);
3471 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3472 	for (i = 0; i < right_nritems; i++) {
3473 		item = btrfs_item_nr(right, i);
3474 		push_space -= btrfs_token_item_size(right, item, &token);
3475 		btrfs_set_token_item_offset(right, item, push_space, &token);
3476 	}
3477 
3478 	left_nritems -= push_items;
3479 	btrfs_set_header_nritems(left, left_nritems);
3480 
3481 	if (left_nritems)
3482 		btrfs_mark_buffer_dirty(left);
3483 	else
3484 		clean_tree_block(trans, root, left);
3485 
3486 	btrfs_mark_buffer_dirty(right);
3487 
3488 	btrfs_item_key(right, &disk_key, 0);
3489 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3490 	btrfs_mark_buffer_dirty(upper);
3491 
3492 	/* then fixup the leaf pointer in the path */
3493 	if (path->slots[0] >= left_nritems) {
3494 		path->slots[0] -= left_nritems;
3495 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3496 			clean_tree_block(trans, root, path->nodes[0]);
3497 		btrfs_tree_unlock(path->nodes[0]);
3498 		free_extent_buffer(path->nodes[0]);
3499 		path->nodes[0] = right;
3500 		path->slots[1] += 1;
3501 	} else {
3502 		btrfs_tree_unlock(right);
3503 		free_extent_buffer(right);
3504 	}
3505 	return 0;
3506 
3507 out_unlock:
3508 	btrfs_tree_unlock(right);
3509 	free_extent_buffer(right);
3510 	return 1;
3511 }
3512 
3513 /*
3514  * push some data in the path leaf to the right, trying to free up at
3515  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3516  *
3517  * returns 1 if the push failed because the other node didn't have enough
3518  * room, 0 if everything worked out and < 0 if there were major errors.
3519  *
3520  * this will push starting from min_slot to the end of the leaf.  It won't
3521  * push any slot lower than min_slot
3522  */
3523 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3524 			   *root, struct btrfs_path *path,
3525 			   int min_data_size, int data_size,
3526 			   int empty, u32 min_slot)
3527 {
3528 	struct extent_buffer *left = path->nodes[0];
3529 	struct extent_buffer *right;
3530 	struct extent_buffer *upper;
3531 	int slot;
3532 	int free_space;
3533 	u32 left_nritems;
3534 	int ret;
3535 
3536 	if (!path->nodes[1])
3537 		return 1;
3538 
3539 	slot = path->slots[1];
3540 	upper = path->nodes[1];
3541 	if (slot >= btrfs_header_nritems(upper) - 1)
3542 		return 1;
3543 
3544 	btrfs_assert_tree_locked(path->nodes[1]);
3545 
3546 	right = read_node_slot(root, upper, slot + 1);
3547 	if (right == NULL)
3548 		return 1;
3549 
3550 	btrfs_tree_lock(right);
3551 	btrfs_set_lock_blocking(right);
3552 
3553 	free_space = btrfs_leaf_free_space(root, right);
3554 	if (free_space < data_size)
3555 		goto out_unlock;
3556 
3557 	/* cow and double check */
3558 	ret = btrfs_cow_block(trans, root, right, upper,
3559 			      slot + 1, &right);
3560 	if (ret)
3561 		goto out_unlock;
3562 
3563 	free_space = btrfs_leaf_free_space(root, right);
3564 	if (free_space < data_size)
3565 		goto out_unlock;
3566 
3567 	left_nritems = btrfs_header_nritems(left);
3568 	if (left_nritems == 0)
3569 		goto out_unlock;
3570 
3571 	return __push_leaf_right(trans, root, path, min_data_size, empty,
3572 				right, free_space, left_nritems, min_slot);
3573 out_unlock:
3574 	btrfs_tree_unlock(right);
3575 	free_extent_buffer(right);
3576 	return 1;
3577 }
3578 
3579 /*
3580  * push some data in the path leaf to the left, trying to free up at
3581  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3582  *
3583  * max_slot can put a limit on how far into the leaf we'll push items.  The
3584  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
3585  * items
3586  */
3587 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3588 				     struct btrfs_root *root,
3589 				     struct btrfs_path *path, int data_size,
3590 				     int empty, struct extent_buffer *left,
3591 				     int free_space, u32 right_nritems,
3592 				     u32 max_slot)
3593 {
3594 	struct btrfs_disk_key disk_key;
3595 	struct extent_buffer *right = path->nodes[0];
3596 	int i;
3597 	int push_space = 0;
3598 	int push_items = 0;
3599 	struct btrfs_item *item;
3600 	u32 old_left_nritems;
3601 	u32 nr;
3602 	int ret = 0;
3603 	u32 this_item_size;
3604 	u32 old_left_item_size;
3605 	struct btrfs_map_token token;
3606 
3607 	btrfs_init_map_token(&token);
3608 
3609 	if (empty)
3610 		nr = min(right_nritems, max_slot);
3611 	else
3612 		nr = min(right_nritems - 1, max_slot);
3613 
3614 	for (i = 0; i < nr; i++) {
3615 		item = btrfs_item_nr(right, i);
3616 
3617 		if (!empty && push_items > 0) {
3618 			if (path->slots[0] < i)
3619 				break;
3620 			if (path->slots[0] == i) {
3621 				int space = btrfs_leaf_free_space(root, right);
3622 				if (space + push_space * 2 > free_space)
3623 					break;
3624 			}
3625 		}
3626 
3627 		if (path->slots[0] == i)
3628 			push_space += data_size;
3629 
3630 		this_item_size = btrfs_item_size(right, item);
3631 		if (this_item_size + sizeof(*item) + push_space > free_space)
3632 			break;
3633 
3634 		push_items++;
3635 		push_space += this_item_size + sizeof(*item);
3636 	}
3637 
3638 	if (push_items == 0) {
3639 		ret = 1;
3640 		goto out;
3641 	}
3642 	if (!empty && push_items == btrfs_header_nritems(right))
3643 		WARN_ON(1);
3644 
3645 	/* push data from right to left */
3646 	copy_extent_buffer(left, right,
3647 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3648 			   btrfs_item_nr_offset(0),
3649 			   push_items * sizeof(struct btrfs_item));
3650 
3651 	push_space = BTRFS_LEAF_DATA_SIZE(root) -
3652 		     btrfs_item_offset_nr(right, push_items - 1);
3653 
3654 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3655 		     leaf_data_end(root, left) - push_space,
3656 		     btrfs_leaf_data(right) +
3657 		     btrfs_item_offset_nr(right, push_items - 1),
3658 		     push_space);
3659 	old_left_nritems = btrfs_header_nritems(left);
3660 	BUG_ON(old_left_nritems <= 0);
3661 
3662 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3663 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3664 		u32 ioff;
3665 
3666 		item = btrfs_item_nr(left, i);
3667 
3668 		ioff = btrfs_token_item_offset(left, item, &token);
3669 		btrfs_set_token_item_offset(left, item,
3670 		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3671 		      &token);
3672 	}
3673 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3674 
3675 	/* fixup right node */
3676 	if (push_items > right_nritems)
3677 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3678 		       right_nritems);
3679 
3680 	if (push_items < right_nritems) {
3681 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3682 						  leaf_data_end(root, right);
3683 		memmove_extent_buffer(right, btrfs_leaf_data(right) +
3684 				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
3685 				      btrfs_leaf_data(right) +
3686 				      leaf_data_end(root, right), push_space);
3687 
3688 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3689 			      btrfs_item_nr_offset(push_items),
3690 			     (btrfs_header_nritems(right) - push_items) *
3691 			     sizeof(struct btrfs_item));
3692 	}
3693 	right_nritems -= push_items;
3694 	btrfs_set_header_nritems(right, right_nritems);
3695 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3696 	for (i = 0; i < right_nritems; i++) {
3697 		item = btrfs_item_nr(right, i);
3698 
3699 		push_space = push_space - btrfs_token_item_size(right,
3700 								item, &token);
3701 		btrfs_set_token_item_offset(right, item, push_space, &token);
3702 	}
3703 
3704 	btrfs_mark_buffer_dirty(left);
3705 	if (right_nritems)
3706 		btrfs_mark_buffer_dirty(right);
3707 	else
3708 		clean_tree_block(trans, root, right);
3709 
3710 	btrfs_item_key(right, &disk_key, 0);
3711 	fixup_low_keys(root, path, &disk_key, 1);
3712 
3713 	/* then fixup the leaf pointer in the path */
3714 	if (path->slots[0] < push_items) {
3715 		path->slots[0] += old_left_nritems;
3716 		btrfs_tree_unlock(path->nodes[0]);
3717 		free_extent_buffer(path->nodes[0]);
3718 		path->nodes[0] = left;
3719 		path->slots[1] -= 1;
3720 	} else {
3721 		btrfs_tree_unlock(left);
3722 		free_extent_buffer(left);
3723 		path->slots[0] -= push_items;
3724 	}
3725 	BUG_ON(path->slots[0] < 0);
3726 	return ret;
3727 out:
3728 	btrfs_tree_unlock(left);
3729 	free_extent_buffer(left);
3730 	return ret;
3731 }
3732 
3733 /*
3734  * push some data in the path leaf to the left, trying to free up at
3735  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3736  *
3737  * max_slot can put a limit on how far into the leaf we'll push items.  The
3738  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3739  * items
3740  */
3741 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3742 			  *root, struct btrfs_path *path, int min_data_size,
3743 			  int data_size, int empty, u32 max_slot)
3744 {
3745 	struct extent_buffer *right = path->nodes[0];
3746 	struct extent_buffer *left;
3747 	int slot;
3748 	int free_space;
3749 	u32 right_nritems;
3750 	int ret = 0;
3751 
3752 	slot = path->slots[1];
3753 	if (slot == 0)
3754 		return 1;
3755 	if (!path->nodes[1])
3756 		return 1;
3757 
3758 	right_nritems = btrfs_header_nritems(right);
3759 	if (right_nritems == 0)
3760 		return 1;
3761 
3762 	btrfs_assert_tree_locked(path->nodes[1]);
3763 
3764 	left = read_node_slot(root, path->nodes[1], slot - 1);
3765 	if (left == NULL)
3766 		return 1;
3767 
3768 	btrfs_tree_lock(left);
3769 	btrfs_set_lock_blocking(left);
3770 
3771 	free_space = btrfs_leaf_free_space(root, left);
3772 	if (free_space < data_size) {
3773 		ret = 1;
3774 		goto out;
3775 	}
3776 
3777 	/* cow and double check */
3778 	ret = btrfs_cow_block(trans, root, left,
3779 			      path->nodes[1], slot - 1, &left);
3780 	if (ret) {
3781 		/* we hit -ENOSPC, but it isn't fatal here */
3782 		if (ret == -ENOSPC)
3783 			ret = 1;
3784 		goto out;
3785 	}
3786 
3787 	free_space = btrfs_leaf_free_space(root, left);
3788 	if (free_space < data_size) {
3789 		ret = 1;
3790 		goto out;
3791 	}
3792 
3793 	return __push_leaf_left(trans, root, path, min_data_size,
3794 			       empty, left, free_space, right_nritems,
3795 			       max_slot);
3796 out:
3797 	btrfs_tree_unlock(left);
3798 	free_extent_buffer(left);
3799 	return ret;
3800 }
3801 
3802 /*
3803  * split the path's leaf in two, making sure there is at least data_size
3804  * available for the resulting leaf level of the path.
3805  */
3806 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3807 				    struct btrfs_root *root,
3808 				    struct btrfs_path *path,
3809 				    struct extent_buffer *l,
3810 				    struct extent_buffer *right,
3811 				    int slot, int mid, int nritems)
3812 {
3813 	int data_copy_size;
3814 	int rt_data_off;
3815 	int i;
3816 	struct btrfs_disk_key disk_key;
3817 	struct btrfs_map_token token;
3818 
3819 	btrfs_init_map_token(&token);
3820 
3821 	nritems = nritems - mid;
3822 	btrfs_set_header_nritems(right, nritems);
3823 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3824 
3825 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3826 			   btrfs_item_nr_offset(mid),
3827 			   nritems * sizeof(struct btrfs_item));
3828 
3829 	copy_extent_buffer(right, l,
3830 		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3831 		     data_copy_size, btrfs_leaf_data(l) +
3832 		     leaf_data_end(root, l), data_copy_size);
3833 
3834 	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3835 		      btrfs_item_end_nr(l, mid);
3836 
3837 	for (i = 0; i < nritems; i++) {
3838 		struct btrfs_item *item = btrfs_item_nr(right, i);
3839 		u32 ioff;
3840 
3841 		ioff = btrfs_token_item_offset(right, item, &token);
3842 		btrfs_set_token_item_offset(right, item,
3843 					    ioff + rt_data_off, &token);
3844 	}
3845 
3846 	btrfs_set_header_nritems(l, mid);
3847 	btrfs_item_key(right, &disk_key, 0);
3848 	insert_ptr(trans, root, path, &disk_key, right->start,
3849 		   path->slots[1] + 1, 1);
3850 
3851 	btrfs_mark_buffer_dirty(right);
3852 	btrfs_mark_buffer_dirty(l);
3853 	BUG_ON(path->slots[0] != slot);
3854 
3855 	if (mid <= slot) {
3856 		btrfs_tree_unlock(path->nodes[0]);
3857 		free_extent_buffer(path->nodes[0]);
3858 		path->nodes[0] = right;
3859 		path->slots[0] -= mid;
3860 		path->slots[1] += 1;
3861 	} else {
3862 		btrfs_tree_unlock(right);
3863 		free_extent_buffer(right);
3864 	}
3865 
3866 	BUG_ON(path->slots[0] < 0);
3867 }
3868 
3869 /*
3870  * double splits happen when we need to insert a big item in the middle
3871  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
3872  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3873  *          A                 B                 C
3874  *
3875  * We avoid this by trying to push the items on either side of our target
3876  * into the adjacent leaves.  If all goes well we can avoid the double split
3877  * completely.
3878  */
3879 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3880 					  struct btrfs_root *root,
3881 					  struct btrfs_path *path,
3882 					  int data_size)
3883 {
3884 	int ret;
3885 	int progress = 0;
3886 	int slot;
3887 	u32 nritems;
3888 
3889 	slot = path->slots[0];
3890 
3891 	/*
3892 	 * try to push all the items after our slot into the
3893 	 * right leaf
3894 	 */
3895 	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3896 	if (ret < 0)
3897 		return ret;
3898 
3899 	if (ret == 0)
3900 		progress++;
3901 
3902 	nritems = btrfs_header_nritems(path->nodes[0]);
3903 	/*
3904 	 * our goal is to get our slot at the start or end of a leaf.  If
3905 	 * we've done so we're done
3906 	 */
3907 	if (path->slots[0] == 0 || path->slots[0] == nritems)
3908 		return 0;
3909 
3910 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3911 		return 0;
3912 
3913 	/* try to push all the items before our slot into the previous leaf */
3914 	slot = path->slots[0];
3915 	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3916 	if (ret < 0)
3917 		return ret;
3918 
3919 	if (ret == 0)
3920 		progress++;
3921 
3922 	if (progress)
3923 		return 0;
3924 	return 1;
3925 }
3926 
3927 /*
3928  * split the path's leaf in two, making sure there is at least data_size
3929  * available for the resulting leaf level of the path.
3930  *
3931  * returns 0 if all went well and < 0 on failure.
3932  */
3933 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3934 			       struct btrfs_root *root,
3935 			       struct btrfs_key *ins_key,
3936 			       struct btrfs_path *path, int data_size,
3937 			       int extend)
3938 {
3939 	struct btrfs_disk_key disk_key;
3940 	struct extent_buffer *l;
3941 	u32 nritems;
3942 	int mid;
3943 	int slot;
3944 	struct extent_buffer *right;
3945 	int ret = 0;
3946 	int wret;
3947 	int split;
3948 	int num_doubles = 0;
3949 	int tried_avoid_double = 0;
3950 
3951 	l = path->nodes[0];
3952 	slot = path->slots[0];
3953 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
3954 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3955 		return -EOVERFLOW;
3956 
3957 	/* first try to make some room by pushing left and right */
3958 	if (data_size && path->nodes[1]) {
3959 		wret = push_leaf_right(trans, root, path, data_size,
3960 				       data_size, 0, 0);
3961 		if (wret < 0)
3962 			return wret;
3963 		if (wret) {
3964 			wret = push_leaf_left(trans, root, path, data_size,
3965 					      data_size, 0, (u32)-1);
3966 			if (wret < 0)
3967 				return wret;
3968 		}
3969 		l = path->nodes[0];
3970 
3971 		/* did the pushes work? */
3972 		if (btrfs_leaf_free_space(root, l) >= data_size)
3973 			return 0;
3974 	}
3975 
3976 	if (!path->nodes[1]) {
3977 		ret = insert_new_root(trans, root, path, 1);
3978 		if (ret)
3979 			return ret;
3980 	}
3981 again:
3982 	split = 1;
3983 	l = path->nodes[0];
3984 	slot = path->slots[0];
3985 	nritems = btrfs_header_nritems(l);
3986 	mid = (nritems + 1) / 2;
3987 
3988 	if (mid <= slot) {
3989 		if (nritems == 1 ||
3990 		    leaf_space_used(l, mid, nritems - mid) + data_size >
3991 			BTRFS_LEAF_DATA_SIZE(root)) {
3992 			if (slot >= nritems) {
3993 				split = 0;
3994 			} else {
3995 				mid = slot;
3996 				if (mid != nritems &&
3997 				    leaf_space_used(l, mid, nritems - mid) +
3998 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3999 					if (data_size && !tried_avoid_double)
4000 						goto push_for_double;
4001 					split = 2;
4002 				}
4003 			}
4004 		}
4005 	} else {
4006 		if (leaf_space_used(l, 0, mid) + data_size >
4007 			BTRFS_LEAF_DATA_SIZE(root)) {
4008 			if (!extend && data_size && slot == 0) {
4009 				split = 0;
4010 			} else if ((extend || !data_size) && slot == 0) {
4011 				mid = 1;
4012 			} else {
4013 				mid = slot;
4014 				if (mid != nritems &&
4015 				    leaf_space_used(l, mid, nritems - mid) +
4016 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4017 					if (data_size && !tried_avoid_double)
4018 						goto push_for_double;
4019 					split = 2;
4020 				}
4021 			}
4022 		}
4023 	}
4024 
4025 	if (split == 0)
4026 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4027 	else
4028 		btrfs_item_key(l, &disk_key, mid);
4029 
4030 	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4031 					root->root_key.objectid,
4032 					&disk_key, 0, l->start, 0);
4033 	if (IS_ERR(right))
4034 		return PTR_ERR(right);
4035 
4036 	root_add_used(root, root->leafsize);
4037 
4038 	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4039 	btrfs_set_header_bytenr(right, right->start);
4040 	btrfs_set_header_generation(right, trans->transid);
4041 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4042 	btrfs_set_header_owner(right, root->root_key.objectid);
4043 	btrfs_set_header_level(right, 0);
4044 	write_extent_buffer(right, root->fs_info->fsid,
4045 			    btrfs_header_fsid(right), BTRFS_FSID_SIZE);
4046 
4047 	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4048 			    btrfs_header_chunk_tree_uuid(right),
4049 			    BTRFS_UUID_SIZE);
4050 
4051 	if (split == 0) {
4052 		if (mid <= slot) {
4053 			btrfs_set_header_nritems(right, 0);
4054 			insert_ptr(trans, root, path, &disk_key, right->start,
4055 				   path->slots[1] + 1, 1);
4056 			btrfs_tree_unlock(path->nodes[0]);
4057 			free_extent_buffer(path->nodes[0]);
4058 			path->nodes[0] = right;
4059 			path->slots[0] = 0;
4060 			path->slots[1] += 1;
4061 		} else {
4062 			btrfs_set_header_nritems(right, 0);
4063 			insert_ptr(trans, root, path, &disk_key, right->start,
4064 					  path->slots[1], 1);
4065 			btrfs_tree_unlock(path->nodes[0]);
4066 			free_extent_buffer(path->nodes[0]);
4067 			path->nodes[0] = right;
4068 			path->slots[0] = 0;
4069 			if (path->slots[1] == 0)
4070 				fixup_low_keys(root, path, &disk_key, 1);
4071 		}
4072 		btrfs_mark_buffer_dirty(right);
4073 		return ret;
4074 	}
4075 
4076 	copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4077 
4078 	if (split == 2) {
4079 		BUG_ON(num_doubles != 0);
4080 		num_doubles++;
4081 		goto again;
4082 	}
4083 
4084 	return 0;
4085 
4086 push_for_double:
4087 	push_for_double_split(trans, root, path, data_size);
4088 	tried_avoid_double = 1;
4089 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4090 		return 0;
4091 	goto again;
4092 }
4093 
4094 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4095 					 struct btrfs_root *root,
4096 					 struct btrfs_path *path, int ins_len)
4097 {
4098 	struct btrfs_key key;
4099 	struct extent_buffer *leaf;
4100 	struct btrfs_file_extent_item *fi;
4101 	u64 extent_len = 0;
4102 	u32 item_size;
4103 	int ret;
4104 
4105 	leaf = path->nodes[0];
4106 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4107 
4108 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4109 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4110 
4111 	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4112 		return 0;
4113 
4114 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4115 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4116 		fi = btrfs_item_ptr(leaf, path->slots[0],
4117 				    struct btrfs_file_extent_item);
4118 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4119 	}
4120 	btrfs_release_path(path);
4121 
4122 	path->keep_locks = 1;
4123 	path->search_for_split = 1;
4124 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4125 	path->search_for_split = 0;
4126 	if (ret < 0)
4127 		goto err;
4128 
4129 	ret = -EAGAIN;
4130 	leaf = path->nodes[0];
4131 	/* if our item isn't there or got smaller, return now */
4132 	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4133 		goto err;
4134 
4135 	/* the leaf has changed, it now has room.  return now */
4136 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4137 		goto err;
4138 
4139 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4140 		fi = btrfs_item_ptr(leaf, path->slots[0],
4141 				    struct btrfs_file_extent_item);
4142 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4143 			goto err;
4144 	}
4145 
4146 	btrfs_set_path_blocking(path);
4147 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4148 	if (ret)
4149 		goto err;
4150 
4151 	path->keep_locks = 0;
4152 	btrfs_unlock_up_safe(path, 1);
4153 	return 0;
4154 err:
4155 	path->keep_locks = 0;
4156 	return ret;
4157 }
4158 
4159 static noinline int split_item(struct btrfs_trans_handle *trans,
4160 			       struct btrfs_root *root,
4161 			       struct btrfs_path *path,
4162 			       struct btrfs_key *new_key,
4163 			       unsigned long split_offset)
4164 {
4165 	struct extent_buffer *leaf;
4166 	struct btrfs_item *item;
4167 	struct btrfs_item *new_item;
4168 	int slot;
4169 	char *buf;
4170 	u32 nritems;
4171 	u32 item_size;
4172 	u32 orig_offset;
4173 	struct btrfs_disk_key disk_key;
4174 
4175 	leaf = path->nodes[0];
4176 	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4177 
4178 	btrfs_set_path_blocking(path);
4179 
4180 	item = btrfs_item_nr(leaf, path->slots[0]);
4181 	orig_offset = btrfs_item_offset(leaf, item);
4182 	item_size = btrfs_item_size(leaf, item);
4183 
4184 	buf = kmalloc(item_size, GFP_NOFS);
4185 	if (!buf)
4186 		return -ENOMEM;
4187 
4188 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4189 			    path->slots[0]), item_size);
4190 
4191 	slot = path->slots[0] + 1;
4192 	nritems = btrfs_header_nritems(leaf);
4193 	if (slot != nritems) {
4194 		/* shift the items */
4195 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4196 				btrfs_item_nr_offset(slot),
4197 				(nritems - slot) * sizeof(struct btrfs_item));
4198 	}
4199 
4200 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4201 	btrfs_set_item_key(leaf, &disk_key, slot);
4202 
4203 	new_item = btrfs_item_nr(leaf, slot);
4204 
4205 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4206 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4207 
4208 	btrfs_set_item_offset(leaf, item,
4209 			      orig_offset + item_size - split_offset);
4210 	btrfs_set_item_size(leaf, item, split_offset);
4211 
4212 	btrfs_set_header_nritems(leaf, nritems + 1);
4213 
4214 	/* write the data for the start of the original item */
4215 	write_extent_buffer(leaf, buf,
4216 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4217 			    split_offset);
4218 
4219 	/* write the data for the new item */
4220 	write_extent_buffer(leaf, buf + split_offset,
4221 			    btrfs_item_ptr_offset(leaf, slot),
4222 			    item_size - split_offset);
4223 	btrfs_mark_buffer_dirty(leaf);
4224 
4225 	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4226 	kfree(buf);
4227 	return 0;
4228 }
4229 
4230 /*
4231  * This function splits a single item into two items,
4232  * giving 'new_key' to the new item and splitting the
4233  * old one at split_offset (from the start of the item).
4234  *
4235  * The path may be released by this operation.  After
4236  * the split, the path is pointing to the old item.  The
4237  * new item is going to be in the same node as the old one.
4238  *
4239  * Note, the item being split must be small enough to live alone on
4240  * a tree block with room for one extra struct btrfs_item
4241  *
4242  * This allows us to split the item in place, keeping a lock on the
4243  * leaf the entire time.
4244  */
4245 int btrfs_split_item(struct btrfs_trans_handle *trans,
4246 		     struct btrfs_root *root,
4247 		     struct btrfs_path *path,
4248 		     struct btrfs_key *new_key,
4249 		     unsigned long split_offset)
4250 {
4251 	int ret;
4252 	ret = setup_leaf_for_split(trans, root, path,
4253 				   sizeof(struct btrfs_item));
4254 	if (ret)
4255 		return ret;
4256 
4257 	ret = split_item(trans, root, path, new_key, split_offset);
4258 	return ret;
4259 }
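
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * could use btrfs_split_item() to split an EXTENT_DATA item at a byte
 * offset, assuming the path already points at the item and a transaction
 * handle is held.  The function name and new_offset parameter are
 * hypothetical.
 */
static int example_split_extent_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     u64 new_offset,
				     unsigned long split_offset)
{
	struct btrfs_key new_key;

	/* the second half of the item gets the same key with a new offset */
	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/*
	 * on success the path still points at the (now shortened) original
	 * item; the new item lives in the same leaf at path->slots[0] + 1
	 */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}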
4260 
4261 /*
4262  * This function duplicates an item, giving 'new_key' to the new item.
4263  * It guarantees both items live in the same tree leaf and the new item
4264  * is contiguous with the original item.
4265  *
4266  * This allows us to split a file extent in place, keeping a lock on the
4267  * leaf the entire time.
4268  */
4269 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4270 			 struct btrfs_root *root,
4271 			 struct btrfs_path *path,
4272 			 struct btrfs_key *new_key)
4273 {
4274 	struct extent_buffer *leaf;
4275 	int ret;
4276 	u32 item_size;
4277 
4278 	leaf = path->nodes[0];
4279 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4280 	ret = setup_leaf_for_split(trans, root, path,
4281 				   item_size + sizeof(struct btrfs_item));
4282 	if (ret)
4283 		return ret;
4284 
4285 	path->slots[0]++;
4286 	setup_items_for_insert(root, path, new_key, &item_size,
4287 			       item_size, item_size +
4288 			       sizeof(struct btrfs_item), 1);
4289 	leaf = path->nodes[0];
4290 	memcpy_extent_buffer(leaf,
4291 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4292 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4293 			     item_size);
4294 	return 0;
4295 }
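
/*
 * Illustrative sketch (not part of the original file): duplicating the
 * item the path points at under a new key, the way a file extent is
 * copied before one half of it is modified.  The helper name and the
 * new_file_offset parameter are hypothetical.
 */
static int example_dup_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    u64 new_file_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_file_offset;

	/* on success path->slots[0] points at the new copy */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}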
4296 
4297 /*
4298  * make the item pointed to by the path smaller.  new_size indicates
4299  * how small to make it, and from_end tells us if we just chop bytes
4300  * off the end of the item or if we shift the item to chop bytes off
4301  * the front.
4302  */
4303 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4304 			 u32 new_size, int from_end)
4305 {
4306 	int slot;
4307 	struct extent_buffer *leaf;
4308 	struct btrfs_item *item;
4309 	u32 nritems;
4310 	unsigned int data_end;
4311 	unsigned int old_data_start;
4312 	unsigned int old_size;
4313 	unsigned int size_diff;
4314 	int i;
4315 	struct btrfs_map_token token;
4316 
4317 	btrfs_init_map_token(&token);
4318 
4319 	leaf = path->nodes[0];
4320 	slot = path->slots[0];
4321 
4322 	old_size = btrfs_item_size_nr(leaf, slot);
4323 	if (old_size == new_size)
4324 		return;
4325 
4326 	nritems = btrfs_header_nritems(leaf);
4327 	data_end = leaf_data_end(root, leaf);
4328 
4329 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4330 
4331 	size_diff = old_size - new_size;
4332 
4333 	BUG_ON(slot < 0);
4334 	BUG_ON(slot >= nritems);
4335 
4336 	/*
4337 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4338 	 */
4339 	/* first correct the data pointers */
4340 	for (i = slot; i < nritems; i++) {
4341 		u32 ioff;
4342 		item = btrfs_item_nr(leaf, i);
4343 
4344 		ioff = btrfs_token_item_offset(leaf, item, &token);
4345 		btrfs_set_token_item_offset(leaf, item,
4346 					    ioff + size_diff, &token);
4347 	}
4348 
4349 	/* shift the data */
4350 	if (from_end) {
4351 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4352 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4353 			      data_end, old_data_start + new_size - data_end);
4354 	} else {
4355 		struct btrfs_disk_key disk_key;
4356 		u64 offset;
4357 
4358 		btrfs_item_key(leaf, &disk_key, slot);
4359 
4360 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4361 			unsigned long ptr;
4362 			struct btrfs_file_extent_item *fi;
4363 
4364 			fi = btrfs_item_ptr(leaf, slot,
4365 					    struct btrfs_file_extent_item);
4366 			fi = (struct btrfs_file_extent_item *)(
4367 			     (unsigned long)fi - size_diff);
4368 
4369 			if (btrfs_file_extent_type(leaf, fi) ==
4370 			    BTRFS_FILE_EXTENT_INLINE) {
4371 				ptr = btrfs_item_ptr_offset(leaf, slot);
4372 				memmove_extent_buffer(leaf, ptr,
4373 				      (unsigned long)fi,
4374 				      offsetof(struct btrfs_file_extent_item,
4375 						 disk_bytenr));
4376 			}
4377 		}
4378 
4379 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4380 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4381 			      data_end, old_data_start - data_end);
4382 
4383 		offset = btrfs_disk_key_offset(&disk_key);
4384 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4385 		btrfs_set_item_key(leaf, &disk_key, slot);
4386 		if (slot == 0)
4387 			fixup_low_keys(root, path, &disk_key, 1);
4388 	}
4389 
4390 	item = btrfs_item_nr(leaf, slot);
4391 	btrfs_set_item_size(leaf, item, new_size);
4392 	btrfs_mark_buffer_dirty(leaf);
4393 
4394 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4395 		btrfs_print_leaf(root, leaf);
4396 		BUG();
4397 	}
4398 }
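
/*
 * Illustrative sketch (not part of the original file): shrinking the item
 * the path points at by chopping bytes off its end, e.g. when an inline
 * extent is truncated.  Assumes the leaf is already COWed and locked by
 * the caller; the function name is hypothetical.
 */
static void example_shrink_item_tail(struct btrfs_root *root,
				     struct btrfs_path *path, u32 new_size)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (new_size >= old_size)
		return;

	/* from_end == 1: keep the first new_size bytes, drop the tail */
	btrfs_truncate_item(root, path, new_size, 1);
}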
4399 
4400 /*
4401  * make the item pointed to by the path bigger, data_size is the added size.
4402  */
4403 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4404 		       u32 data_size)
4405 {
4406 	int slot;
4407 	struct extent_buffer *leaf;
4408 	struct btrfs_item *item;
4409 	u32 nritems;
4410 	unsigned int data_end;
4411 	unsigned int old_data;
4412 	unsigned int old_size;
4413 	int i;
4414 	struct btrfs_map_token token;
4415 
4416 	btrfs_init_map_token(&token);
4417 
4418 	leaf = path->nodes[0];
4419 
4420 	nritems = btrfs_header_nritems(leaf);
4421 	data_end = leaf_data_end(root, leaf);
4422 
4423 	if (btrfs_leaf_free_space(root, leaf) < data_size) {
4424 		btrfs_print_leaf(root, leaf);
4425 		BUG();
4426 	}
4427 	slot = path->slots[0];
4428 	old_data = btrfs_item_end_nr(leaf, slot);
4429 
4430 	BUG_ON(slot < 0);
4431 	if (slot >= nritems) {
4432 		btrfs_print_leaf(root, leaf);
4433 		printk(KERN_CRIT "slot %d too large, nritems %d\n",
4434 		       slot, nritems);
4435 		BUG_ON(1);
4436 	}
4437 
4438 	/*
4439 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4440 	 */
4441 	/* first correct the data pointers */
4442 	for (i = slot; i < nritems; i++) {
4443 		u32 ioff;
4444 		item = btrfs_item_nr(leaf, i);
4445 
4446 		ioff = btrfs_token_item_offset(leaf, item, &token);
4447 		btrfs_set_token_item_offset(leaf, item,
4448 					    ioff - data_size, &token);
4449 	}
4450 
4451 	/* shift the data */
4452 	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4453 		      data_end - data_size, btrfs_leaf_data(leaf) +
4454 		      data_end, old_data - data_end);
4455 
4456 	data_end = old_data;
4457 	old_size = btrfs_item_size_nr(leaf, slot);
4458 	item = btrfs_item_nr(leaf, slot);
4459 	btrfs_set_item_size(leaf, item, old_size + data_size);
4460 	btrfs_mark_buffer_dirty(leaf);
4461 
4462 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4463 		btrfs_print_leaf(root, leaf);
4464 		BUG();
4465 	}
4466 }
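
/*
 * Illustrative sketch (not part of the original file): growing the item
 * the path points at and zeroing the newly added tail.  The caller must
 * have made sure the leaf has at least extra_bytes of free space, e.g. by
 * searching with ins_len set; the function name is hypothetical.
 */
static void example_append_zeros(struct btrfs_root *root,
				 struct btrfs_path *path, u32 extra_bytes)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	/* the item keeps its old contents; the extra bytes are at the end */
	btrfs_extend_item(root, path, extra_bytes);

	/* the new tail is uninitialized until the caller fills it in */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memset_extent_buffer(leaf, 0, ptr + old_size, extra_bytes);
	btrfs_mark_buffer_dirty(leaf);
}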
4467 
4468 /*
4469  * this is a helper for btrfs_insert_empty_items, the main goal here is
4470  * to save stack depth by doing the bulk of the work in a function
4471  * that doesn't call btrfs_search_slot
4472  */
4473 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4474 			    struct btrfs_key *cpu_key, u32 *data_size,
4475 			    u32 total_data, u32 total_size, int nr)
4476 {
4477 	struct btrfs_item *item;
4478 	int i;
4479 	u32 nritems;
4480 	unsigned int data_end;
4481 	struct btrfs_disk_key disk_key;
4482 	struct extent_buffer *leaf;
4483 	int slot;
4484 	struct btrfs_map_token token;
4485 
4486 	btrfs_init_map_token(&token);
4487 
4488 	leaf = path->nodes[0];
4489 	slot = path->slots[0];
4490 
4491 	nritems = btrfs_header_nritems(leaf);
4492 	data_end = leaf_data_end(root, leaf);
4493 
4494 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
4495 		btrfs_print_leaf(root, leaf);
4496 		printk(KERN_CRIT "not enough freespace need %u have %d\n",
4497 		       total_size, btrfs_leaf_free_space(root, leaf));
4498 		BUG();
4499 	}
4500 
4501 	if (slot != nritems) {
4502 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4503 
4504 		if (old_data < data_end) {
4505 			btrfs_print_leaf(root, leaf);
4506 			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4507 			       slot, old_data, data_end);
4508 			BUG_ON(1);
4509 		}
4510 		/*
4511 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4512 		 */
4513 		/* first correct the data pointers */
4514 		for (i = slot; i < nritems; i++) {
4515 			u32 ioff;
4516 
4517 			item = btrfs_item_nr(leaf, i);
4518 			ioff = btrfs_token_item_offset(leaf, item, &token);
4519 			btrfs_set_token_item_offset(leaf, item,
4520 						    ioff - total_data, &token);
4521 		}
4522 		/* shift the items */
4523 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4524 			      btrfs_item_nr_offset(slot),
4525 			      (nritems - slot) * sizeof(struct btrfs_item));
4526 
4527 		/* shift the data */
4528 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4529 			      data_end - total_data, btrfs_leaf_data(leaf) +
4530 			      data_end, old_data - data_end);
4531 		data_end = old_data;
4532 	}
4533 
4534 	/* setup the item for the new data */
4535 	for (i = 0; i < nr; i++) {
4536 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4537 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4538 		item = btrfs_item_nr(leaf, slot + i);
4539 		btrfs_set_token_item_offset(leaf, item,
4540 					    data_end - data_size[i], &token);
4541 		data_end -= data_size[i];
4542 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4543 	}
4544 
4545 	btrfs_set_header_nritems(leaf, nritems + nr);
4546 
4547 	if (slot == 0) {
4548 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4549 		fixup_low_keys(root, path, &disk_key, 1);
4550 	}
4551 	btrfs_unlock_up_safe(path, 1);
4552 	btrfs_mark_buffer_dirty(leaf);
4553 
4554 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4555 		btrfs_print_leaf(root, leaf);
4556 		BUG();
4557 	}
4558 }
4559 
4560 /*
4561  * Given a key and some data, insert items into the tree.
4562  * This does all the path init required, making room in the tree if needed.
4563  */
4564 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4565 			    struct btrfs_root *root,
4566 			    struct btrfs_path *path,
4567 			    struct btrfs_key *cpu_key, u32 *data_size,
4568 			    int nr)
4569 {
4570 	int ret = 0;
4571 	int slot;
4572 	int i;
4573 	u32 total_size = 0;
4574 	u32 total_data = 0;
4575 
4576 	for (i = 0; i < nr; i++)
4577 		total_data += data_size[i];
4578 
4579 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4580 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4581 	if (ret == 0)
4582 		return -EEXIST;
4583 	if (ret < 0)
4584 		return ret;
4585 
4586 	slot = path->slots[0];
4587 	BUG_ON(slot < 0);
4588 
4589 	setup_items_for_insert(root, path, cpu_key, data_size,
4590 			       total_data, total_size, nr);
4591 	return 0;
4592 }
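
/*
 * Illustrative sketch (not part of the original file): inserting two items
 * with a single tree search.  The keys must already be sorted and must
 * land next to each other in the same leaf; the payloads are only zeroed
 * here.  Function and parameter names are hypothetical.
 */
static int example_insert_pair(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *keys,	/* array of 2 */
			       u32 *sizes)		/* array of 2 */
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
		/* a real caller writes its structures here instead */
		memset_extent_buffer(leaf, 0, ptr, sizes[i]);
	}
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}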
4593 
4594 /*
4595  * Given a key and some data, insert an item into the tree.
4596  * This does all the path init required, making room in the tree if needed.
4597  */
4598 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4599 		      *root, struct btrfs_key *cpu_key, void *data, u32
4600 		      data_size)
4601 {
4602 	int ret = 0;
4603 	struct btrfs_path *path;
4604 	struct extent_buffer *leaf;
4605 	unsigned long ptr;
4606 
4607 	path = btrfs_alloc_path();
4608 	if (!path)
4609 		return -ENOMEM;
4610 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4611 	if (!ret) {
4612 		leaf = path->nodes[0];
4613 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4614 		write_extent_buffer(leaf, data, ptr, data_size);
4615 		btrfs_mark_buffer_dirty(leaf);
4616 	}
4617 	btrfs_free_path(path);
4618 	return ret;
4619 }
4620 
4621 /*
4622  * delete the pointer from a given node.
4623  *
4624  * the tree should have been previously balanced so the deletion does not
4625  * empty a node.
4626  */
4627 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4628 		    int level, int slot)
4629 {
4630 	struct extent_buffer *parent = path->nodes[level];
4631 	u32 nritems;
4632 	int ret;
4633 
4634 	nritems = btrfs_header_nritems(parent);
4635 	if (slot != nritems - 1) {
4636 		if (level)
4637 			tree_mod_log_eb_move(root->fs_info, parent, slot,
4638 					     slot + 1, nritems - slot - 1);
4639 		memmove_extent_buffer(parent,
4640 			      btrfs_node_key_ptr_offset(slot),
4641 			      btrfs_node_key_ptr_offset(slot + 1),
4642 			      sizeof(struct btrfs_key_ptr) *
4643 			      (nritems - slot - 1));
4644 	} else if (level) {
4645 		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4646 					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
4647 		BUG_ON(ret < 0);
4648 	}
4649 
4650 	nritems--;
4651 	btrfs_set_header_nritems(parent, nritems);
4652 	if (nritems == 0 && parent == root->node) {
4653 		BUG_ON(btrfs_header_level(root->node) != 1);
4654 		/* just turn the root into a leaf and break */
4655 		btrfs_set_header_level(root->node, 0);
4656 	} else if (slot == 0) {
4657 		struct btrfs_disk_key disk_key;
4658 
4659 		btrfs_node_key(parent, &disk_key, 0);
4660 		fixup_low_keys(root, path, &disk_key, level + 1);
4661 	}
4662 	btrfs_mark_buffer_dirty(parent);
4663 }
4664 
4665 /*
4666  * a helper function to delete the leaf pointed to by path->slots[1] and
4667  * path->nodes[1].
4668  *
4669  * This deletes the pointer in path->nodes[1] and frees the leaf
4670  * block extent.
4671  *
4672  * The path must have already been setup for deleting the leaf, including
4673  * all the proper balancing.  path->nodes[1] must be locked.
4674  */
4675 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4676 				    struct btrfs_root *root,
4677 				    struct btrfs_path *path,
4678 				    struct extent_buffer *leaf)
4679 {
4680 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4681 	del_ptr(root, path, 1, path->slots[1]);
4682 
4683 	/*
4684 	 * btrfs_free_extent is expensive, we want to make sure we
4685 	 * aren't holding any locks when we call it
4686 	 */
4687 	btrfs_unlock_up_safe(path, 0);
4688 
4689 	root_sub_used(root, leaf->len);
4690 
4691 	extent_buffer_get(leaf);
4692 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4693 	free_extent_buffer_stale(leaf);
4694 }
4695 /*
4696  * delete the item at the leaf level in path.  If that empties
4697  * the leaf, remove it from the tree
4698  */
4699 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4700 		    struct btrfs_path *path, int slot, int nr)
4701 {
4702 	struct extent_buffer *leaf;
4703 	struct btrfs_item *item;
4704 	int last_off;
4705 	int dsize = 0;
4706 	int ret = 0;
4707 	int wret;
4708 	int i;
4709 	u32 nritems;
4710 	struct btrfs_map_token token;
4711 
4712 	btrfs_init_map_token(&token);
4713 
4714 	leaf = path->nodes[0];
4715 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4716 
4717 	for (i = 0; i < nr; i++)
4718 		dsize += btrfs_item_size_nr(leaf, slot + i);
4719 
4720 	nritems = btrfs_header_nritems(leaf);
4721 
4722 	if (slot + nr != nritems) {
4723 		int data_end = leaf_data_end(root, leaf);
4724 
4725 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4726 			      data_end + dsize,
4727 			      btrfs_leaf_data(leaf) + data_end,
4728 			      last_off - data_end);
4729 
4730 		for (i = slot + nr; i < nritems; i++) {
4731 			u32 ioff;
4732 
4733 			item = btrfs_item_nr(leaf, i);
4734 			ioff = btrfs_token_item_offset(leaf, item, &token);
4735 			btrfs_set_token_item_offset(leaf, item,
4736 						    ioff + dsize, &token);
4737 		}
4738 
4739 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4740 			      btrfs_item_nr_offset(slot + nr),
4741 			      sizeof(struct btrfs_item) *
4742 			      (nritems - slot - nr));
4743 	}
4744 	btrfs_set_header_nritems(leaf, nritems - nr);
4745 	nritems -= nr;
4746 
4747 	/* delete the leaf if we've emptied it */
4748 	if (nritems == 0) {
4749 		if (leaf == root->node) {
4750 			btrfs_set_header_level(leaf, 0);
4751 		} else {
4752 			btrfs_set_path_blocking(path);
4753 			clean_tree_block(trans, root, leaf);
4754 			btrfs_del_leaf(trans, root, path, leaf);
4755 		}
4756 	} else {
4757 		int used = leaf_space_used(leaf, 0, nritems);
4758 		if (slot == 0) {
4759 			struct btrfs_disk_key disk_key;
4760 
4761 			btrfs_item_key(leaf, &disk_key, 0);
4762 			fixup_low_keys(root, path, &disk_key, 1);
4763 		}
4764 
4765 		/* delete the leaf if it is mostly empty */
4766 		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4767 			/* push_leaf_left fixes the path.
4768 			 * make sure the path still points to our leaf
4769 			 * for possible call to del_ptr below
4770 			 */
4771 			slot = path->slots[1];
4772 			extent_buffer_get(leaf);
4773 
4774 			btrfs_set_path_blocking(path);
4775 			wret = push_leaf_left(trans, root, path, 1, 1,
4776 					      1, (u32)-1);
4777 			if (wret < 0 && wret != -ENOSPC)
4778 				ret = wret;
4779 
4780 			if (path->nodes[0] == leaf &&
4781 			    btrfs_header_nritems(leaf)) {
4782 				wret = push_leaf_right(trans, root, path, 1,
4783 						       1, 1, 0);
4784 				if (wret < 0 && wret != -ENOSPC)
4785 					ret = wret;
4786 			}
4787 
4788 			if (btrfs_header_nritems(leaf) == 0) {
4789 				path->slots[1] = slot;
4790 				btrfs_del_leaf(trans, root, path, leaf);
4791 				free_extent_buffer(leaf);
4792 				ret = 0;
4793 			} else {
4794 				/* if we're still in the path, make sure
4795 				 * we're dirty.  Otherwise, one of the
4796 				 * push_leaf functions must have already
4797 				 * dirtied this buffer
4798 				 */
4799 				if (path->nodes[0] == leaf)
4800 					btrfs_mark_buffer_dirty(leaf);
4801 				free_extent_buffer(leaf);
4802 			}
4803 		} else {
4804 			btrfs_mark_buffer_dirty(leaf);
4805 		}
4806 	}
4807 	return ret;
4808 }
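
/*
 * Illustrative sketch (not part of the original file): removing the item
 * the path currently points at together with the item that follows it.
 * Assumes the path came from btrfs_search_slot() with cow = 1 so the leaf
 * is writable in this transaction; the helper name is hypothetical.
 */
static int example_del_two(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_path *path)
{
	struct extent_buffer *leaf = path->nodes[0];

	if (path->slots[0] + 2 > btrfs_header_nritems(leaf))
		return -ENOENT;

	/* drops 2 items starting at slots[0] and rebalances if needed */
	return btrfs_del_items(trans, root, path, path->slots[0], 2);
}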
4809 
4810 /*
4811  * search the tree again to find a leaf with lesser keys
4812  * returns 0 if it found something or 1 if there are no lesser leaves.
4813  * returns < 0 on io errors.
4814  *
4815  * This may release the path, and so you may lose any locks held at the
4816  * time you call it.
4817  */
4818 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4819 {
4820 	struct btrfs_key key;
4821 	struct btrfs_disk_key found_key;
4822 	int ret;
4823 
4824 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4825 
4826 	if (key.offset > 0)
4827 		key.offset--;
4828 	else if (key.type > 0)
4829 		key.type--;
4830 	else if (key.objectid > 0)
4831 		key.objectid--;
4832 	else
4833 		return 1;
4834 
4835 	btrfs_release_path(path);
4836 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4837 	if (ret < 0)
4838 		return ret;
4839 	btrfs_item_key(path->nodes[0], &found_key, 0);
4840 	ret = comp_keys(&found_key, &key);
4841 	if (ret < 0)
4842 		return 0;
4843 	return 1;
4844 }
4845 
4846 /*
4847  * A helper function to walk down the tree starting at min_key, and looking
4848  * for nodes or leaves that have a minimum transaction id.
4849  * This is used by the btree defrag code, and tree logging
4850  *
4851  * This does not cow, but it does stuff the starting key it finds back
4852  * into min_key, so you can call btrfs_search_slot with cow=1 on the
4853  * key and get a writable path.
4854  *
4855  * This does lock as it descends, and path->keep_locks should be set
4856  * to 1 by the caller.
4857  *
4858  * This honors path->lowest_level to prevent descent past a given level
4859  * of the tree.
4860  *
4861  * min_trans indicates the oldest transaction that you are interested
4862  * in walking through.  Any nodes or leaves older than min_trans are
4863  * skipped over (without reading them).
4864  *
4865  * returns zero if something useful was found, < 0 on error and 1 if there
4866  * was nothing in the tree that matched the search criteria.
4867  */
4868 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4869 			 struct btrfs_key *max_key,
4870 			 struct btrfs_path *path,
4871 			 u64 min_trans)
4872 {
4873 	struct extent_buffer *cur;
4874 	struct btrfs_key found_key;
4875 	int slot;
4876 	int sret;
4877 	u32 nritems;
4878 	int level;
4879 	int ret = 1;
4880 
4881 	WARN_ON(!path->keep_locks);
4882 again:
4883 	cur = btrfs_read_lock_root_node(root);
4884 	level = btrfs_header_level(cur);
4885 	WARN_ON(path->nodes[level]);
4886 	path->nodes[level] = cur;
4887 	path->locks[level] = BTRFS_READ_LOCK;
4888 
4889 	if (btrfs_header_generation(cur) < min_trans) {
4890 		ret = 1;
4891 		goto out;
4892 	}
4893 	while (1) {
4894 		nritems = btrfs_header_nritems(cur);
4895 		level = btrfs_header_level(cur);
4896 		sret = bin_search(cur, min_key, level, &slot);
4897 
4898 		/* at the lowest level, we're done, setup the path and exit */
4899 		if (level == path->lowest_level) {
4900 			if (slot >= nritems)
4901 				goto find_next_key;
4902 			ret = 0;
4903 			path->slots[level] = slot;
4904 			btrfs_item_key_to_cpu(cur, &found_key, slot);
4905 			goto out;
4906 		}
4907 		if (sret && slot > 0)
4908 			slot--;
4909 		/*
4910 		 * check this node pointer against the min_trans parameter.
4911 		 * If it is too old, skip to the next one.
4912 		 */
4913 		while (slot < nritems) {
4914 			u64 blockptr;
4915 			u64 gen;
4916 
4917 			blockptr = btrfs_node_blockptr(cur, slot);
4918 			gen = btrfs_node_ptr_generation(cur, slot);
4919 			if (gen < min_trans) {
4920 				slot++;
4921 				continue;
4922 			}
4923 			break;
4924 		}
4925 find_next_key:
4926 		/*
4927 		 * we didn't find a candidate key in this node, walk forward
4928 		 * and find another one
4929 		 */
4930 		if (slot >= nritems) {
4931 			path->slots[level] = slot;
4932 			btrfs_set_path_blocking(path);
4933 			sret = btrfs_find_next_key(root, path, min_key, level,
4934 						  min_trans);
4935 			if (sret == 0) {
4936 				btrfs_release_path(path);
4937 				goto again;
4938 			} else {
4939 				goto out;
4940 			}
4941 		}
4942 		/* save our key for returning back */
4943 		btrfs_node_key_to_cpu(cur, &found_key, slot);
4944 		path->slots[level] = slot;
4945 		if (level == path->lowest_level) {
4946 			ret = 0;
4947 			unlock_up(path, level, 1, 0, NULL);
4948 			goto out;
4949 		}
4950 		btrfs_set_path_blocking(path);
4951 		cur = read_node_slot(root, cur, slot);
4952 		BUG_ON(!cur); /* -ENOMEM */
4953 
4954 		btrfs_tree_read_lock(cur);
4955 
4956 		path->locks[level - 1] = BTRFS_READ_LOCK;
4957 		path->nodes[level - 1] = cur;
4958 		unlock_up(path, level, 1, 0, NULL);
4959 		btrfs_clear_path_blocking(path, NULL, 0);
4960 	}
4961 out:
4962 	if (ret == 0)
4963 		memcpy(min_key, &found_key, sizeof(found_key));
4964 	btrfs_set_path_blocking(path);
4965 	return ret;
4966 }
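
/*
 * Illustrative sketch (not part of the original file): visiting every leaf
 * item written in or after transaction min_trans, the way the defrag and
 * tree-log code use this helper.  The max_key argument is not consulted by
 * the version above, so NULL is passed; the function name is hypothetical.
 */
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	path->keep_locks = 1;	/* required by btrfs_search_forward */
	while (1) {
		ret = btrfs_search_forward(root, &min_key, NULL, path,
					   min_trans);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* nothing newer is left */
			break;
		}

		/* process path->nodes[0], path->slots[0] here */

		btrfs_release_path(path);

		/* step past the key we just processed */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}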
4967 
4968 static void tree_move_down(struct btrfs_root *root,
4969 			   struct btrfs_path *path,
4970 			   int *level, int root_level)
4971 {
4972 	BUG_ON(*level == 0);
4973 	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4974 					path->slots[*level]);
4975 	path->slots[*level - 1] = 0;
4976 	(*level)--;
4977 }
4978 
4979 static int tree_move_next_or_upnext(struct btrfs_root *root,
4980 				    struct btrfs_path *path,
4981 				    int *level, int root_level)
4982 {
4983 	int ret = 0;
4984 	int nritems;
4985 	nritems = btrfs_header_nritems(path->nodes[*level]);
4986 
4987 	path->slots[*level]++;
4988 
4989 	while (path->slots[*level] >= nritems) {
4990 		if (*level == root_level)
4991 			return -1;
4992 
4993 		/* move upnext */
4994 		path->slots[*level] = 0;
4995 		free_extent_buffer(path->nodes[*level]);
4996 		path->nodes[*level] = NULL;
4997 		(*level)++;
4998 		path->slots[*level]++;
4999 
5000 		nritems = btrfs_header_nritems(path->nodes[*level]);
5001 		ret = 1;
5002 	}
5003 	return ret;
5004 }
5005 
5006 /*
5007  * Returns 1 if it had to move up and next, 0 if it moved only next or down,
5008  * and < 0 if there was nothing left to advance to.
5009  */
5010 static int tree_advance(struct btrfs_root *root,
5011 			struct btrfs_path *path,
5012 			int *level, int root_level,
5013 			int allow_down,
5014 			struct btrfs_key *key)
5015 {
5016 	int ret;
5017 
5018 	if (*level == 0 || !allow_down) {
5019 		ret = tree_move_next_or_upnext(root, path, level, root_level);
5020 	} else {
5021 		tree_move_down(root, path, level, root_level);
5022 		ret = 0;
5023 	}
5024 	if (ret >= 0) {
5025 		if (*level == 0)
5026 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5027 					path->slots[*level]);
5028 		else
5029 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5030 					path->slots[*level]);
5031 	}
5032 	return ret;
5033 }
5034 
5035 static int tree_compare_item(struct btrfs_root *left_root,
5036 			     struct btrfs_path *left_path,
5037 			     struct btrfs_path *right_path,
5038 			     char *tmp_buf)
5039 {
5040 	int cmp;
5041 	int len1, len2;
5042 	unsigned long off1, off2;
5043 
5044 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5045 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5046 	if (len1 != len2)
5047 		return 1;
5048 
5049 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5050 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5051 				right_path->slots[0]);
5052 
5053 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5054 
5055 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5056 	if (cmp)
5057 		return 1;
5058 	return 0;
5059 }
5060 
5061 #define ADVANCE 1
5062 #define ADVANCE_ONLY_NEXT -1
5063 
5064 /*
5065  * This function compares two trees and calls the provided callback for
5066  * every changed/new/deleted item it finds.
5067  * If shared tree blocks are encountered, whole subtrees are skipped, making
5068  * the compare pretty fast on snapshotted subvolumes.
5069  *
5070  * This currently works on commit roots only. As commit roots are read only,
5071  * we don't do any locking. The commit roots are protected with transactions.
5072  * Transactions are ended and rejoined when a commit is tried in between.
5073  *
5074  * This function checks for modifications done to the trees while comparing.
5075  * If it detects a change, it aborts immediately.
5076  */
5077 int btrfs_compare_trees(struct btrfs_root *left_root,
5078 			struct btrfs_root *right_root,
5079 			btrfs_changed_cb_t changed_cb, void *ctx)
5080 {
5081 	int ret;
5082 	int cmp;
5083 	struct btrfs_trans_handle *trans = NULL;
5084 	struct btrfs_path *left_path = NULL;
5085 	struct btrfs_path *right_path = NULL;
5086 	struct btrfs_key left_key;
5087 	struct btrfs_key right_key;
5088 	char *tmp_buf = NULL;
5089 	int left_root_level;
5090 	int right_root_level;
5091 	int left_level;
5092 	int right_level;
5093 	int left_end_reached;
5094 	int right_end_reached;
5095 	int advance_left;
5096 	int advance_right;
5097 	u64 left_blockptr;
5098 	u64 right_blockptr;
5099 	u64 left_start_ctransid;
5100 	u64 right_start_ctransid;
5101 	u64 ctransid;
5102 
5103 	left_path = btrfs_alloc_path();
5104 	if (!left_path) {
5105 		ret = -ENOMEM;
5106 		goto out;
5107 	}
5108 	right_path = btrfs_alloc_path();
5109 	if (!right_path) {
5110 		ret = -ENOMEM;
5111 		goto out;
5112 	}
5113 
5114 	tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5115 	if (!tmp_buf) {
5116 		ret = -ENOMEM;
5117 		goto out;
5118 	}
5119 
5120 	left_path->search_commit_root = 1;
5121 	left_path->skip_locking = 1;
5122 	right_path->search_commit_root = 1;
5123 	right_path->skip_locking = 1;
5124 
5125 	spin_lock(&left_root->root_item_lock);
5126 	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5127 	spin_unlock(&left_root->root_item_lock);
5128 
5129 	spin_lock(&right_root->root_item_lock);
5130 	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5131 	spin_unlock(&right_root->root_item_lock);
5132 
5133 	trans = btrfs_join_transaction(left_root);
5134 	if (IS_ERR(trans)) {
5135 		ret = PTR_ERR(trans);
5136 		trans = NULL;
5137 		goto out;
5138 	}
5139 
5140 	/*
5141 	 * Strategy: Go to the first items of both trees. Then do
5142 	 *
5143 	 * If both trees are at level 0
5144 	 *   Compare keys of current items
5145 	 *     If left < right treat left item as new, advance left tree
5146 	 *       and repeat
5147 	 *     If left > right treat right item as deleted, advance right tree
5148 	 *       and repeat
5149 	 *     If left == right do deep compare of items, treat as changed if
5150 	 *       needed, advance both trees and repeat
5151 	 * If both trees are at the same level but not at level 0
5152 	 *   Compare keys of current nodes/leaves
5153 	 *     If left < right advance left tree and repeat
5154 	 *     If left > right advance right tree and repeat
5155 	 *     If left == right compare blockptrs of the next nodes/leaves
5156 	 *       If they match advance both trees but stay at the same level
5157 	 *         and repeat
5158 	 *       If they don't match advance both trees while allowing to go
5159 	 *         deeper and repeat
5160 	 * If tree levels are different
5161 	 *   Advance the tree that needs it and repeat
5162 	 *
5163 	 * Advancing a tree means:
5164 	 *   If we are at level 0, try to go to the next slot. If that's not
5165 	 *   possible, go one level up and repeat. Stop when we found a level
5166 	 *   where we could go to the next slot. We may at this point be on a
5167 	 *   node or a leaf.
5168 	 *
5169 	 *   If we are not at level 0 and not on shared tree blocks, go one
5170 	 *   level deeper.
5171 	 *
5172 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5173 	 *   the right if possible or go up and right.
5174 	 */
5175 
5176 	left_level = btrfs_header_level(left_root->commit_root);
5177 	left_root_level = left_level;
5178 	left_path->nodes[left_level] = left_root->commit_root;
5179 	extent_buffer_get(left_path->nodes[left_level]);
5180 
5181 	right_level = btrfs_header_level(right_root->commit_root);
5182 	right_root_level = right_level;
5183 	right_path->nodes[right_level] = right_root->commit_root;
5184 	extent_buffer_get(right_path->nodes[right_level]);
5185 
5186 	if (left_level == 0)
5187 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5188 				&left_key, left_path->slots[left_level]);
5189 	else
5190 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5191 				&left_key, left_path->slots[left_level]);
5192 	if (right_level == 0)
5193 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5194 				&right_key, right_path->slots[right_level]);
5195 	else
5196 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5197 				&right_key, right_path->slots[right_level]);
5198 
5199 	left_end_reached = right_end_reached = 0;
5200 	advance_left = advance_right = 0;
5201 
5202 	while (1) {
5203 		/*
5204 		 * We need to make sure the transaction does not get committed
5205 		 * while we do anything on commit roots. This means, we need to
5206 		 * join and leave transactions for every item that we process.
5207 		 */
5208 		if (trans && btrfs_should_end_transaction(trans, left_root)) {
5209 			btrfs_release_path(left_path);
5210 			btrfs_release_path(right_path);
5211 
5212 			ret = btrfs_end_transaction(trans, left_root);
5213 			trans = NULL;
5214 			if (ret < 0)
5215 				goto out;
5216 		}
5217 		/* now rejoin the transaction */
5218 		if (!trans) {
5219 			trans = btrfs_join_transaction(left_root);
5220 			if (IS_ERR(trans)) {
5221 				ret = PTR_ERR(trans);
5222 				trans = NULL;
5223 				goto out;
5224 			}
5225 
5226 			spin_lock(&left_root->root_item_lock);
5227 			ctransid = btrfs_root_ctransid(&left_root->root_item);
5228 			spin_unlock(&left_root->root_item_lock);
5229 			if (ctransid != left_start_ctransid)
5230 				left_start_ctransid = 0;
5231 
5232 			spin_lock(&right_root->root_item_lock);
5233 			ctransid = btrfs_root_ctransid(&right_root->root_item);
5234 			spin_unlock(&right_root->root_item_lock);
5235 			if (ctransid != right_start_ctransid)
5236 				right_start_ctransid = 0;
5237 
5238 			if (!left_start_ctransid || !right_start_ctransid) {
5239 				WARN(1, KERN_WARNING
5240 					"btrfs: btrfs_compare_tree detected "
5241 					"a change in one of the trees while "
5242 					"iterating. This is probably a "
5243 					"bug.\n");
5244 				ret = -EIO;
5245 				goto out;
5246 			}
5247 
5248 			/*
5249 			 * the commit root may have changed, so start again
5250 			 * where we stopped
5251 			 */
5252 			left_path->lowest_level = left_level;
5253 			right_path->lowest_level = right_level;
5254 			ret = btrfs_search_slot(NULL, left_root,
5255 					&left_key, left_path, 0, 0);
5256 			if (ret < 0)
5257 				goto out;
5258 			ret = btrfs_search_slot(NULL, right_root,
5259 					&right_key, right_path, 0, 0);
5260 			if (ret < 0)
5261 				goto out;
5262 		}
5263 
5264 		if (advance_left && !left_end_reached) {
5265 			ret = tree_advance(left_root, left_path, &left_level,
5266 					left_root_level,
5267 					advance_left != ADVANCE_ONLY_NEXT,
5268 					&left_key);
5269 			if (ret < 0)
5270 				left_end_reached = ADVANCE;
5271 			advance_left = 0;
5272 		}
5273 		if (advance_right && !right_end_reached) {
5274 			ret = tree_advance(right_root, right_path, &right_level,
5275 					right_root_level,
5276 					advance_right != ADVANCE_ONLY_NEXT,
5277 					&right_key);
5278 			if (ret < 0)
5279 				right_end_reached = ADVANCE;
5280 			advance_right = 0;
5281 		}
5282 
5283 		if (left_end_reached && right_end_reached) {
5284 			ret = 0;
5285 			goto out;
5286 		} else if (left_end_reached) {
5287 			if (right_level == 0) {
5288 				ret = changed_cb(left_root, right_root,
5289 						left_path, right_path,
5290 						&right_key,
5291 						BTRFS_COMPARE_TREE_DELETED,
5292 						ctx);
5293 				if (ret < 0)
5294 					goto out;
5295 			}
5296 			advance_right = ADVANCE;
5297 			continue;
5298 		} else if (right_end_reached) {
5299 			if (left_level == 0) {
5300 				ret = changed_cb(left_root, right_root,
5301 						left_path, right_path,
5302 						&left_key,
5303 						BTRFS_COMPARE_TREE_NEW,
5304 						ctx);
5305 				if (ret < 0)
5306 					goto out;
5307 			}
5308 			advance_left = ADVANCE;
5309 			continue;
5310 		}
5311 
5312 		if (left_level == 0 && right_level == 0) {
5313 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5314 			if (cmp < 0) {
5315 				ret = changed_cb(left_root, right_root,
5316 						left_path, right_path,
5317 						&left_key,
5318 						BTRFS_COMPARE_TREE_NEW,
5319 						ctx);
5320 				if (ret < 0)
5321 					goto out;
5322 				advance_left = ADVANCE;
5323 			} else if (cmp > 0) {
5324 				ret = changed_cb(left_root, right_root,
5325 						left_path, right_path,
5326 						&right_key,
5327 						BTRFS_COMPARE_TREE_DELETED,
5328 						ctx);
5329 				if (ret < 0)
5330 					goto out;
5331 				advance_right = ADVANCE;
5332 			} else {
5333 				enum btrfs_compare_tree_result cmp;
5334 
5335 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5336 				ret = tree_compare_item(left_root, left_path,
5337 						right_path, tmp_buf);
5338 				if (ret)
5339 					cmp = BTRFS_COMPARE_TREE_CHANGED;
5340 				else
5341 					cmp = BTRFS_COMPARE_TREE_SAME;
5342 				ret = changed_cb(left_root, right_root,
5343 						 left_path, right_path,
5344 						 &left_key, cmp, ctx);
5345 				if (ret < 0)
5346 					goto out;
5347 				advance_left = ADVANCE;
5348 				advance_right = ADVANCE;
5349 			}
5350 		} else if (left_level == right_level) {
5351 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5352 			if (cmp < 0) {
5353 				advance_left = ADVANCE;
5354 			} else if (cmp > 0) {
5355 				advance_right = ADVANCE;
5356 			} else {
5357 				left_blockptr = btrfs_node_blockptr(
5358 						left_path->nodes[left_level],
5359 						left_path->slots[left_level]);
5360 				right_blockptr = btrfs_node_blockptr(
5361 						right_path->nodes[right_level],
5362 						right_path->slots[right_level]);
5363 				if (left_blockptr == right_blockptr) {
5364 					/*
5365 					 * As we're on a shared block, don't
5366 					 * go any deeper.
5367 					 */
5368 					advance_left = ADVANCE_ONLY_NEXT;
5369 					advance_right = ADVANCE_ONLY_NEXT;
5370 				} else {
5371 					advance_left = ADVANCE;
5372 					advance_right = ADVANCE;
5373 				}
5374 			}
5375 		} else if (left_level < right_level) {
5376 			advance_right = ADVANCE;
5377 		} else {
5378 			advance_left = ADVANCE;
5379 		}
5380 	}
5381 
5382 out:
5383 	btrfs_free_path(left_path);
5384 	btrfs_free_path(right_path);
5385 	kfree(tmp_buf);
5386 
5387 	if (trans) {
5388 		if (!ret)
5389 			ret = btrfs_end_transaction(trans, left_root);
5390 		else
5391 			btrfs_end_transaction(trans, left_root);
5392 	}
5393 
5394 	return ret;
5395 }
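
/*
 * Illustrative sketch (not part of the original file): a minimal changed_cb
 * plus a driver that diffs two snapshot roots, in the spirit of the send
 * code.  It only counts the kinds of differences; the context struct and
 * both function names are hypothetical.
 */
struct example_diff_ctx {
	u64 new_items;
	u64 deleted_items;
	u64 changed_items;
};

static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_diff_ctx *diff = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		diff->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		diff->deleted_items++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		diff->changed_items++;
		break;
	default:	/* BTRFS_COMPARE_TREE_SAME */
		break;
	}
	return 0;
}

static int example_diff_snapshots(struct btrfs_root *new_root,
				  struct btrfs_root *old_root,
				  struct example_diff_ctx *diff)
{
	memset(diff, 0, sizeof(*diff));
	/* the first root is treated as the newer of the two trees */
	return btrfs_compare_trees(new_root, old_root,
				   example_changed_cb, diff);
}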
5396 
5397 /*
5398  * this is similar to btrfs_next_leaf, but does not try to preserve
5399  * and fixup the path.  It looks for and returns the next key in the
5400  * tree based on the current path and the min_trans parameters.
5401  *
5402  * 0 is returned if another key is found, < 0 if there are any errors
5403  * and 1 is returned if there are no higher keys in the tree
5404  *
5405  * path->keep_locks should be set to 1 on the search made before
5406  * calling this function.
5407  */
5408 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5409 			struct btrfs_key *key, int level, u64 min_trans)
5410 {
5411 	int slot;
5412 	struct extent_buffer *c;
5413 
5414 	WARN_ON(!path->keep_locks);
5415 	while (level < BTRFS_MAX_LEVEL) {
5416 		if (!path->nodes[level])
5417 			return 1;
5418 
5419 		slot = path->slots[level] + 1;
5420 		c = path->nodes[level];
5421 next:
5422 		if (slot >= btrfs_header_nritems(c)) {
5423 			int ret;
5424 			int orig_lowest;
5425 			struct btrfs_key cur_key;
5426 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5427 			    !path->nodes[level + 1])
5428 				return 1;
5429 
5430 			if (path->locks[level + 1]) {
5431 				level++;
5432 				continue;
5433 			}
5434 
5435 			slot = btrfs_header_nritems(c) - 1;
5436 			if (level == 0)
5437 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5438 			else
5439 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5440 
5441 			orig_lowest = path->lowest_level;
5442 			btrfs_release_path(path);
5443 			path->lowest_level = level;
5444 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5445 						0, 0);
5446 			path->lowest_level = orig_lowest;
5447 			if (ret < 0)
5448 				return ret;
5449 
5450 			c = path->nodes[level];
5451 			slot = path->slots[level];
5452 			if (ret == 0)
5453 				slot++;
5454 			goto next;
5455 		}
5456 
5457 		if (level == 0)
5458 			btrfs_item_key_to_cpu(c, key, slot);
5459 		else {
5460 			u64 gen = btrfs_node_ptr_generation(c, slot);
5461 
5462 			if (gen < min_trans) {
5463 				slot++;
5464 				goto next;
5465 			}
5466 			btrfs_node_key_to_cpu(c, key, slot);
5467 		}
5468 		return 0;
5469 	}
5470 	return 1;
5471 }
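
/*
 * A minimal usage sketch, assuming kernel context: peek at the key that
 * follows a given position without advancing the path.  As the comment
 * above requires, the search that fills the path is done with
 * path->keep_locks = 1.  peek_next_key() is an illustrative wrapper, not a
 * btrfs helper; for simplicity it assumes 'first' names an existing item.
 */
static int peek_next_key(struct btrfs_root *root, struct btrfs_key *first,
			 struct btrfs_key *next)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, first, path, 0, 0);
	if (ret < 0)
		goto out;

	/* level 0, min_trans 0: next item key, no generation filtering */
	ret = btrfs_find_next_key(root, path, next, 0, 0);
out:
	btrfs_free_path(path);
	return ret;	/* 0: *next is valid, 1: no higher key, < 0: error */
}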
5472 
5473 /*
5474  * search the tree again to find a leaf with greater keys.
5475  * returns 0 if it found something, 1 if there are no greater leaves
5476  * and < 0 on io errors.
5477  */
5478 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5479 {
5480 	return btrfs_next_old_leaf(root, path, 0);
5481 }
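
/*
 * A minimal sketch, assuming kernel context, of the usual iteration pattern
 * built on btrfs_next_leaf(): visit every item of a tree starting from a
 * given key.  walk_items_from() is illustrative only.
 */
static int walk_items_from(struct btrfs_root *root, struct btrfs_key *start)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)	/* no more leaves */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found, path->slots[0]);
		/* ... inspect the item described by 'found' here ... */
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}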
5482 
5483 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5484 			u64 time_seq)
5485 {
5486 	int slot;
5487 	int level;
5488 	struct extent_buffer *c;
5489 	struct extent_buffer *next;
5490 	struct btrfs_key key;
5491 	u32 nritems;
5492 	int ret;
5493 	int old_spinning = path->leave_spinning;
5494 	int next_rw_lock = 0;
5495 
5496 	nritems = btrfs_header_nritems(path->nodes[0]);
5497 	if (nritems == 0)
5498 		return 1;
5499 
5500 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5501 again:
5502 	level = 1;
5503 	next = NULL;
5504 	next_rw_lock = 0;
5505 	btrfs_release_path(path);
5506 
5507 	path->keep_locks = 1;
5508 	path->leave_spinning = 1;
5509 
5510 	if (time_seq)
5511 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5512 	else
5513 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5514 	path->keep_locks = 0;
5515 
5516 	if (ret < 0)
5517 		return ret;
5518 
5519 	nritems = btrfs_header_nritems(path->nodes[0]);
5520 	/*
5521 	 * by releasing the path above we dropped all our locks.  A balance
5522 	 * could have added more items next to the key that used to be
5523 	 * at the very end of the block.  So, check again here and
5524 	 * advance the path if there are now more items available.
5525 	 */
5526 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5527 		if (ret == 0)
5528 			path->slots[0]++;
5529 		ret = 0;
5530 		goto done;
5531 	}
5532 
5533 	while (level < BTRFS_MAX_LEVEL) {
5534 		if (!path->nodes[level]) {
5535 			ret = 1;
5536 			goto done;
5537 		}
5538 
5539 		slot = path->slots[level] + 1;
5540 		c = path->nodes[level];
5541 		if (slot >= btrfs_header_nritems(c)) {
5542 			level++;
5543 			if (level == BTRFS_MAX_LEVEL) {
5544 				ret = 1;
5545 				goto done;
5546 			}
5547 			continue;
5548 		}
5549 
5550 		if (next) {
5551 			btrfs_tree_unlock_rw(next, next_rw_lock);
5552 			free_extent_buffer(next);
5553 		}
5554 
5555 		next = c;
5556 		next_rw_lock = path->locks[level];
5557 		ret = read_block_for_search(NULL, root, path, &next, level,
5558 					    slot, &key, 0);
5559 		if (ret == -EAGAIN)
5560 			goto again;
5561 
5562 		if (ret < 0) {
5563 			btrfs_release_path(path);
5564 			goto done;
5565 		}
5566 
5567 		if (!path->skip_locking) {
5568 			ret = btrfs_try_tree_read_lock(next);
5569 			if (!ret && time_seq) {
5570 				/*
5571 				 * If we don't get the lock, we may be racing
5572 				 * with push_leaf_left, which holds that lock
5573 				 * while itself waiting for the leaf we have
5574 				 * currently locked. To avoid this deadlock, we
5575 				 * give up our lock and cycle.
5576 				 */
5577 				free_extent_buffer(next);
5578 				btrfs_release_path(path);
5579 				cond_resched();
5580 				goto again;
5581 			}
5582 			if (!ret) {
5583 				btrfs_set_path_blocking(path);
5584 				btrfs_tree_read_lock(next);
5585 				btrfs_clear_path_blocking(path, next,
5586 							  BTRFS_READ_LOCK);
5587 			}
5588 			next_rw_lock = BTRFS_READ_LOCK;
5589 		}
5590 		break;
5591 	}
5592 	path->slots[level] = slot;
5593 	while (1) {
5594 		level--;
5595 		c = path->nodes[level];
5596 		if (path->locks[level])
5597 			btrfs_tree_unlock_rw(c, path->locks[level]);
5598 
5599 		free_extent_buffer(c);
5600 		path->nodes[level] = next;
5601 		path->slots[level] = 0;
5602 		if (!path->skip_locking)
5603 			path->locks[level] = next_rw_lock;
5604 		if (!level)
5605 			break;
5606 
5607 		ret = read_block_for_search(NULL, root, path, &next, level,
5608 					    0, &key, 0);
5609 		if (ret == -EAGAIN)
5610 			goto again;
5611 
5612 		if (ret < 0) {
5613 			btrfs_release_path(path);
5614 			goto done;
5615 		}
5616 
5617 		if (!path->skip_locking) {
5618 			ret = btrfs_try_tree_read_lock(next);
5619 			if (!ret) {
5620 				btrfs_set_path_blocking(path);
5621 				btrfs_tree_read_lock(next);
5622 				btrfs_clear_path_blocking(path, next,
5623 							  BTRFS_READ_LOCK);
5624 			}
5625 			next_rw_lock = BTRFS_READ_LOCK;
5626 		}
5627 	}
5628 	ret = 0;
5629 done:
5630 	unlock_up(path, 0, 1, 0, NULL);
5631 	path->leave_spinning = old_spinning;
5632 	if (!old_spinning)
5633 		btrfs_set_path_blocking(path);
5634 
5635 	return ret;
5636 }
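
/*
 * A minimal sketch, assuming kernel context: the same walk as the one after
 * btrfs_next_leaf() above, but against a past version of the tree selected
 * by time_seq.  The caller is assumed to already hold a tree mod log
 * sequence number (as the backref walking code does); obtaining one is
 * outside this sketch.  walk_old_items_from() is illustrative only.
 */
static int walk_old_items_from(struct btrfs_root *root,
			       struct btrfs_key *start, u64 time_seq)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_old_slot(root, start, path, time_seq);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_old_leaf(root, path, time_seq);
			if (ret)	/* 1: done, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
		/* ... inspect the historical item 'found' here ... */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}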
5637 
5638 /*
5639  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5640  * searching until it gets past min_objectid or finds an item of 'type'
5641  *
5642  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5643  */
5644 int btrfs_previous_item(struct btrfs_root *root,
5645 			struct btrfs_path *path, u64 min_objectid,
5646 			int type)
5647 {
5648 	struct btrfs_key found_key;
5649 	struct extent_buffer *leaf;
5650 	u32 nritems;
5651 	int ret;
5652 
5653 	while (1) {
5654 		if (path->slots[0] == 0) {
5655 			btrfs_set_path_blocking(path);
5656 			ret = btrfs_prev_leaf(root, path);
5657 			if (ret != 0)
5658 				return ret;
5659 		} else {
5660 			path->slots[0]--;
5661 		}
5662 		leaf = path->nodes[0];
5663 		nritems = btrfs_header_nritems(leaf);
5664 		if (nritems == 0)
5665 			return 1;
5666 		if (path->slots[0] == nritems)
5667 			path->slots[0]--;
5668 
5669 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5670 		if (found_key.objectid < min_objectid)
5671 			break;
5672 		if (found_key.type == type)
5673 			return 0;
5674 		if (found_key.objectid == min_objectid &&
5675 		    found_key.type < type)
5676 			break;
5677 	}
5678 	return 1;
5679 }
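
/*
 * A minimal usage sketch, assuming kernel context, of the common pattern
 * around btrfs_previous_item(): search just past the region of interest and
 * step back to the last item of a given type for an objectid (the same idea
 * is used e.g. when looking up the last dev item).
 * find_last_item_of_type() is illustrative only.
 */
static int find_last_item_of_type(struct btrfs_root *root, u64 objectid,
				  int type, struct btrfs_key *found)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* position just beyond anything with this objectid and type */
	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* walk backwards until we hit 'type' or drop below objectid */
	ret = btrfs_previous_item(root, path, objectid, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;	/* 0: *found is valid, 1: nothing found, < 0: error */
}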
5680