xref: /openbmc/linux/fs/btrfs/ctree.c (revision b240b419db5d624ce7a5a397d6f62a1a686009ec)
1 /*
2  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include <linux/mm.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "print-tree.h"
27 #include "locking.h"
28 
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 		      *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
32 		      const struct btrfs_key *ins_key, struct btrfs_path *path,
33 		      int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 			  struct btrfs_fs_info *fs_info,
36 			  struct extent_buffer *dst,
37 			  struct extent_buffer *src, int empty);
38 static int balance_node_right(struct btrfs_trans_handle *trans,
39 			      struct btrfs_fs_info *fs_info,
40 			      struct extent_buffer *dst_buf,
41 			      struct extent_buffer *src_buf);
42 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
43 		    int level, int slot);
44 
45 struct btrfs_path *btrfs_alloc_path(void)
46 {
47 	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
48 }
49 
50 /*
51  * set all locked nodes in the path to blocking locks.  This should
52  * be done before scheduling
53  */
54 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
55 {
56 	int i;
57 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
58 		if (!p->nodes[i] || !p->locks[i])
59 			continue;
60 		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
61 		if (p->locks[i] == BTRFS_READ_LOCK)
62 			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
63 		else if (p->locks[i] == BTRFS_WRITE_LOCK)
64 			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
65 	}
66 }
67 
68 /*
69  * reset all the locked nodes in the path to spinning locks.
70  *
71  * held is used to keep lockdep happy: when lockdep is enabled
72  * we set held to a blocking lock before we go around and
73  * retake all the spinlocks in the path.  You can safely use NULL
74  * for held.
75  */
76 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
77 					struct extent_buffer *held, int held_rw)
78 {
79 	int i;
80 
81 	if (held) {
82 		btrfs_set_lock_blocking_rw(held, held_rw);
83 		if (held_rw == BTRFS_WRITE_LOCK)
84 			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
85 		else if (held_rw == BTRFS_READ_LOCK)
86 			held_rw = BTRFS_READ_LOCK_BLOCKING;
87 	}
88 	btrfs_set_path_blocking(p);
89 
90 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
91 		if (p->nodes[i] && p->locks[i]) {
92 			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
93 			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
94 				p->locks[i] = BTRFS_WRITE_LOCK;
95 			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
96 				p->locks[i] = BTRFS_READ_LOCK;
97 		}
98 	}
99 
100 	if (held)
101 		btrfs_clear_lock_blocking_rw(held, held_rw);
102 }
103 
104 /* this also releases the path */
105 void btrfs_free_path(struct btrfs_path *p)
106 {
107 	if (!p)
108 		return;
109 	btrfs_release_path(p);
110 	kmem_cache_free(btrfs_path_cachep, p);
111 }
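/*
 * A typical caller pairs these helpers roughly like below (hypothetical
 * sketch, error handling trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	// ... inspect path->nodes[0] / path->slots[0] ...
 *	btrfs_free_path(path);	// drops locks and refs, then frees
 */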
112 
113 /*
114  * path release drops references on the extent buffers in the path
115  * and it drops any locks held by this path
116  *
117  * It is safe to call this on paths that have no locks or extent buffers held.
118  */
119 noinline void btrfs_release_path(struct btrfs_path *p)
120 {
121 	int i;
122 
123 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
124 		p->slots[i] = 0;
125 		if (!p->nodes[i])
126 			continue;
127 		if (p->locks[i]) {
128 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
129 			p->locks[i] = 0;
130 		}
131 		free_extent_buffer(p->nodes[i]);
132 		p->nodes[i] = NULL;
133 	}
134 }
135 
136 /*
137  * safely gets a reference on the root node of a tree.  A lock
138  * is not taken, so a concurrent writer may put a different node
139  * at the root of the tree.  See btrfs_lock_root_node for the
140  * looping required.
141  *
142  * The extent buffer returned by this has a reference taken, so
143  * it won't disappear.  It may stop being the root of the tree
144  * at any time because there are no locks held.
145  */
146 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
147 {
148 	struct extent_buffer *eb;
149 
150 	while (1) {
151 		rcu_read_lock();
152 		eb = rcu_dereference(root->node);
153 
154 		/*
155 		 * RCU really hurts here, we could free up the root node because
156 		 * it was COWed but we may not get the new root node yet so do
157 		 * the inc_not_zero dance and if it doesn't work then
158 		 * synchronize_rcu and try again.
159 		 */
160 		if (atomic_inc_not_zero(&eb->refs)) {
161 			rcu_read_unlock();
162 			break;
163 		}
164 		rcu_read_unlock();
165 		synchronize_rcu();
166 	}
167 	return eb;
168 }
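/*
 * The loop above is the common RCU "tryget" idiom; the same dance on a
 * hypothetical rcu-protected pointer looks roughly like:
 *
 *	rcu_read_lock();
 *	obj = rcu_dereference(head->ptr);
 *	if (!atomic_inc_not_zero(&obj->refs)) {
 *		rcu_read_unlock();
 *		synchronize_rcu();
 *		goto retry;
 *	}
 *	rcu_read_unlock();
 */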
169 
170 /* loop around taking references on and locking the root node of the
171  * tree until you end up with a lock on the root.  A locked buffer
172  * is returned, with a reference held.
173  */
174 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
175 {
176 	struct extent_buffer *eb;
177 
178 	while (1) {
179 		eb = btrfs_root_node(root);
180 		btrfs_tree_lock(eb);
181 		if (eb == root->node)
182 			break;
183 		btrfs_tree_unlock(eb);
184 		free_extent_buffer(eb);
185 	}
186 	return eb;
187 }
188 
189 /* loop around taking references on and read locking the root node of
190  * the tree until you end up with a read lock on the root.  A read
191  * locked buffer is returned, with a reference held.
192  */
193 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
194 {
195 	struct extent_buffer *eb;
196 
197 	while (1) {
198 		eb = btrfs_root_node(root);
199 		btrfs_tree_read_lock(eb);
200 		if (eb == root->node)
201 			break;
202 		btrfs_tree_read_unlock(eb);
203 		free_extent_buffer(eb);
204 	}
205 	return eb;
206 }
207 
208 /* cowonly roots (everything not a reference counted cow subvolume) just get
209  * put onto a simple dirty list.  transaction.c walks this to make sure they
210  * get properly updated on disk.
211  */
212 static void add_root_to_dirty_list(struct btrfs_root *root)
213 {
214 	struct btrfs_fs_info *fs_info = root->fs_info;
215 
216 	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
217 	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
218 		return;
219 
220 	spin_lock(&fs_info->trans_lock);
221 	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
222 		/* Want the extent tree to be the last on the list */
223 		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
224 			list_move_tail(&root->dirty_list,
225 				       &fs_info->dirty_cowonly_roots);
226 		else
227 			list_move(&root->dirty_list,
228 				  &fs_info->dirty_cowonly_roots);
229 	}
230 	spin_unlock(&fs_info->trans_lock);
231 }
232 
233 /*
234  * used by snapshot creation to make a copy of a root for a tree with
235  * a given objectid.  The buffer with the new root node is returned in
236  * cow_ret, and this function returns zero on success or a negative error code.
237  */
238 int btrfs_copy_root(struct btrfs_trans_handle *trans,
239 		      struct btrfs_root *root,
240 		      struct extent_buffer *buf,
241 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
242 {
243 	struct btrfs_fs_info *fs_info = root->fs_info;
244 	struct extent_buffer *cow;
245 	int ret = 0;
246 	int level;
247 	struct btrfs_disk_key disk_key;
248 
249 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
250 		trans->transid != fs_info->running_transaction->transid);
251 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
252 		trans->transid != root->last_trans);
253 
254 	level = btrfs_header_level(buf);
255 	if (level == 0)
256 		btrfs_item_key(buf, &disk_key, 0);
257 	else
258 		btrfs_node_key(buf, &disk_key, 0);
259 
260 	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
261 			&disk_key, level, buf->start, 0);
262 	if (IS_ERR(cow))
263 		return PTR_ERR(cow);
264 
265 	copy_extent_buffer_full(cow, buf);
266 	btrfs_set_header_bytenr(cow, cow->start);
267 	btrfs_set_header_generation(cow, trans->transid);
268 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
269 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
270 				     BTRFS_HEADER_FLAG_RELOC);
271 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
272 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
273 	else
274 		btrfs_set_header_owner(cow, new_root_objectid);
275 
276 	write_extent_buffer_fsid(cow, fs_info->fsid);
277 
278 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
279 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
280 		ret = btrfs_inc_ref(trans, root, cow, 1);
281 	else
282 		ret = btrfs_inc_ref(trans, root, cow, 0);
283 
284 	if (ret)
285 		return ret;
286 
287 	btrfs_mark_buffer_dirty(cow);
288 	*cow_ret = cow;
289 	return 0;
290 }
291 
292 enum mod_log_op {
293 	MOD_LOG_KEY_REPLACE,
294 	MOD_LOG_KEY_ADD,
295 	MOD_LOG_KEY_REMOVE,
296 	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
297 	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
298 	MOD_LOG_MOVE_KEYS,
299 	MOD_LOG_ROOT_REPLACE,
300 };
301 
302 struct tree_mod_root {
303 	u64 logical;
304 	u8 level;
305 };
306 
307 struct tree_mod_elem {
308 	struct rb_node node;
309 	u64 logical;
310 	u64 seq;
311 	enum mod_log_op op;
312 
313 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
314 	int slot;
315 
316 	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
317 	u64 generation;
318 
319 	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
320 	struct btrfs_disk_key key;
321 	u64 blockptr;
322 
323 	/* this is used for op == MOD_LOG_MOVE_KEYS */
324 	struct {
325 		int dst_slot;
326 		int nr_items;
327 	} move;
328 
329 	/* this is used for op == MOD_LOG_ROOT_REPLACE */
330 	struct tree_mod_root old_root;
331 };
332 
333 /*
334  * Pull a new tree mod seq number for our operation.
335  */
336 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
337 {
338 	return atomic64_inc_return(&fs_info->tree_mod_seq);
339 }
340 
341 /*
342  * This adds a new blocker to the tree mod log's blocker list if the @elem
343  * passed does not already have a sequence number set. So when a caller expects
344  * to record tree modifications, it must ensure elem->seq is set to zero
345  * before calling btrfs_get_tree_mod_seq.
346  * Returns a fresh, unused tree mod log sequence number, even if no new
347  * blocker was added.
348  */
349 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
350 			   struct seq_list *elem)
351 {
352 	write_lock(&fs_info->tree_mod_log_lock);
353 	spin_lock(&fs_info->tree_mod_seq_lock);
354 	if (!elem->seq) {
355 		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
356 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
357 	}
358 	spin_unlock(&fs_info->tree_mod_seq_lock);
359 	write_unlock(&fs_info->tree_mod_log_lock);
360 
361 	return elem->seq;
362 }
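/*
 * A blocker is typically used like this (hypothetical caller, error
 * paths trimmed); SEQ_LIST_INIT leaves elem.seq at zero, which is what
 * makes the get below register a new blocker:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	// ... walk rewound tree state at elem.seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */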
363 
364 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
365 			    struct seq_list *elem)
366 {
367 	struct rb_root *tm_root;
368 	struct rb_node *node;
369 	struct rb_node *next;
370 	struct seq_list *cur_elem;
371 	struct tree_mod_elem *tm;
372 	u64 min_seq = (u64)-1;
373 	u64 seq_putting = elem->seq;
374 
375 	if (!seq_putting)
376 		return;
377 
378 	spin_lock(&fs_info->tree_mod_seq_lock);
379 	list_del(&elem->list);
380 	elem->seq = 0;
381 
382 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
383 		if (cur_elem->seq < min_seq) {
384 			if (seq_putting > cur_elem->seq) {
385 				/*
386 				 * blocker with lower sequence number exists, we
387 				 * cannot remove anything from the log
388 				 */
389 				spin_unlock(&fs_info->tree_mod_seq_lock);
390 				return;
391 			}
392 			min_seq = cur_elem->seq;
393 		}
394 	}
395 	spin_unlock(&fs_info->tree_mod_seq_lock);
396 
397 	/*
398 	 * anything at or below the lowest existing (read: blocked)
399 	 * sequence number can be removed from the tree.
400 	 */
401 	write_lock(&fs_info->tree_mod_log_lock);
402 	tm_root = &fs_info->tree_mod_log;
403 	for (node = rb_first(tm_root); node; node = next) {
404 		next = rb_next(node);
405 		tm = rb_entry(node, struct tree_mod_elem, node);
406 		if (tm->seq > min_seq)
407 			continue;
408 		rb_erase(node, tm_root);
409 		kfree(tm);
410 	}
411 	write_unlock(&fs_info->tree_mod_log_lock);
412 }
413 
414 /*
415  * key order of the log:
416  *       node/leaf start address -> sequence
417  *
418  * The 'start address' is the logical address of the *new* root node
419  * for root replace operations, or the logical address of the affected
420  * block for all other operations.
421  *
422  * Note: must be called with write lock for fs_info::tree_mod_log_lock.
423  */
424 static noinline int
425 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
426 {
427 	struct rb_root *tm_root;
428 	struct rb_node **new;
429 	struct rb_node *parent = NULL;
430 	struct tree_mod_elem *cur;
431 
432 	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
433 
434 	tm_root = &fs_info->tree_mod_log;
435 	new = &tm_root->rb_node;
436 	while (*new) {
437 		cur = rb_entry(*new, struct tree_mod_elem, node);
438 		parent = *new;
439 		if (cur->logical < tm->logical)
440 			new = &((*new)->rb_left);
441 		else if (cur->logical > tm->logical)
442 			new = &((*new)->rb_right);
443 		else if (cur->seq < tm->seq)
444 			new = &((*new)->rb_left);
445 		else if (cur->seq > tm->seq)
446 			new = &((*new)->rb_right);
447 		else
448 			return -EEXIST;
449 	}
450 
451 	rb_link_node(&tm->node, parent, new);
452 	rb_insert_color(&tm->node, tm_root);
453 	return 0;
454 }
455 
456 /*
457  * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
458  * returns zero with the tree_mod_log_lock acquired. The caller must hold
459  * the lock until all tree mod log insertions are recorded in the rb tree,
460  * and then write unlock fs_info::tree_mod_log_lock.
461  */
462 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
463 				    struct extent_buffer *eb) {
464 	smp_mb();
465 	if (list_empty(&fs_info->tree_mod_seq_list))
466 		return 1;
467 	if (eb && btrfs_header_level(eb) == 0)
468 		return 1;
469 
470 	write_lock(&fs_info->tree_mod_log_lock);
471 	if (list_empty(&fs_info->tree_mod_seq_list)) {
472 		write_unlock(&fs_info->tree_mod_log_lock);
473 		return 1;
474 	}
475 
476 	return 0;
477 }
478 
479 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
480 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
481 				    struct extent_buffer *eb)
482 {
483 	smp_mb();
484 	if (list_empty(&fs_info->tree_mod_seq_list))
485 		return 0;
486 	if (eb && btrfs_header_level(eb) == 0)
487 		return 0;
488 
489 	return 1;
490 }
491 
492 static struct tree_mod_elem *
493 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
494 		    enum mod_log_op op, gfp_t flags)
495 {
496 	struct tree_mod_elem *tm;
497 
498 	tm = kzalloc(sizeof(*tm), flags);
499 	if (!tm)
500 		return NULL;
501 
502 	tm->logical = eb->start;
503 	if (op != MOD_LOG_KEY_ADD) {
504 		btrfs_node_key(eb, &tm->key, slot);
505 		tm->blockptr = btrfs_node_blockptr(eb, slot);
506 	}
507 	tm->op = op;
508 	tm->slot = slot;
509 	tm->generation = btrfs_node_ptr_generation(eb, slot);
510 	RB_CLEAR_NODE(&tm->node);
511 
512 	return tm;
513 }
514 
515 static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
516 		enum mod_log_op op, gfp_t flags)
517 {
518 	struct tree_mod_elem *tm;
519 	int ret;
520 
521 	if (!tree_mod_need_log(eb->fs_info, eb))
522 		return 0;
523 
524 	tm = alloc_tree_mod_elem(eb, slot, op, flags);
525 	if (!tm)
526 		return -ENOMEM;
527 
528 	if (tree_mod_dont_log(eb->fs_info, eb)) {
529 		kfree(tm);
530 		return 0;
531 	}
532 
533 	ret = __tree_mod_log_insert(eb->fs_info, tm);
534 	write_unlock(&eb->fs_info->tree_mod_log_lock);
535 	if (ret)
536 		kfree(tm);
537 
538 	return ret;
539 }
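/*
 * Note the ordering used above and in the other tree_mod_log_insert_*
 * helpers: a lockless tree_mod_need_log() check, allocation of the log
 * elements, then a recheck via tree_mod_dont_log(), which returns with
 * tree_mod_log_lock held; the lock is only dropped once the rb-tree
 * insertions are done.
 */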
540 
541 static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
542 		int dst_slot, int src_slot, int nr_items)
543 {
544 	struct tree_mod_elem *tm = NULL;
545 	struct tree_mod_elem **tm_list = NULL;
546 	int ret = 0;
547 	int i;
548 	int locked = 0;
549 
550 	if (!tree_mod_need_log(eb->fs_info, eb))
551 		return 0;
552 
553 	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
554 	if (!tm_list)
555 		return -ENOMEM;
556 
557 	tm = kzalloc(sizeof(*tm), GFP_NOFS);
558 	if (!tm) {
559 		ret = -ENOMEM;
560 		goto free_tms;
561 	}
562 
563 	tm->logical = eb->start;
564 	tm->slot = src_slot;
565 	tm->move.dst_slot = dst_slot;
566 	tm->move.nr_items = nr_items;
567 	tm->op = MOD_LOG_MOVE_KEYS;
568 
569 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
570 		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
571 		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
572 		if (!tm_list[i]) {
573 			ret = -ENOMEM;
574 			goto free_tms;
575 		}
576 	}
577 
578 	if (tree_mod_dont_log(eb->fs_info, eb))
579 		goto free_tms;
580 	locked = 1;
581 
582 	/*
583 	 * When we overwrite something during the move, we log these removals.
584 	 * This can only happen when we move towards the beginning of the
585 	 * buffer, i.e. dst_slot < src_slot.
586 	 */
587 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
588 		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
589 		if (ret)
590 			goto free_tms;
591 	}
592 
593 	ret = __tree_mod_log_insert(eb->fs_info, tm);
594 	if (ret)
595 		goto free_tms;
596 	write_unlock(&eb->fs_info->tree_mod_log_lock);
597 	kfree(tm_list);
598 
599 	return 0;
600 free_tms:
601 	for (i = 0; i < nr_items; i++) {
602 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
603 			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
604 		kfree(tm_list[i]);
605 	}
606 	if (locked)
607 		write_unlock(&eb->fs_info->tree_mod_log_lock);
608 	kfree(tm_list);
609 	kfree(tm);
610 
611 	return ret;
612 }
613 
614 static inline int
615 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
616 		       struct tree_mod_elem **tm_list,
617 		       int nritems)
618 {
619 	int i, j;
620 	int ret;
621 
622 	for (i = nritems - 1; i >= 0; i--) {
623 		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
624 		if (ret) {
625 			for (j = nritems - 1; j > i; j--)
626 				rb_erase(&tm_list[j]->node,
627 					 &fs_info->tree_mod_log);
628 			return ret;
629 		}
630 	}
631 
632 	return 0;
633 }
634 
635 static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
636 			 struct extent_buffer *new_root, int log_removal)
637 {
638 	struct btrfs_fs_info *fs_info = old_root->fs_info;
639 	struct tree_mod_elem *tm = NULL;
640 	struct tree_mod_elem **tm_list = NULL;
641 	int nritems = 0;
642 	int ret = 0;
643 	int i;
644 
645 	if (!tree_mod_need_log(fs_info, NULL))
646 		return 0;
647 
648 	if (log_removal && btrfs_header_level(old_root) > 0) {
649 		nritems = btrfs_header_nritems(old_root);
650 		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
651 				  GFP_NOFS);
652 		if (!tm_list) {
653 			ret = -ENOMEM;
654 			goto free_tms;
655 		}
656 		for (i = 0; i < nritems; i++) {
657 			tm_list[i] = alloc_tree_mod_elem(old_root, i,
658 			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
659 			if (!tm_list[i]) {
660 				ret = -ENOMEM;
661 				goto free_tms;
662 			}
663 		}
664 	}
665 
666 	tm = kzalloc(sizeof(*tm), GFP_NOFS);
667 	if (!tm) {
668 		ret = -ENOMEM;
669 		goto free_tms;
670 	}
671 
672 	tm->logical = new_root->start;
673 	tm->old_root.logical = old_root->start;
674 	tm->old_root.level = btrfs_header_level(old_root);
675 	tm->generation = btrfs_header_generation(old_root);
676 	tm->op = MOD_LOG_ROOT_REPLACE;
677 
678 	if (tree_mod_dont_log(fs_info, NULL))
679 		goto free_tms;
680 
681 	if (tm_list)
682 		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
683 	if (!ret)
684 		ret = __tree_mod_log_insert(fs_info, tm);
685 
686 	write_unlock(&fs_info->tree_mod_log_lock);
687 	if (ret)
688 		goto free_tms;
689 	kfree(tm_list);
690 
691 	return ret;
692 
693 free_tms:
694 	if (tm_list) {
695 		for (i = 0; i < nritems; i++)
696 			kfree(tm_list[i]);
697 		kfree(tm_list);
698 	}
699 	kfree(tm);
700 
701 	return ret;
702 }
703 
704 static struct tree_mod_elem *
705 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
706 		      int smallest)
707 {
708 	struct rb_root *tm_root;
709 	struct rb_node *node;
710 	struct tree_mod_elem *cur = NULL;
711 	struct tree_mod_elem *found = NULL;
712 
713 	read_lock(&fs_info->tree_mod_log_lock);
714 	tm_root = &fs_info->tree_mod_log;
715 	node = tm_root->rb_node;
716 	while (node) {
717 		cur = rb_entry(node, struct tree_mod_elem, node);
718 		if (cur->logical < start) {
719 			node = node->rb_left;
720 		} else if (cur->logical > start) {
721 			node = node->rb_right;
722 		} else if (cur->seq < min_seq) {
723 			node = node->rb_left;
724 		} else if (!smallest) {
725 			/* we want the node with the highest seq */
726 			if (found)
727 				BUG_ON(found->seq > cur->seq);
728 			found = cur;
729 			node = node->rb_left;
730 		} else if (cur->seq > min_seq) {
731 			/* we want the node with the smallest seq */
732 			if (found)
733 				BUG_ON(found->seq < cur->seq);
734 			found = cur;
735 			node = node->rb_right;
736 		} else {
737 			found = cur;
738 			break;
739 		}
740 	}
741 	read_unlock(&fs_info->tree_mod_log_lock);
742 
743 	return found;
744 }
745 
746 /*
747  * this returns the element from the log with the smallest time sequence
748  * value that's in the log (the oldest log item). any element with a time
749  * sequence lower than min_seq will be ignored.
750  */
751 static struct tree_mod_elem *
752 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
753 			   u64 min_seq)
754 {
755 	return __tree_mod_log_search(fs_info, start, min_seq, 1);
756 }
757 
758 /*
759  * this returns the element from the log with the largest time sequence
760  * value that's in the log (the most recent log item). any element with
761  * a time sequence lower than min_seq will be ignored.
762  */
763 static struct tree_mod_elem *
764 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
765 {
766 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
767 }
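/*
 * Worked example: with log elements for the same block at seq 3, 5 and
 * 9 and a min_seq of 4, tree_mod_log_search_oldest() returns the seq 5
 * element, tree_mod_log_search() returns the seq 9 element, and the
 * seq 3 element is ignored by both.
 */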
768 
769 static noinline int
770 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
771 		     struct extent_buffer *src, unsigned long dst_offset,
772 		     unsigned long src_offset, int nr_items)
773 {
774 	int ret = 0;
775 	struct tree_mod_elem **tm_list = NULL;
776 	struct tree_mod_elem **tm_list_add, **tm_list_rem;
777 	int i;
778 	int locked = 0;
779 
780 	if (!tree_mod_need_log(fs_info, NULL))
781 		return 0;
782 
783 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
784 		return 0;
785 
786 	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
787 			  GFP_NOFS);
788 	if (!tm_list)
789 		return -ENOMEM;
790 
791 	tm_list_add = tm_list;
792 	tm_list_rem = tm_list + nr_items;
793 	for (i = 0; i < nr_items; i++) {
794 		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
795 		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
796 		if (!tm_list_rem[i]) {
797 			ret = -ENOMEM;
798 			goto free_tms;
799 		}
800 
801 		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
802 		    MOD_LOG_KEY_ADD, GFP_NOFS);
803 		if (!tm_list_add[i]) {
804 			ret = -ENOMEM;
805 			goto free_tms;
806 		}
807 	}
808 
809 	if (tree_mod_dont_log(fs_info, NULL))
810 		goto free_tms;
811 	locked = 1;
812 
813 	for (i = 0; i < nr_items; i++) {
814 		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
815 		if (ret)
816 			goto free_tms;
817 		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
818 		if (ret)
819 			goto free_tms;
820 	}
821 
822 	write_unlock(&fs_info->tree_mod_log_lock);
823 	kfree(tm_list);
824 
825 	return 0;
826 
827 free_tms:
828 	for (i = 0; i < nr_items * 2; i++) {
829 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
830 			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
831 		kfree(tm_list[i]);
832 	}
833 	if (locked)
834 		write_unlock(&fs_info->tree_mod_log_lock);
835 	kfree(tm_list);
836 
837 	return ret;
838 }
839 
840 static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
841 {
842 	struct tree_mod_elem **tm_list = NULL;
843 	int nritems = 0;
844 	int i;
845 	int ret = 0;
846 
847 	if (btrfs_header_level(eb) == 0)
848 		return 0;
849 
850 	if (!tree_mod_need_log(eb->fs_info, NULL))
851 		return 0;
852 
853 	nritems = btrfs_header_nritems(eb);
854 	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
855 	if (!tm_list)
856 		return -ENOMEM;
857 
858 	for (i = 0; i < nritems; i++) {
859 		tm_list[i] = alloc_tree_mod_elem(eb, i,
860 		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
861 		if (!tm_list[i]) {
862 			ret = -ENOMEM;
863 			goto free_tms;
864 		}
865 	}
866 
867 	if (tree_mod_dont_log(eb->fs_info, eb))
868 		goto free_tms;
869 
870 	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
871 	write_unlock(&eb->fs_info->tree_mod_log_lock);
872 	if (ret)
873 		goto free_tms;
874 	kfree(tm_list);
875 
876 	return 0;
877 
878 free_tms:
879 	for (i = 0; i < nritems; i++)
880 		kfree(tm_list[i]);
881 	kfree(tm_list);
882 
883 	return ret;
884 }
885 
886 /*
887  * check if the tree block can be shared by multiple trees
888  */
889 int btrfs_block_can_be_shared(struct btrfs_root *root,
890 			      struct extent_buffer *buf)
891 {
892 	/*
893 	 * Tree blocks not in reference counted trees and tree roots
894 	 * are never shared. If a block was allocated after the last
895 	 * snapshot and the block was not allocated by tree relocation,
896 	 * we know the block is not shared.
897 	 */
898 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
899 	    buf != root->node && buf != root->commit_root &&
900 	    (btrfs_header_generation(buf) <=
901 	     btrfs_root_last_snapshot(&root->root_item) ||
902 	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
903 		return 1;
904 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
905 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
906 	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
907 		return 1;
908 #endif
909 	return 0;
910 }
911 
912 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
913 				       struct btrfs_root *root,
914 				       struct extent_buffer *buf,
915 				       struct extent_buffer *cow,
916 				       int *last_ref)
917 {
918 	struct btrfs_fs_info *fs_info = root->fs_info;
919 	u64 refs;
920 	u64 owner;
921 	u64 flags;
922 	u64 new_flags = 0;
923 	int ret;
924 
925 	/*
926 	 * Backrefs update rules:
927 	 *
928 	 * Always use full backrefs for extent pointers in tree block
929 	 * allocated by tree relocation.
930 	 *
931 	 * If a shared tree block is no longer referenced by its owner
932 	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
933 	 * use full backrefs for extent pointers in tree block.
934 	 *
935 	 * If a tree block is being relocated
936 	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
937 	 * use full backrefs for extent pointers in tree block.
938 	 * The reason for this is that some operations (such as drop tree)
939 	 * are only allowed for blocks that use full backrefs.
940 	 */
941 
942 	if (btrfs_block_can_be_shared(root, buf)) {
943 		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
944 					       btrfs_header_level(buf), 1,
945 					       &refs, &flags);
946 		if (ret)
947 			return ret;
948 		if (refs == 0) {
949 			ret = -EROFS;
950 			btrfs_handle_fs_error(fs_info, ret, NULL);
951 			return ret;
952 		}
953 	} else {
954 		refs = 1;
955 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
956 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
957 			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
958 		else
959 			flags = 0;
960 	}
961 
962 	owner = btrfs_header_owner(buf);
963 	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
964 	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
965 
966 	if (refs > 1) {
967 		if ((owner == root->root_key.objectid ||
968 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
969 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
970 			ret = btrfs_inc_ref(trans, root, buf, 1);
971 			if (ret)
972 				return ret;
973 
974 			if (root->root_key.objectid ==
975 			    BTRFS_TREE_RELOC_OBJECTID) {
976 				ret = btrfs_dec_ref(trans, root, buf, 0);
977 				if (ret)
978 					return ret;
979 				ret = btrfs_inc_ref(trans, root, cow, 1);
980 				if (ret)
981 					return ret;
982 			}
983 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
984 		} else {
985 
986 			if (root->root_key.objectid ==
987 			    BTRFS_TREE_RELOC_OBJECTID)
988 				ret = btrfs_inc_ref(trans, root, cow, 1);
989 			else
990 				ret = btrfs_inc_ref(trans, root, cow, 0);
991 			if (ret)
992 				return ret;
993 		}
994 		if (new_flags != 0) {
995 			int level = btrfs_header_level(buf);
996 
997 			ret = btrfs_set_disk_extent_flags(trans, fs_info,
998 							  buf->start,
999 							  buf->len,
1000 							  new_flags, level, 0);
1001 			if (ret)
1002 				return ret;
1003 		}
1004 	} else {
1005 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1006 			if (root->root_key.objectid ==
1007 			    BTRFS_TREE_RELOC_OBJECTID)
1008 				ret = btrfs_inc_ref(trans, root, cow, 1);
1009 			else
1010 				ret = btrfs_inc_ref(trans, root, cow, 0);
1011 			if (ret)
1012 				return ret;
1013 			ret = btrfs_dec_ref(trans, root, buf, 1);
1014 			if (ret)
1015 				return ret;
1016 		}
1017 		clean_tree_block(fs_info, buf);
1018 		*last_ref = 1;
1019 	}
1020 	return 0;
1021 }
1022 
1023 /*
1024  * does the dirty work in cow of a single block.  The parent block (if
1025  * supplied) is updated to point to the new cow copy.  The new buffer is marked
1026  * dirty and returned locked.  If you modify the block it needs to be marked
1027  * dirty again.
1028  *
1029  * search_start -- an allocation hint for the new block
1030  *
1031  * empty_size -- a hint that you plan on doing more cow.  This is the size in
1032  * bytes of free space the allocator should try to find next to the block it returns.
1033  * This is just a hint and may be ignored by the allocator.
1034  */
1035 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1036 			     struct btrfs_root *root,
1037 			     struct extent_buffer *buf,
1038 			     struct extent_buffer *parent, int parent_slot,
1039 			     struct extent_buffer **cow_ret,
1040 			     u64 search_start, u64 empty_size)
1041 {
1042 	struct btrfs_fs_info *fs_info = root->fs_info;
1043 	struct btrfs_disk_key disk_key;
1044 	struct extent_buffer *cow;
1045 	int level, ret;
1046 	int last_ref = 0;
1047 	int unlock_orig = 0;
1048 	u64 parent_start = 0;
1049 
1050 	if (*cow_ret == buf)
1051 		unlock_orig = 1;
1052 
1053 	btrfs_assert_tree_locked(buf);
1054 
1055 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1056 		trans->transid != fs_info->running_transaction->transid);
1057 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1058 		trans->transid != root->last_trans);
1059 
1060 	level = btrfs_header_level(buf);
1061 
1062 	if (level == 0)
1063 		btrfs_item_key(buf, &disk_key, 0);
1064 	else
1065 		btrfs_node_key(buf, &disk_key, 0);
1066 
1067 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1068 		parent_start = parent->start;
1069 
1070 	cow = btrfs_alloc_tree_block(trans, root, parent_start,
1071 			root->root_key.objectid, &disk_key, level,
1072 			search_start, empty_size);
1073 	if (IS_ERR(cow))
1074 		return PTR_ERR(cow);
1075 
1076 	/* cow is set to blocking by btrfs_init_new_buffer */
1077 
1078 	copy_extent_buffer_full(cow, buf);
1079 	btrfs_set_header_bytenr(cow, cow->start);
1080 	btrfs_set_header_generation(cow, trans->transid);
1081 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1082 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1083 				     BTRFS_HEADER_FLAG_RELOC);
1084 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1085 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1086 	else
1087 		btrfs_set_header_owner(cow, root->root_key.objectid);
1088 
1089 	write_extent_buffer_fsid(cow, fs_info->fsid);
1090 
1091 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1092 	if (ret) {
1093 		btrfs_abort_transaction(trans, ret);
1094 		return ret;
1095 	}
1096 
1097 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1098 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1099 		if (ret) {
1100 			btrfs_abort_transaction(trans, ret);
1101 			return ret;
1102 		}
1103 	}
1104 
1105 	if (buf == root->node) {
1106 		WARN_ON(parent && parent != buf);
1107 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1108 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1109 			parent_start = buf->start;
1110 
1111 		extent_buffer_get(cow);
1112 		ret = tree_mod_log_insert_root(root->node, cow, 1);
1113 		BUG_ON(ret < 0);
1114 		rcu_assign_pointer(root->node, cow);
1115 
1116 		btrfs_free_tree_block(trans, root, buf, parent_start,
1117 				      last_ref);
1118 		free_extent_buffer(buf);
1119 		add_root_to_dirty_list(root);
1120 	} else {
1121 		WARN_ON(trans->transid != btrfs_header_generation(parent));
1122 		tree_mod_log_insert_key(parent, parent_slot,
1123 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1124 		btrfs_set_node_blockptr(parent, parent_slot,
1125 					cow->start);
1126 		btrfs_set_node_ptr_generation(parent, parent_slot,
1127 					      trans->transid);
1128 		btrfs_mark_buffer_dirty(parent);
1129 		if (last_ref) {
1130 			ret = tree_mod_log_free_eb(buf);
1131 			if (ret) {
1132 				btrfs_abort_transaction(trans, ret);
1133 				return ret;
1134 			}
1135 		}
1136 		btrfs_free_tree_block(trans, root, buf, parent_start,
1137 				      last_ref);
1138 	}
1139 	if (unlock_orig)
1140 		btrfs_tree_unlock(buf);
1141 	free_extent_buffer_stale(buf);
1142 	btrfs_mark_buffer_dirty(cow);
1143 	*cow_ret = cow;
1144 	return 0;
1145 }
1146 
1147 /*
1148  * returns the tree mod log element describing the oldest predecessor of the
1149  * given root. entries older than time_seq are ignored.
1150  */
1151 static struct tree_mod_elem *__tree_mod_log_oldest_root(
1152 		struct extent_buffer *eb_root, u64 time_seq)
1153 {
1154 	struct tree_mod_elem *tm;
1155 	struct tree_mod_elem *found = NULL;
1156 	u64 root_logical = eb_root->start;
1157 	int looped = 0;
1158 
1159 	if (!time_seq)
1160 		return NULL;
1161 
1162 	/*
1163 	 * the very last operation that's logged for a root is the
1164 	 * replacement operation (if it is replaced at all). this has
1165 	 * the logical address of the *new* root, making it the very
1166 	 * first operation that's logged for this root.
1167 	 */
1168 	while (1) {
1169 		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
1170 						time_seq);
1171 		if (!looped && !tm)
1172 			return NULL;
1173 		/*
1174 		 * if there are no tree operations for the oldest root, we simply
1175 		 * return it. this should only happen if that (old) root is at
1176 		 * level 0.
1177 		 */
1178 		if (!tm)
1179 			break;
1180 
1181 		/*
1182 		 * if there's an operation that's not a root replacement, we
1183 		 * found the oldest version of our root. normally, we'll find a
1184 		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1185 		 */
1186 		if (tm->op != MOD_LOG_ROOT_REPLACE)
1187 			break;
1188 
1189 		found = tm;
1190 		root_logical = tm->old_root.logical;
1191 		looped = 1;
1192 	}
1193 
1194 	/* if there's no old root to return, return what we found instead */
1195 	if (!found)
1196 		found = tm;
1197 
1198 	return found;
1199 }
1200 
1201 /*
1202  * tm is a pointer to the first operation to rewind within eb. then, all
1203  * previous operations will be rewound (until we reach something older than
1204  * time_seq).
1205  */
1206 static void
1207 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1208 		      u64 time_seq, struct tree_mod_elem *first_tm)
1209 {
1210 	u32 n;
1211 	struct rb_node *next;
1212 	struct tree_mod_elem *tm = first_tm;
1213 	unsigned long o_dst;
1214 	unsigned long o_src;
1215 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1216 
1217 	n = btrfs_header_nritems(eb);
1218 	read_lock(&fs_info->tree_mod_log_lock);
1219 	while (tm && tm->seq >= time_seq) {
1220 		/*
1221 		 * all the operations are recorded with the operation type used for
1222 		 * the modification. as we're going backwards, we do the
1223 		 * opposite of each operation here.
1224 		 */
1225 		switch (tm->op) {
1226 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1227 			BUG_ON(tm->slot < n);
1228 			/* Fallthrough */
1229 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1230 		case MOD_LOG_KEY_REMOVE:
1231 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1232 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1233 			btrfs_set_node_ptr_generation(eb, tm->slot,
1234 						      tm->generation);
1235 			n++;
1236 			break;
1237 		case MOD_LOG_KEY_REPLACE:
1238 			BUG_ON(tm->slot >= n);
1239 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1240 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1241 			btrfs_set_node_ptr_generation(eb, tm->slot,
1242 						      tm->generation);
1243 			break;
1244 		case MOD_LOG_KEY_ADD:
1245 			/* if a move operation is needed it's in the log */
1246 			n--;
1247 			break;
1248 		case MOD_LOG_MOVE_KEYS:
1249 			o_dst = btrfs_node_key_ptr_offset(tm->slot);
1250 			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1251 			memmove_extent_buffer(eb, o_dst, o_src,
1252 					      tm->move.nr_items * p_size);
1253 			break;
1254 		case MOD_LOG_ROOT_REPLACE:
1255 			/*
1256 			 * this operation is special. for roots, this must be
1257 			 * handled explicitly before rewinding.
1258 			 * for non-roots, this operation may exist if the node
1259 			 * was a root: root A -> child B; then A becomes empty and
1260 			 * B is promoted to the new root. in the mod log, we'll
1261 			 * have a root-replace operation for B, a tree block
1262 			 * that is no longer a root. we simply ignore that operation.
1263 			 */
1264 			break;
1265 		}
1266 		next = rb_next(&tm->node);
1267 		if (!next)
1268 			break;
1269 		tm = rb_entry(next, struct tree_mod_elem, node);
1270 		if (tm->logical != first_tm->logical)
1271 			break;
1272 	}
1273 	read_unlock(&fs_info->tree_mod_log_lock);
1274 	btrfs_set_header_nritems(eb, n);
1275 }
1276 
1277 /*
1278  * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1279  * is returned. If rewind operations happen, a fresh buffer is returned. The
1280  * returned buffer is always read-locked. If the returned buffer is not the
1281  * input buffer, the lock on the input buffer is released and the input buffer
1282  * is freed (its refcount is decremented).
1283  */
1284 static struct extent_buffer *
1285 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1286 		    struct extent_buffer *eb, u64 time_seq)
1287 {
1288 	struct extent_buffer *eb_rewin;
1289 	struct tree_mod_elem *tm;
1290 
1291 	if (!time_seq)
1292 		return eb;
1293 
1294 	if (btrfs_header_level(eb) == 0)
1295 		return eb;
1296 
1297 	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1298 	if (!tm)
1299 		return eb;
1300 
1301 	btrfs_set_path_blocking(path);
1302 	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1303 
1304 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1305 		BUG_ON(tm->slot != 0);
1306 		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1307 		if (!eb_rewin) {
1308 			btrfs_tree_read_unlock_blocking(eb);
1309 			free_extent_buffer(eb);
1310 			return NULL;
1311 		}
1312 		btrfs_set_header_bytenr(eb_rewin, eb->start);
1313 		btrfs_set_header_backref_rev(eb_rewin,
1314 					     btrfs_header_backref_rev(eb));
1315 		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1316 		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1317 	} else {
1318 		eb_rewin = btrfs_clone_extent_buffer(eb);
1319 		if (!eb_rewin) {
1320 			btrfs_tree_read_unlock_blocking(eb);
1321 			free_extent_buffer(eb);
1322 			return NULL;
1323 		}
1324 	}
1325 
1326 	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1327 	btrfs_tree_read_unlock_blocking(eb);
1328 	free_extent_buffer(eb);
1329 
1330 	extent_buffer_get(eb_rewin);
1331 	btrfs_tree_read_lock(eb_rewin);
1332 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1333 	WARN_ON(btrfs_header_nritems(eb_rewin) >
1334 		BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1335 
1336 	return eb_rewin;
1337 }
1338 
1339 /*
1340  * get_old_root() rewinds the state of @root's root node to the given @time_seq
1341  * value. If there are no changes, the current root->node is returned. If
1342  * anything changed in between, there's a fresh buffer allocated on which the
1343  * rewind operations are done. In any case, the returned buffer is read locked.
1344  * Returns NULL on error (with no locks held).
1345  */
1346 static inline struct extent_buffer *
1347 get_old_root(struct btrfs_root *root, u64 time_seq)
1348 {
1349 	struct btrfs_fs_info *fs_info = root->fs_info;
1350 	struct tree_mod_elem *tm;
1351 	struct extent_buffer *eb = NULL;
1352 	struct extent_buffer *eb_root;
1353 	struct extent_buffer *old;
1354 	struct tree_mod_root *old_root = NULL;
1355 	u64 old_generation = 0;
1356 	u64 logical;
1357 	int level;
1358 
1359 	eb_root = btrfs_read_lock_root_node(root);
1360 	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1361 	if (!tm)
1362 		return eb_root;
1363 
1364 	if (tm->op == MOD_LOG_ROOT_REPLACE) {
1365 		old_root = &tm->old_root;
1366 		old_generation = tm->generation;
1367 		logical = old_root->logical;
1368 		level = old_root->level;
1369 	} else {
1370 		logical = eb_root->start;
1371 		level = btrfs_header_level(eb_root);
1372 	}
1373 
1374 	tm = tree_mod_log_search(fs_info, logical, time_seq);
1375 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1376 		btrfs_tree_read_unlock(eb_root);
1377 		free_extent_buffer(eb_root);
1378 		old = read_tree_block(fs_info, logical, 0, level, NULL);
1379 		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1380 			if (!IS_ERR(old))
1381 				free_extent_buffer(old);
1382 			btrfs_warn(fs_info,
1383 				   "failed to read tree block %llu from get_old_root",
1384 				   logical);
1385 		} else {
1386 			eb = btrfs_clone_extent_buffer(old);
1387 			free_extent_buffer(old);
1388 		}
1389 	} else if (old_root) {
1390 		btrfs_tree_read_unlock(eb_root);
1391 		free_extent_buffer(eb_root);
1392 		eb = alloc_dummy_extent_buffer(fs_info, logical);
1393 	} else {
1394 		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1395 		eb = btrfs_clone_extent_buffer(eb_root);
1396 		btrfs_tree_read_unlock_blocking(eb_root);
1397 		free_extent_buffer(eb_root);
1398 	}
1399 
1400 	if (!eb)
1401 		return NULL;
1402 	extent_buffer_get(eb);
1403 	btrfs_tree_read_lock(eb);
1404 	if (old_root) {
1405 		btrfs_set_header_bytenr(eb, eb->start);
1406 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1407 		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1408 		btrfs_set_header_level(eb, old_root->level);
1409 		btrfs_set_header_generation(eb, old_generation);
1410 	}
1411 	if (tm)
1412 		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1413 	else
1414 		WARN_ON(btrfs_header_level(eb) != 0);
1415 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1416 
1417 	return eb;
1418 }
1419 
1420 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1421 {
1422 	struct tree_mod_elem *tm;
1423 	int level;
1424 	struct extent_buffer *eb_root = btrfs_root_node(root);
1425 
1426 	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1427 	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1428 		level = tm->old_root.level;
1429 	} else {
1430 		level = btrfs_header_level(eb_root);
1431 	}
1432 	free_extent_buffer(eb_root);
1433 
1434 	return level;
1435 }
1436 
1437 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1438 				   struct btrfs_root *root,
1439 				   struct extent_buffer *buf)
1440 {
1441 	if (btrfs_is_testing(root->fs_info))
1442 		return 0;
1443 
1444 	/* Ensure we can see the FORCE_COW bit */
1445 	smp_mb__before_atomic();
1446 
1447 	/*
1448 	 * We do not need to cow a block if
1449 	 * 1) this block is not created or changed in this transaction;
1450 	 * 2) this block does not belong to the TREE_RELOC tree;
1451 	 * 3) the root is not forced to COW.
1452 	 *
1453 	 * What is forced COW:
1454 	 *    when we create snapshot during committing the transaction,
1455 	 *    after we've finished copying the src root, we must COW the shared
1456 	 *    block to ensure the metadata consistency.
1457 	 */
1458 	if (btrfs_header_generation(buf) == trans->transid &&
1459 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1460 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1461 	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1462 	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1463 		return 0;
1464 	return 1;
1465 }
1466 
1467 /*
1468  * cows a single block, see __btrfs_cow_block for the real work.
1469  * This version of it has extra checks so that a block isn't COWed more than
1470  * once per transaction, as long as it hasn't been written yet.
1471  */
1472 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1473 		    struct btrfs_root *root, struct extent_buffer *buf,
1474 		    struct extent_buffer *parent, int parent_slot,
1475 		    struct extent_buffer **cow_ret)
1476 {
1477 	struct btrfs_fs_info *fs_info = root->fs_info;
1478 	u64 search_start;
1479 	int ret;
1480 
1481 	if (trans->transaction != fs_info->running_transaction)
1482 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1483 		       trans->transid,
1484 		       fs_info->running_transaction->transid);
1485 
1486 	if (trans->transid != fs_info->generation)
1487 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1488 		       trans->transid, fs_info->generation);
1489 
1490 	if (!should_cow_block(trans, root, buf)) {
1491 		trans->dirty = true;
1492 		*cow_ret = buf;
1493 		return 0;
1494 	}
1495 
1496 	search_start = buf->start & ~((u64)SZ_1G - 1);
1497 
1498 	if (parent)
1499 		btrfs_set_lock_blocking(parent);
1500 	btrfs_set_lock_blocking(buf);
1501 
1502 	ret = __btrfs_cow_block(trans, root, buf, parent,
1503 				 parent_slot, cow_ret, search_start, 0);
1504 
1505 	trace_btrfs_cow_block(root, buf, *cow_ret);
1506 
1507 	return ret;
1508 }
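/*
 * The search_start mask above rounds the allocation hint down to a 1GiB
 * boundary, e.g. a (hypothetical) buf->start of 0x4a001000 yields a
 * search_start of 0x40000000.
 */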
1509 
1510 /*
1511  * helper function for defrag to decide if two blocks pointed to by a
1512  * node are actually close by
1513  */
1514 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1515 {
1516 	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1517 		return 1;
1518 	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1519 		return 1;
1520 	return 0;
1521 }
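/*
 * Worked example with a 16KiB nodesize: blocknr 0 and other 40960 leave
 * a gap of 40960 - (0 + 16384) = 24576 bytes, under the 32768 byte
 * threshold, so they count as close; with other at 65536 the gap is
 * 49152 bytes and they do not.
 */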
1522 
1523 /*
1524  * compare two keys in a memcmp fashion
1525  */
1526 static int comp_keys(const struct btrfs_disk_key *disk,
1527 		     const struct btrfs_key *k2)
1528 {
1529 	struct btrfs_key k1;
1530 
1531 	btrfs_disk_key_to_cpu(&k1, disk);
1532 
1533 	return btrfs_comp_cpu_keys(&k1, k2);
1534 }
1535 
1536 /*
1537  * same as comp_keys only with two btrfs_key's
1538  */
1539 int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
1540 {
1541 	if (k1->objectid > k2->objectid)
1542 		return 1;
1543 	if (k1->objectid < k2->objectid)
1544 		return -1;
1545 	if (k1->type > k2->type)
1546 		return 1;
1547 	if (k1->type < k2->type)
1548 		return -1;
1549 	if (k1->offset > k2->offset)
1550 		return 1;
1551 	if (k1->offset < k2->offset)
1552 		return -1;
1553 	return 0;
1554 }
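/*
 * Keys compare like an (objectid, type, offset) tuple, e.g.
 * (256, BTRFS_INODE_ITEM_KEY, 0) sorts before (256, BTRFS_INODE_REF_KEY, 0)
 * since BTRFS_INODE_ITEM_KEY (1) < BTRFS_INODE_REF_KEY (12), and both
 * sort before any key with objectid 257.
 */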
1555 
1556 /*
1557  * this is used by the defrag code to go through all the
1558  * leaves pointed to by a node and reallocate them so that
1559  * disk order is close to key order
1560  */
1561 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1562 		       struct btrfs_root *root, struct extent_buffer *parent,
1563 		       int start_slot, u64 *last_ret,
1564 		       struct btrfs_key *progress)
1565 {
1566 	struct btrfs_fs_info *fs_info = root->fs_info;
1567 	struct extent_buffer *cur;
1568 	u64 blocknr;
1569 	u64 gen;
1570 	u64 search_start = *last_ret;
1571 	u64 last_block = 0;
1572 	u64 other;
1573 	u32 parent_nritems;
1574 	int end_slot;
1575 	int i;
1576 	int err = 0;
1577 	int parent_level;
1578 	int uptodate;
1579 	u32 blocksize;
1580 	int progress_passed = 0;
1581 	struct btrfs_disk_key disk_key;
1582 
1583 	parent_level = btrfs_header_level(parent);
1584 
1585 	WARN_ON(trans->transaction != fs_info->running_transaction);
1586 	WARN_ON(trans->transid != fs_info->generation);
1587 
1588 	parent_nritems = btrfs_header_nritems(parent);
1589 	blocksize = fs_info->nodesize;
1590 	end_slot = parent_nritems - 1;
1591 
1592 	if (parent_nritems <= 1)
1593 		return 0;
1594 
1595 	btrfs_set_lock_blocking(parent);
1596 
1597 	for (i = start_slot; i <= end_slot; i++) {
1598 		struct btrfs_key first_key;
1599 		int close = 1;
1600 
1601 		btrfs_node_key(parent, &disk_key, i);
1602 		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1603 			continue;
1604 
1605 		progress_passed = 1;
1606 		blocknr = btrfs_node_blockptr(parent, i);
1607 		gen = btrfs_node_ptr_generation(parent, i);
1608 		btrfs_node_key_to_cpu(parent, &first_key, i);
1609 		if (last_block == 0)
1610 			last_block = blocknr;
1611 
1612 		if (i > 0) {
1613 			other = btrfs_node_blockptr(parent, i - 1);
1614 			close = close_blocks(blocknr, other, blocksize);
1615 		}
1616 		if (!close && i < end_slot) {
1617 			other = btrfs_node_blockptr(parent, i + 1);
1618 			close = close_blocks(blocknr, other, blocksize);
1619 		}
1620 		if (close) {
1621 			last_block = blocknr;
1622 			continue;
1623 		}
1624 
1625 		cur = find_extent_buffer(fs_info, blocknr);
1626 		if (cur)
1627 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1628 		else
1629 			uptodate = 0;
1630 		if (!cur || !uptodate) {
1631 			if (!cur) {
1632 				cur = read_tree_block(fs_info, blocknr, gen,
1633 						      parent_level - 1,
1634 						      &first_key);
1635 				if (IS_ERR(cur)) {
1636 					return PTR_ERR(cur);
1637 				} else if (!extent_buffer_uptodate(cur)) {
1638 					free_extent_buffer(cur);
1639 					return -EIO;
1640 				}
1641 			} else if (!uptodate) {
1642 				err = btrfs_read_buffer(cur, gen,
1643 						parent_level - 1, &first_key);
1644 				if (err) {
1645 					free_extent_buffer(cur);
1646 					return err;
1647 				}
1648 			}
1649 		}
1650 		if (search_start == 0)
1651 			search_start = last_block;
1652 
1653 		btrfs_tree_lock(cur);
1654 		btrfs_set_lock_blocking(cur);
1655 		err = __btrfs_cow_block(trans, root, cur, parent, i,
1656 					&cur, search_start,
1657 					min(16 * blocksize,
1658 					    (end_slot - i) * blocksize));
1659 		if (err) {
1660 			btrfs_tree_unlock(cur);
1661 			free_extent_buffer(cur);
1662 			break;
1663 		}
1664 		search_start = cur->start;
1665 		last_block = cur->start;
1666 		*last_ret = search_start;
1667 		btrfs_tree_unlock(cur);
1668 		free_extent_buffer(cur);
1669 	}
1670 	return err;
1671 }
1672 
1673 /*
1674  * search for key in the extent_buffer.  The items start at offset p,
1675  * and they are item_size apart.  There are 'max' items in p.
1676  *
1677  * the slot in the array is returned via slot, and it points to
1678  * the place where you would insert key if it is not found in
1679  * the array.
1680  *
1681  * slot may point to max if the key is bigger than all of the keys
1682  */
1683 static noinline int generic_bin_search(struct extent_buffer *eb,
1684 				       unsigned long p, int item_size,
1685 				       const struct btrfs_key *key,
1686 				       int max, int *slot)
1687 {
1688 	int low = 0;
1689 	int high = max;
1690 	int mid;
1691 	int ret;
1692 	struct btrfs_disk_key *tmp = NULL;
1693 	struct btrfs_disk_key unaligned;
1694 	unsigned long offset;
1695 	char *kaddr = NULL;
1696 	unsigned long map_start = 0;
1697 	unsigned long map_len = 0;
1698 	int err;
1699 
1700 	if (low > high) {
1701 		btrfs_err(eb->fs_info,
1702 		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1703 			  __func__, low, high, eb->start,
1704 			  btrfs_header_owner(eb), btrfs_header_level(eb));
1705 		return -EINVAL;
1706 	}
1707 
1708 	while (low < high) {
1709 		mid = (low + high) / 2;
1710 		offset = p + mid * item_size;
1711 
1712 		if (!kaddr || offset < map_start ||
1713 		    (offset + sizeof(struct btrfs_disk_key)) >
1714 		    map_start + map_len) {
1715 
1716 			err = map_private_extent_buffer(eb, offset,
1717 						sizeof(struct btrfs_disk_key),
1718 						&kaddr, &map_start, &map_len);
1719 
1720 			if (!err) {
1721 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1722 							map_start);
1723 			} else if (err == 1) {
1724 				read_extent_buffer(eb, &unaligned,
1725 						   offset, sizeof(unaligned));
1726 				tmp = &unaligned;
1727 			} else {
1728 				return err;
1729 			}
1730 
1731 		} else {
1732 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1733 							map_start);
1734 		}
1735 		ret = comp_keys(tmp, key);
1736 
1737 		if (ret < 0)
1738 			low = mid + 1;
1739 		else if (ret > 0)
1740 			high = mid;
1741 		else {
1742 			*slot = mid;
1743 			return 0;
1744 		}
1745 	}
1746 	*slot = low;
1747 	return 1;
1748 }
1749 
1750 /*
1751  * simple bin_search frontend that does the right thing for
1752  * leaves vs nodes
1753  */
1754 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1755 		     int level, int *slot)
1756 {
1757 	if (level == 0)
1758 		return generic_bin_search(eb,
1759 					  offsetof(struct btrfs_leaf, items),
1760 					  sizeof(struct btrfs_item),
1761 					  key, btrfs_header_nritems(eb),
1762 					  slot);
1763 	else
1764 		return generic_bin_search(eb,
1765 					  offsetof(struct btrfs_node, ptrs),
1766 					  sizeof(struct btrfs_key_ptr),
1767 					  key, btrfs_header_nritems(eb),
1768 					  slot);
1769 }
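/*
 * Callers typically consume the result like this (hypothetical sketch):
 *
 *	ret = btrfs_bin_search(eb, &key, level, &slot);
 *	if (ret == 0)
 *		; // exact match at 'slot'
 *	else if (ret > 0)
 *		; // not found, 'slot' is the insertion position
 *	else
 *		; // negative errno, e.g. from a failed mapping
 */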
1770 
1771 static void root_add_used(struct btrfs_root *root, u32 size)
1772 {
1773 	spin_lock(&root->accounting_lock);
1774 	btrfs_set_root_used(&root->root_item,
1775 			    btrfs_root_used(&root->root_item) + size);
1776 	spin_unlock(&root->accounting_lock);
1777 }
1778 
1779 static void root_sub_used(struct btrfs_root *root, u32 size)
1780 {
1781 	spin_lock(&root->accounting_lock);
1782 	btrfs_set_root_used(&root->root_item,
1783 			    btrfs_root_used(&root->root_item) - size);
1784 	spin_unlock(&root->accounting_lock);
1785 }
1786 
1787 /* given a node and slot number, this reads the block it points to.  The
1788  * extent buffer is returned with a reference taken (but unlocked).
1789  */
1790 static noinline struct extent_buffer *
1791 read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
1792 	       int slot)
1793 {
1794 	int level = btrfs_header_level(parent);
1795 	struct extent_buffer *eb;
1796 	struct btrfs_key first_key;
1797 
1798 	if (slot < 0 || slot >= btrfs_header_nritems(parent))
1799 		return ERR_PTR(-ENOENT);
1800 
1801 	BUG_ON(level == 0);
1802 
1803 	btrfs_node_key_to_cpu(parent, &first_key, slot);
1804 	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
1805 			     btrfs_node_ptr_generation(parent, slot),
1806 			     level - 1, &first_key);
1807 	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1808 		free_extent_buffer(eb);
1809 		eb = ERR_PTR(-EIO);
1810 	}
1811 
1812 	return eb;
1813 }
1814 
1815 /*
1816  * node level balancing, used to make sure nodes are in proper order for
1817  * item deletion.  We balance from the top down, so we have to make sure
1818  * that a deletion won't leave a node completely empty later on.
1819  */
1820 static noinline int balance_level(struct btrfs_trans_handle *trans,
1821 			 struct btrfs_root *root,
1822 			 struct btrfs_path *path, int level)
1823 {
1824 	struct btrfs_fs_info *fs_info = root->fs_info;
1825 	struct extent_buffer *right = NULL;
1826 	struct extent_buffer *mid;
1827 	struct extent_buffer *left = NULL;
1828 	struct extent_buffer *parent = NULL;
1829 	int ret = 0;
1830 	int wret;
1831 	int pslot;
1832 	int orig_slot = path->slots[level];
1833 	u64 orig_ptr;
1834 
1835 	if (level == 0)
1836 		return 0;
1837 
1838 	mid = path->nodes[level];
1839 
1840 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1841 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1842 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1843 
1844 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1845 
1846 	if (level < BTRFS_MAX_LEVEL - 1) {
1847 		parent = path->nodes[level + 1];
1848 		pslot = path->slots[level + 1];
1849 	}
1850 
1851 	/*
1852 	 * deal with the case where there is only one pointer in the root
1853 	 * by promoting the node below to a root
1854 	 */
1855 	if (!parent) {
1856 		struct extent_buffer *child;
1857 
1858 		if (btrfs_header_nritems(mid) != 1)
1859 			return 0;
1860 
1861 		/* promote the child to a root */
1862 		child = read_node_slot(fs_info, mid, 0);
1863 		if (IS_ERR(child)) {
1864 			ret = PTR_ERR(child);
1865 			btrfs_handle_fs_error(fs_info, ret, NULL);
1866 			goto enospc;
1867 		}
1868 
1869 		btrfs_tree_lock(child);
1870 		btrfs_set_lock_blocking(child);
1871 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1872 		if (ret) {
1873 			btrfs_tree_unlock(child);
1874 			free_extent_buffer(child);
1875 			goto enospc;
1876 		}
1877 
1878 		ret = tree_mod_log_insert_root(root->node, child, 1);
1879 		BUG_ON(ret < 0);
1880 		rcu_assign_pointer(root->node, child);
1881 
1882 		add_root_to_dirty_list(root);
1883 		btrfs_tree_unlock(child);
1884 
1885 		path->locks[level] = 0;
1886 		path->nodes[level] = NULL;
1887 		clean_tree_block(fs_info, mid);
1888 		btrfs_tree_unlock(mid);
1889 		/* once for the path */
1890 		free_extent_buffer(mid);
1891 
1892 		root_sub_used(root, mid->len);
1893 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1894 		/* once for the root ptr */
1895 		free_extent_buffer_stale(mid);
1896 		return 0;
1897 	}
1898 	if (btrfs_header_nritems(mid) >
1899 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1900 		return 0;
1901 
1902 	left = read_node_slot(fs_info, parent, pslot - 1);
1903 	if (IS_ERR(left))
1904 		left = NULL;
1905 
1906 	if (left) {
1907 		btrfs_tree_lock(left);
1908 		btrfs_set_lock_blocking(left);
1909 		wret = btrfs_cow_block(trans, root, left,
1910 				       parent, pslot - 1, &left);
1911 		if (wret) {
1912 			ret = wret;
1913 			goto enospc;
1914 		}
1915 	}
1916 
1917 	right = read_node_slot(fs_info, parent, pslot + 1);
1918 	if (IS_ERR(right))
1919 		right = NULL;
1920 
1921 	if (right) {
1922 		btrfs_tree_lock(right);
1923 		btrfs_set_lock_blocking(right);
1924 		wret = btrfs_cow_block(trans, root, right,
1925 				       parent, pslot + 1, &right);
1926 		if (wret) {
1927 			ret = wret;
1928 			goto enospc;
1929 		}
1930 	}
1931 
1932 	/* first, try to make some room in the middle buffer */
1933 	if (left) {
1934 		orig_slot += btrfs_header_nritems(left);
1935 		wret = push_node_left(trans, fs_info, left, mid, 1);
1936 		if (wret < 0)
1937 			ret = wret;
1938 	}
1939 
1940 	/*
1941 	 * then try to empty the rightmost buffer into the middle
1942 	 */
1943 	if (right) {
1944 		wret = push_node_left(trans, fs_info, mid, right, 1);
1945 		if (wret < 0 && wret != -ENOSPC)
1946 			ret = wret;
1947 		if (btrfs_header_nritems(right) == 0) {
1948 			clean_tree_block(fs_info, right);
1949 			btrfs_tree_unlock(right);
1950 			del_ptr(root, path, level + 1, pslot + 1);
1951 			root_sub_used(root, right->len);
1952 			btrfs_free_tree_block(trans, root, right, 0, 1);
1953 			free_extent_buffer_stale(right);
1954 			right = NULL;
1955 		} else {
1956 			struct btrfs_disk_key right_key;
1957 			btrfs_node_key(right, &right_key, 0);
1958 			ret = tree_mod_log_insert_key(parent, pslot + 1,
1959 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1960 			BUG_ON(ret < 0);
1961 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1962 			btrfs_mark_buffer_dirty(parent);
1963 		}
1964 	}
1965 	if (btrfs_header_nritems(mid) == 1) {
1966 		/*
1967 		 * we're not allowed to leave a node with one item in the
1968 		 * tree during a delete.  A deletion from lower in the tree
1969 		 * could try to delete the only pointer in this node.
1970 		 * So, pull some keys from the left.
1971 		 * There has to be a left pointer at this point because
1972 		 * otherwise we would have pulled some pointers from the
1973 		 * right
1974 		 */
1975 		if (!left) {
1976 			ret = -EROFS;
1977 			btrfs_handle_fs_error(fs_info, ret, NULL);
1978 			goto enospc;
1979 		}
1980 		wret = balance_node_right(trans, fs_info, mid, left);
1981 		if (wret < 0) {
1982 			ret = wret;
1983 			goto enospc;
1984 		}
1985 		if (wret == 1) {
1986 			wret = push_node_left(trans, fs_info, left, mid, 1);
1987 			if (wret < 0)
1988 				ret = wret;
1989 		}
1990 		BUG_ON(wret == 1);
1991 	}
1992 	if (btrfs_header_nritems(mid) == 0) {
1993 		clean_tree_block(fs_info, mid);
1994 		btrfs_tree_unlock(mid);
1995 		del_ptr(root, path, level + 1, pslot);
1996 		root_sub_used(root, mid->len);
1997 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1998 		free_extent_buffer_stale(mid);
1999 		mid = NULL;
2000 	} else {
2001 		/* update the parent key to reflect our changes */
2002 		struct btrfs_disk_key mid_key;
2003 		btrfs_node_key(mid, &mid_key, 0);
2004 		ret = tree_mod_log_insert_key(parent, pslot,
2005 				MOD_LOG_KEY_REPLACE, GFP_NOFS);
2006 		BUG_ON(ret < 0);
2007 		btrfs_set_node_key(parent, &mid_key, pslot);
2008 		btrfs_mark_buffer_dirty(parent);
2009 	}
2010 
2011 	/* update the path */
2012 	if (left) {
2013 		if (btrfs_header_nritems(left) > orig_slot) {
2014 			extent_buffer_get(left);
2015 			/* left was locked after cow */
2016 			path->nodes[level] = left;
2017 			path->slots[level + 1] -= 1;
2018 			path->slots[level] = orig_slot;
2019 			if (mid) {
2020 				btrfs_tree_unlock(mid);
2021 				free_extent_buffer(mid);
2022 			}
2023 		} else {
2024 			orig_slot -= btrfs_header_nritems(left);
2025 			path->slots[level] = orig_slot;
2026 		}
2027 	}
2028 	/* double check we haven't messed things up */
2029 	if (orig_ptr !=
2030 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2031 		BUG();
2032 enospc:
2033 	if (right) {
2034 		btrfs_tree_unlock(right);
2035 		free_extent_buffer(right);
2036 	}
2037 	if (left) {
2038 		if (path->nodes[level] != left)
2039 			btrfs_tree_unlock(left);
2040 		free_extent_buffer(left);
2041 	}
2042 	return ret;
2043 }
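
/*
 * Worked example for the rebalance threshold in balance_level() (a sketch
 * assuming the default 16KiB nodesize and packed on-disk struct sizes):
 * BTRFS_NODEPTRS_PER_BLOCK is
 * (nodesize - sizeof(struct btrfs_header)) / sizeof(struct btrfs_key_ptr)
 * = (16384 - 101) / 33 = 493 pointers, so a node is left alone until it
 * drops to 493 / 4 = 123 items or fewer.
 */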
2044 
2045 /* Node balancing for insertion.  Here we only split or push nodes around
2046  * when they are completely full.  This is also done top down, so we
2047  * have to be pessimistic.
2048  */
2049 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2050 					  struct btrfs_root *root,
2051 					  struct btrfs_path *path, int level)
2052 {
2053 	struct btrfs_fs_info *fs_info = root->fs_info;
2054 	struct extent_buffer *right = NULL;
2055 	struct extent_buffer *mid;
2056 	struct extent_buffer *left = NULL;
2057 	struct extent_buffer *parent = NULL;
2058 	int ret = 0;
2059 	int wret;
2060 	int pslot;
2061 	int orig_slot = path->slots[level];
2062 
2063 	if (level == 0)
2064 		return 1;
2065 
2066 	mid = path->nodes[level];
2067 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2068 
2069 	if (level < BTRFS_MAX_LEVEL - 1) {
2070 		parent = path->nodes[level + 1];
2071 		pslot = path->slots[level + 1];
2072 	}
2073 
2074 	if (!parent)
2075 		return 1;
2076 
2077 	left = read_node_slot(fs_info, parent, pslot - 1);
2078 	if (IS_ERR(left))
2079 		left = NULL;
2080 
2081 	/* first, try to make some room in the middle buffer */
2082 	if (left) {
2083 		u32 left_nr;
2084 
2085 		btrfs_tree_lock(left);
2086 		btrfs_set_lock_blocking(left);
2087 
2088 		left_nr = btrfs_header_nritems(left);
2089 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2090 			wret = 1;
2091 		} else {
2092 			ret = btrfs_cow_block(trans, root, left, parent,
2093 					      pslot - 1, &left);
2094 			if (ret)
2095 				wret = 1;
2096 			else {
2097 				wret = push_node_left(trans, fs_info,
2098 						      left, mid, 0);
2099 			}
2100 		}
2101 		if (wret < 0)
2102 			ret = wret;
2103 		if (wret == 0) {
2104 			struct btrfs_disk_key disk_key;
2105 			orig_slot += left_nr;
2106 			btrfs_node_key(mid, &disk_key, 0);
2107 			ret = tree_mod_log_insert_key(parent, pslot,
2108 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2109 			BUG_ON(ret < 0);
2110 			btrfs_set_node_key(parent, &disk_key, pslot);
2111 			btrfs_mark_buffer_dirty(parent);
2112 			if (btrfs_header_nritems(left) > orig_slot) {
2113 				path->nodes[level] = left;
2114 				path->slots[level + 1] -= 1;
2115 				path->slots[level] = orig_slot;
2116 				btrfs_tree_unlock(mid);
2117 				free_extent_buffer(mid);
2118 			} else {
2119 				orig_slot -=
2120 					btrfs_header_nritems(left);
2121 				path->slots[level] = orig_slot;
2122 				btrfs_tree_unlock(left);
2123 				free_extent_buffer(left);
2124 			}
2125 			return 0;
2126 		}
2127 		btrfs_tree_unlock(left);
2128 		free_extent_buffer(left);
2129 	}
2130 	right = read_node_slot(fs_info, parent, pslot + 1);
2131 	if (IS_ERR(right))
2132 		right = NULL;
2133 
2134 	/*
2135 	 * then try to empty the rightmost buffer into the middle
2136 	 */
2137 	if (right) {
2138 		u32 right_nr;
2139 
2140 		btrfs_tree_lock(right);
2141 		btrfs_set_lock_blocking(right);
2142 
2143 		right_nr = btrfs_header_nritems(right);
2144 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2145 			wret = 1;
2146 		} else {
2147 			ret = btrfs_cow_block(trans, root, right,
2148 					      parent, pslot + 1,
2149 					      &right);
2150 			if (ret)
2151 				wret = 1;
2152 			else {
2153 				wret = balance_node_right(trans, fs_info,
2154 							  right, mid);
2155 			}
2156 		}
2157 		if (wret < 0)
2158 			ret = wret;
2159 		if (wret == 0) {
2160 			struct btrfs_disk_key disk_key;
2161 
2162 			btrfs_node_key(right, &disk_key, 0);
2163 			ret = tree_mod_log_insert_key(parent, pslot + 1,
2164 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2165 			BUG_ON(ret < 0);
2166 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2167 			btrfs_mark_buffer_dirty(parent);
2168 
2169 			if (btrfs_header_nritems(mid) <= orig_slot) {
2170 				path->nodes[level] = right;
2171 				path->slots[level + 1] += 1;
2172 				path->slots[level] = orig_slot -
2173 					btrfs_header_nritems(mid);
2174 				btrfs_tree_unlock(mid);
2175 				free_extent_buffer(mid);
2176 			} else {
2177 				btrfs_tree_unlock(right);
2178 				free_extent_buffer(right);
2179 			}
2180 			return 0;
2181 		}
2182 		btrfs_tree_unlock(right);
2183 		free_extent_buffer(right);
2184 	}
2185 	return 1;
2186 }
2187 
2188 /*
2189  * readahead one full node of leaves, finding things that are close
2190  * to the block in 'slot', and triggering readahead on them.
2191  */
2192 static void reada_for_search(struct btrfs_fs_info *fs_info,
2193 			     struct btrfs_path *path,
2194 			     int level, int slot, u64 objectid)
2195 {
2196 	struct extent_buffer *node;
2197 	struct btrfs_disk_key disk_key;
2198 	u32 nritems;
2199 	u64 search;
2200 	u64 target;
2201 	u64 nread = 0;
2202 	struct extent_buffer *eb;
2203 	u32 nr;
2204 	u32 blocksize;
2205 	u32 nscan = 0;
2206 
2207 	if (level != 1)
2208 		return;
2209 
2210 	if (!path->nodes[level])
2211 		return;
2212 
2213 	node = path->nodes[level];
2214 
2215 	search = btrfs_node_blockptr(node, slot);
2216 	blocksize = fs_info->nodesize;
2217 	eb = find_extent_buffer(fs_info, search);
2218 	if (eb) {
2219 		free_extent_buffer(eb);
2220 		return;
2221 	}
2222 
2223 	target = search;
2224 
2225 	nritems = btrfs_header_nritems(node);
2226 	nr = slot;
2227 
2228 	while (1) {
2229 		if (path->reada == READA_BACK) {
2230 			if (nr == 0)
2231 				break;
2232 			nr--;
2233 		} else if (path->reada == READA_FORWARD) {
2234 			nr++;
2235 			if (nr >= nritems)
2236 				break;
2237 		}
2238 		if (path->reada == READA_BACK && objectid) {
2239 			btrfs_node_key(node, &disk_key, nr);
2240 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2241 				break;
2242 		}
2243 		search = btrfs_node_blockptr(node, nr);
2244 		if ((search <= target && target - search <= 65536) ||
2245 		    (search > target && search - target <= 65536)) {
2246 			readahead_tree_block(fs_info, search);
2247 			nread += blocksize;
2248 		}
2249 		nscan++;
2250 		if ((nread > 65536 || nscan > 32))
2251 			break;
2252 	}
2253 }
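
/*
 * Worked example for the limits in reada_for_search() (sketch): readahead
 * only fires for blocks within 65536 bytes (64KiB) of the target, and the
 * scan stops once more than 64KiB has been read or 32 slots have been
 * examined.  With a 16KiB nodesize that caps a single call at five tree
 * blocks, since the fifth read pushes nread to 80KiB.
 */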
2254 
2255 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2256 				       struct btrfs_path *path, int level)
2257 {
2258 	int slot;
2259 	int nritems;
2260 	struct extent_buffer *parent;
2261 	struct extent_buffer *eb;
2262 	u64 gen;
2263 	u64 block1 = 0;
2264 	u64 block2 = 0;
2265 
2266 	parent = path->nodes[level + 1];
2267 	if (!parent)
2268 		return;
2269 
2270 	nritems = btrfs_header_nritems(parent);
2271 	slot = path->slots[level + 1];
2272 
2273 	if (slot > 0) {
2274 		block1 = btrfs_node_blockptr(parent, slot - 1);
2275 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2276 		eb = find_extent_buffer(fs_info, block1);
2277 		/*
2278 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2279 		 * don't want to return EAGAIN here; that would loop
2280 		 * forever
2281 		 */
2282 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2283 			block1 = 0;
2284 		free_extent_buffer(eb);
2285 	}
2286 	if (slot + 1 < nritems) {
2287 		block2 = btrfs_node_blockptr(parent, slot + 1);
2288 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2289 		eb = find_extent_buffer(fs_info, block2);
2290 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2291 			block2 = 0;
2292 		free_extent_buffer(eb);
2293 	}
2294 
2295 	if (block1)
2296 		readahead_tree_block(fs_info, block1);
2297 	if (block2)
2298 		readahead_tree_block(fs_info, block2);
2299 }
2300 
2301 
2302 /*
2303  * when we walk down the tree, it is usually safe to unlock the higher layers
2304  * in the tree.  The exceptions are when our path goes through slot 0, because
2305  * operations on the tree might require changing key pointers higher up in the
2306  * tree.
2307  *
2308  * callers might also have set path->keep_locks, which tells this code to keep
2309  * the lock if the path points to the last slot in the block.  This is part of
2310  * walking through the tree, and selecting the next slot in the higher block.
2311  *
2312  * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
2313  * if lowest_unlock is 1, level 0 won't be unlocked
2314  */
2315 static noinline void unlock_up(struct btrfs_path *path, int level,
2316 			       int lowest_unlock, int min_write_lock_level,
2317 			       int *write_lock_level)
2318 {
2319 	int i;
2320 	int skip_level = level;
2321 	int no_skips = 0;
2322 	struct extent_buffer *t;
2323 
2324 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2325 		if (!path->nodes[i])
2326 			break;
2327 		if (!path->locks[i])
2328 			break;
2329 		if (!no_skips && path->slots[i] == 0) {
2330 			skip_level = i + 1;
2331 			continue;
2332 		}
2333 		if (!no_skips && path->keep_locks) {
2334 			u32 nritems;
2335 			t = path->nodes[i];
2336 			nritems = btrfs_header_nritems(t);
2337 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2338 				skip_level = i + 1;
2339 				continue;
2340 			}
2341 		}
2342 		if (skip_level < i && i >= lowest_unlock)
2343 			no_skips = 1;
2344 
2345 		t = path->nodes[i];
2346 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2347 			btrfs_tree_unlock_rw(t, path->locks[i]);
2348 			path->locks[i] = 0;
2349 			if (write_lock_level &&
2350 			    i > min_write_lock_level &&
2351 			    i <= *write_lock_level) {
2352 				*write_lock_level = i - 1;
2353 			}
2354 		}
2355 	}
2356 }
2357 
2358 /*
2359  * This releases any locks held in the path starting at level and
2360  * going all the way up to the root.
2361  *
2362  * btrfs_search_slot will keep the lock held on higher nodes in a few
2363  * corner cases, such as COW of the block at slot zero in the node.  This
2364  * ignores those rules, and it should only be called when there are no
2365  * more updates to be done higher up in the tree.
2366  */
2367 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2368 {
2369 	int i;
2370 
2371 	if (path->keep_locks)
2372 		return;
2373 
2374 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2375 		if (!path->nodes[i])
2376 			continue;
2377 		if (!path->locks[i])
2378 			continue;
2379 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2380 		path->locks[i] = 0;
2381 	}
2382 }
2383 
2384 /*
2385  * helper function for btrfs_search_slot.  The goal is to find a block
2386  * in cache without setting the path to blocking.  If we find the block
2387  * we return zero and the path is unchanged.
2388  *
2389  * If we can't find the block, we set the path blocking and do some
2390  * readahead.  -EAGAIN is returned and the search must be repeated.
2391  */
2392 static int
2393 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2394 		      struct extent_buffer **eb_ret, int level, int slot,
2395 		      const struct btrfs_key *key)
2396 {
2397 	struct btrfs_fs_info *fs_info = root->fs_info;
2398 	u64 blocknr;
2399 	u64 gen;
2400 	struct extent_buffer *b = *eb_ret;
2401 	struct extent_buffer *tmp;
2402 	struct btrfs_key first_key;
2403 	int ret;
2404 	int parent_level;
2405 
2406 	blocknr = btrfs_node_blockptr(b, slot);
2407 	gen = btrfs_node_ptr_generation(b, slot);
2408 	parent_level = btrfs_header_level(b);
2409 	btrfs_node_key_to_cpu(b, &first_key, slot);
2410 
2411 	tmp = find_extent_buffer(fs_info, blocknr);
2412 	if (tmp) {
2413 		/* first we do an atomic uptodate check */
2414 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2415 			*eb_ret = tmp;
2416 			return 0;
2417 		}
2418 
2419 		/* the pages were up to date, but we failed
2420 		 * the generation number check.  Do a full
2421 		 * read for the generation number that is correct.
2422 		 * We must do this without dropping locks so
2423 		 * we can trust our generation number
2424 		 */
2425 		btrfs_set_path_blocking(p);
2426 
2427 		/* now we're allowed to do a blocking uptodate check */
2428 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2429 		if (!ret) {
2430 			*eb_ret = tmp;
2431 			return 0;
2432 		}
2433 		free_extent_buffer(tmp);
2434 		btrfs_release_path(p);
2435 		return -EIO;
2436 	}
2437 
2438 	/*
2439 	 * reduce lock contention at high levels
2440 	 * of the btree by dropping locks before
2441 	 * we read.  Don't release the lock on the current
2442 	 * level because we need to walk this node to figure
2443 	 * out which blocks to read.
2444 	 */
2445 	btrfs_unlock_up_safe(p, level + 1);
2446 	btrfs_set_path_blocking(p);
2447 
2448 	free_extent_buffer(tmp);
2449 	if (p->reada != READA_NONE)
2450 		reada_for_search(fs_info, p, level, slot, key->objectid);
2451 
2452 	btrfs_release_path(p);
2453 
2454 	ret = -EAGAIN;
2455 	tmp = read_tree_block(fs_info, blocknr, 0, parent_level - 1,
2456 			      &first_key);
2457 	if (!IS_ERR(tmp)) {
2458 		/*
2459 		 * If the read above didn't mark this buffer up to date,
2460 		 * it will never end up being up to date.  Set ret to EIO now
2461 		 * and give up so that our caller doesn't loop forever
2462 		 * on our EAGAINs.
2463 		 */
2464 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
2465 			ret = -EIO;
2466 		free_extent_buffer(tmp);
2467 	} else {
2468 		ret = PTR_ERR(tmp);
2469 	}
2470 	return ret;
2471 }
2472 
2473 /*
2474  * helper function for btrfs_search_slot.  This does all of the checks
2475  * for node-level blocks and does any balancing required based on
2476  * the ins_len.
2477  *
2478  * If no extra work was required, zero is returned.  If we had to
2479  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2480  * start over
2481  */
2482 static int
2483 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2484 		       struct btrfs_root *root, struct btrfs_path *p,
2485 		       struct extent_buffer *b, int level, int ins_len,
2486 		       int *write_lock_level)
2487 {
2488 	struct btrfs_fs_info *fs_info = root->fs_info;
2489 	int ret;
2490 
2491 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2492 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2493 		int sret;
2494 
2495 		if (*write_lock_level < level + 1) {
2496 			*write_lock_level = level + 1;
2497 			btrfs_release_path(p);
2498 			goto again;
2499 		}
2500 
2501 		btrfs_set_path_blocking(p);
2502 		reada_for_balance(fs_info, p, level);
2503 		sret = split_node(trans, root, p, level);
2504 		btrfs_clear_path_blocking(p, NULL, 0);
2505 
2506 		BUG_ON(sret > 0);
2507 		if (sret) {
2508 			ret = sret;
2509 			goto done;
2510 		}
2511 		b = p->nodes[level];
2512 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2513 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2514 		int sret;
2515 
2516 		if (*write_lock_level < level + 1) {
2517 			*write_lock_level = level + 1;
2518 			btrfs_release_path(p);
2519 			goto again;
2520 		}
2521 
2522 		btrfs_set_path_blocking(p);
2523 		reada_for_balance(fs_info, p, level);
2524 		sret = balance_level(trans, root, p, level);
2525 		btrfs_clear_path_blocking(p, NULL, 0);
2526 
2527 		if (sret) {
2528 			ret = sret;
2529 			goto done;
2530 		}
2531 		b = p->nodes[level];
2532 		if (!b) {
2533 			btrfs_release_path(p);
2534 			goto again;
2535 		}
2536 		BUG_ON(btrfs_header_nritems(b) == 1);
2537 	}
2538 	return 0;
2539 
2540 again:
2541 	ret = -EAGAIN;
2542 done:
2543 	return ret;
2544 }
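
/*
 * Worked example for the thresholds in setup_nodes_for_search() (sketch
 * assuming a 16KiB nodesize with 493 pointers per node): an insertion
 * path splits a node once it holds 493 - 3 = 490 or more pointers, while
 * a deletion path (ins_len < 0) rebalances once a node drops below
 * 493 / 2 = 246 pointers.
 */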
2545 
2546 static void key_search_validate(struct extent_buffer *b,
2547 				const struct btrfs_key *key,
2548 				int level)
2549 {
2550 #ifdef CONFIG_BTRFS_ASSERT
2551 	struct btrfs_disk_key disk_key;
2552 
2553 	btrfs_cpu_key_to_disk(&disk_key, key);
2554 
2555 	if (level == 0)
2556 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2557 		    offsetof(struct btrfs_leaf, items[0].key),
2558 		    sizeof(disk_key)));
2559 	else
2560 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2561 		    offsetof(struct btrfs_node, ptrs[0].key),
2562 		    sizeof(disk_key)));
2563 #endif
2564 }
2565 
2566 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2567 		      int level, int *prev_cmp, int *slot)
2568 {
2569 	if (*prev_cmp != 0) {
2570 		*prev_cmp = btrfs_bin_search(b, key, level, slot);
2571 		return *prev_cmp;
2572 	}
2573 
2574 	key_search_validate(b, key, level);
2575 	*slot = 0;
2576 
2577 	return 0;
2578 }
2579 
2580 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2581 		u64 iobjectid, u64 ioff, u8 key_type,
2582 		struct btrfs_key *found_key)
2583 {
2584 	int ret;
2585 	struct btrfs_key key;
2586 	struct extent_buffer *eb;
2587 
2588 	ASSERT(path);
2589 	ASSERT(found_key);
2590 
2591 	key.type = key_type;
2592 	key.objectid = iobjectid;
2593 	key.offset = ioff;
2594 
2595 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2596 	if (ret < 0)
2597 		return ret;
2598 
2599 	eb = path->nodes[0];
2600 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2601 		ret = btrfs_next_leaf(fs_root, path);
2602 		if (ret)
2603 			return ret;
2604 		eb = path->nodes[0];
2605 	}
2606 
2607 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2608 	if (found_key->type != key.type ||
2609 			found_key->objectid != key.objectid)
2610 		return 1;
2611 
2612 	return 0;
2613 }
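
/*
 * Illustrative sketch of a typical btrfs_find_item() call; the helper
 * below is hypothetical.  It looks up an inode item, where the key
 * offset is always 0.  The caller still owns (and must release) the path.
 */
static inline int example_lookup_inode_item(struct btrfs_root *root,
					    struct btrfs_path *path, u64 ino)
{
	struct btrfs_key found_key;
	int ret;

	ret = btrfs_find_item(root, path, ino, 0, BTRFS_INODE_ITEM_KEY,
			      &found_key);
	/*
	 * 0 means found_key matches and path->nodes[0]/path->slots[0]
	 * point at the item; 1 means no such item; < 0 is a search error.
	 */
	return ret;
}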
2614 
2615 /*
2616  * btrfs_search_slot - look for a key in a tree and perform necessary
2617  * modifications to preserve tree invariants.
2618  *
2619  * @trans:	Handle of transaction, used when modifying the tree
2620  * @p:		Holds all btree nodes along the search path
2621  * @root:	The root node of the tree
2622  * @key:	The key we are looking for
2623  * @ins_len:	Indicates purpose of search, for inserts it is 1, for
2624  *		deletions it's -1. 0 for plain searches
2625  * @cow:	whether CoW operations should be performed. Must always be 1
2626  *		when modifying the tree.
2627  *
2628  * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2629  * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2630  *
2631  * If @key is found, 0 is returned and you can find the item in the leaf level
2632  * of the path (level 0)
2633  *
2634  * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2635  * points to the slot where it should be inserted
2636  *
2637  * If an error is encountered while searching the tree a negative error number
2638  * is returned
2639  */
2640 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2641 		      const struct btrfs_key *key, struct btrfs_path *p,
2642 		      int ins_len, int cow)
2643 {
2644 	struct btrfs_fs_info *fs_info = root->fs_info;
2645 	struct extent_buffer *b;
2646 	int slot;
2647 	int ret;
2648 	int err;
2649 	int level;
2650 	int lowest_unlock = 1;
2651 	int root_lock;
2652 	/* everything at write_lock_level or lower must be write locked */
2653 	int write_lock_level = 0;
2654 	u8 lowest_level = 0;
2655 	int min_write_lock_level;
2656 	int prev_cmp;
2657 
2658 	lowest_level = p->lowest_level;
2659 	WARN_ON(lowest_level && ins_len > 0);
2660 	WARN_ON(p->nodes[0] != NULL);
2661 	BUG_ON(!cow && ins_len);
2662 
2663 	if (ins_len < 0) {
2664 		lowest_unlock = 2;
2665 
2666 		/* when we are removing items, we might have to go up to level
2667 		 * two as we update tree pointers  Make sure we keep write
2668 		 * two as we update tree pointers.  Make sure we keep write
2669 		 * locks on those levels as well
2670 		write_lock_level = 2;
2671 	} else if (ins_len > 0) {
2672 		/*
2673 		 * for inserting items, make sure we have a write lock on
2674 		 * level 1 so we can update keys
2675 		 */
2676 		write_lock_level = 1;
2677 	}
2678 
2679 	if (!cow)
2680 		write_lock_level = -1;
2681 
2682 	if (cow && (p->keep_locks || p->lowest_level))
2683 		write_lock_level = BTRFS_MAX_LEVEL;
2684 
2685 	min_write_lock_level = write_lock_level;
2686 
2687 again:
2688 	prev_cmp = -1;
2689 	/*
2690 	 * we try very hard to do read locks on the root
2691 	 */
2692 	root_lock = BTRFS_READ_LOCK;
2693 	level = 0;
2694 	if (p->search_commit_root) {
2695 		/*
2696 		 * the commit roots are read only
2697 		 * so we always do read locks
2698 		 */
2699 		if (p->need_commit_sem)
2700 			down_read(&fs_info->commit_root_sem);
2701 		b = root->commit_root;
2702 		extent_buffer_get(b);
2703 		level = btrfs_header_level(b);
2704 		if (p->need_commit_sem)
2705 			up_read(&fs_info->commit_root_sem);
2706 		if (!p->skip_locking)
2707 			btrfs_tree_read_lock(b);
2708 	} else {
2709 		if (p->skip_locking) {
2710 			b = btrfs_root_node(root);
2711 			level = btrfs_header_level(b);
2712 		} else {
2713 			/* we don't know the level of the root node
2714 			 * until we actually have it read locked
2715 			 */
2716 			b = btrfs_read_lock_root_node(root);
2717 			level = btrfs_header_level(b);
2718 			if (level <= write_lock_level) {
2719 				/* whoops, must trade for write lock */
2720 				btrfs_tree_read_unlock(b);
2721 				free_extent_buffer(b);
2722 				b = btrfs_lock_root_node(root);
2723 				root_lock = BTRFS_WRITE_LOCK;
2724 
2725 				/* the level might have changed, check again */
2726 				level = btrfs_header_level(b);
2727 			}
2728 		}
2729 	}
2730 	p->nodes[level] = b;
2731 	if (!p->skip_locking)
2732 		p->locks[level] = root_lock;
2733 
2734 	while (b) {
2735 		level = btrfs_header_level(b);
2736 
2737 		/*
2738 		 * setup the path here so we can release it under lock
2739 		 * contention with the cow code
2740 		 */
2741 		if (cow) {
2742 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2743 
2744 			/*
2745 			 * if we don't really need to cow this block
2746 			 * then we don't want to set the path blocking,
2747 			 * so we test it here
2748 			 */
2749 			if (!should_cow_block(trans, root, b)) {
2750 				trans->dirty = true;
2751 				goto cow_done;
2752 			}
2753 
2754 			/*
2755 			 * must have write locks on this node and the
2756 			 * parent
2757 			 */
2758 			if (level > write_lock_level ||
2759 			    (level + 1 > write_lock_level &&
2760 			    level + 1 < BTRFS_MAX_LEVEL &&
2761 			    p->nodes[level + 1])) {
2762 				write_lock_level = level + 1;
2763 				btrfs_release_path(p);
2764 				goto again;
2765 			}
2766 
2767 			btrfs_set_path_blocking(p);
2768 			if (last_level)
2769 				err = btrfs_cow_block(trans, root, b, NULL, 0,
2770 						      &b);
2771 			else
2772 				err = btrfs_cow_block(trans, root, b,
2773 						      p->nodes[level + 1],
2774 						      p->slots[level + 1], &b);
2775 			if (err) {
2776 				ret = err;
2777 				goto done;
2778 			}
2779 		}
2780 cow_done:
2781 		p->nodes[level] = b;
2782 		btrfs_clear_path_blocking(p, NULL, 0);
2783 
2784 		/*
2785 		 * we have a lock on b and as long as we aren't changing
2786 		 * the tree, there is no way for the items in b to change.
2787 		 * It is safe to drop the lock on our parent before we
2788 		 * go through the expensive btree search on b.
2789 		 *
2790 		 * If we're inserting or deleting (ins_len != 0), then we might
2791 		 * be changing slot zero, which may require changing the parent.
2792 		 * So, we can't drop the lock until after we know which slot
2793 		 * we're operating on.
2794 		 */
2795 		if (!ins_len && !p->keep_locks) {
2796 			int u = level + 1;
2797 
2798 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2799 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2800 				p->locks[u] = 0;
2801 			}
2802 		}
2803 
2804 		ret = key_search(b, key, level, &prev_cmp, &slot);
2805 		if (ret < 0)
2806 			goto done;
2807 
2808 		if (level != 0) {
2809 			int dec = 0;
2810 			if (ret && slot > 0) {
2811 				dec = 1;
2812 				slot -= 1;
2813 			}
2814 			p->slots[level] = slot;
2815 			err = setup_nodes_for_search(trans, root, p, b, level,
2816 					     ins_len, &write_lock_level);
2817 			if (err == -EAGAIN)
2818 				goto again;
2819 			if (err) {
2820 				ret = err;
2821 				goto done;
2822 			}
2823 			b = p->nodes[level];
2824 			slot = p->slots[level];
2825 
2826 			/*
2827 			 * slot 0 is special, if we change the key
2828 			 * we have to update the parent pointer
2829 			 * which means we must have a write lock
2830 			 * on the parent
2831 			 */
2832 			if (slot == 0 && ins_len &&
2833 			    write_lock_level < level + 1) {
2834 				write_lock_level = level + 1;
2835 				btrfs_release_path(p);
2836 				goto again;
2837 			}
2838 
2839 			unlock_up(p, level, lowest_unlock,
2840 				  min_write_lock_level, &write_lock_level);
2841 
2842 			if (level == lowest_level) {
2843 				if (dec)
2844 					p->slots[level]++;
2845 				goto done;
2846 			}
2847 
2848 			err = read_block_for_search(root, p, &b, level,
2849 						    slot, key);
2850 			if (err == -EAGAIN)
2851 				goto again;
2852 			if (err) {
2853 				ret = err;
2854 				goto done;
2855 			}
2856 
2857 			if (!p->skip_locking) {
2858 				level = btrfs_header_level(b);
2859 				if (level <= write_lock_level) {
2860 					err = btrfs_try_tree_write_lock(b);
2861 					if (!err) {
2862 						btrfs_set_path_blocking(p);
2863 						btrfs_tree_lock(b);
2864 						btrfs_clear_path_blocking(p, b,
2865 								  BTRFS_WRITE_LOCK);
2866 					}
2867 					p->locks[level] = BTRFS_WRITE_LOCK;
2868 				} else {
2869 					err = btrfs_tree_read_lock_atomic(b);
2870 					if (!err) {
2871 						btrfs_set_path_blocking(p);
2872 						btrfs_tree_read_lock(b);
2873 						btrfs_clear_path_blocking(p, b,
2874 								  BTRFS_READ_LOCK);
2875 					}
2876 					p->locks[level] = BTRFS_READ_LOCK;
2877 				}
2878 				p->nodes[level] = b;
2879 			}
2880 		} else {
2881 			p->slots[level] = slot;
2882 			if (ins_len > 0 &&
2883 			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
2884 				if (write_lock_level < 1) {
2885 					write_lock_level = 1;
2886 					btrfs_release_path(p);
2887 					goto again;
2888 				}
2889 
2890 				btrfs_set_path_blocking(p);
2891 				err = split_leaf(trans, root, key,
2892 						 p, ins_len, ret == 0);
2893 				btrfs_clear_path_blocking(p, NULL, 0);
2894 
2895 				BUG_ON(err > 0);
2896 				if (err) {
2897 					ret = err;
2898 					goto done;
2899 				}
2900 			}
2901 			if (!p->search_for_split)
2902 				unlock_up(p, level, lowest_unlock,
2903 					  min_write_lock_level, &write_lock_level);
2904 			goto done;
2905 		}
2906 	}
2907 	ret = 1;
2908 done:
2909 	/*
2910 	 * we don't really know what they plan on doing with the path
2911 	 * from here on, so for now just mark it as blocking
2912 	 */
2913 	if (!p->leave_spinning)
2914 		btrfs_set_path_blocking(p);
2915 	if (ret < 0 && !p->skip_release_on_error)
2916 		btrfs_release_path(p);
2917 	return ret;
2918 }
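
/*
 * Illustrative sketch of a plain read-only lookup with btrfs_search_slot(),
 * following the contract documented above; the helper is hypothetical.
 * No transaction handle is needed and cow == 0 since nothing is modified.
 */
static inline int example_read_only_search(struct btrfs_root *root,
					   const struct btrfs_key *key,
					   struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* NULL trans, ins_len == 0, cow == 0: pure lookup */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
	/* ret == 1: not found, the path points at the insertion slot */

	btrfs_free_path(path);
	return ret;
}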
2919 
2920 /*
2921  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2922  * current state of the tree together with the operations recorded in the tree
2923  * modification log to search for the key in a previous version of this tree, as
2924  * denoted by the time_seq parameter.
2925  *
2926  * Naturally, there is no support for insert, delete or cow operations.
2927  *
2928  * The resulting path and return value will be set up as if we called
2929  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2930  */
2931 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2932 			  struct btrfs_path *p, u64 time_seq)
2933 {
2934 	struct btrfs_fs_info *fs_info = root->fs_info;
2935 	struct extent_buffer *b;
2936 	int slot;
2937 	int ret;
2938 	int err;
2939 	int level;
2940 	int lowest_unlock = 1;
2941 	u8 lowest_level = 0;
2942 	int prev_cmp = -1;
2943 
2944 	lowest_level = p->lowest_level;
2945 	WARN_ON(p->nodes[0] != NULL);
2946 
2947 	if (p->search_commit_root) {
2948 		BUG_ON(time_seq);
2949 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2950 	}
2951 
2952 again:
2953 	b = get_old_root(root, time_seq);
2954 	level = btrfs_header_level(b);
2955 	p->locks[level] = BTRFS_READ_LOCK;
2956 
2957 	while (b) {
2958 		level = btrfs_header_level(b);
2959 		p->nodes[level] = b;
2960 		btrfs_clear_path_blocking(p, NULL, 0);
2961 
2962 		/*
2963 		 * we have a lock on b and as long as we aren't changing
2964 		 * the tree, there is no way for the items in b to change.
2965 		 * It is safe to drop the lock on our parent before we
2966 		 * go through the expensive btree search on b.
2967 		 */
2968 		btrfs_unlock_up_safe(p, level + 1);
2969 
2970 		/*
2971 		 * Since we can unwind extent buffers we want to do a real search every
2972 		 * time.
2973 		 */
2974 		prev_cmp = -1;
2975 		ret = key_search(b, key, level, &prev_cmp, &slot);
2976 
2977 		if (level != 0) {
2978 			int dec = 0;
2979 			if (ret && slot > 0) {
2980 				dec = 1;
2981 				slot -= 1;
2982 			}
2983 			p->slots[level] = slot;
2984 			unlock_up(p, level, lowest_unlock, 0, NULL);
2985 
2986 			if (level == lowest_level) {
2987 				if (dec)
2988 					p->slots[level]++;
2989 				goto done;
2990 			}
2991 
2992 			err = read_block_for_search(root, p, &b, level,
2993 						    slot, key);
2994 			if (err == -EAGAIN)
2995 				goto again;
2996 			if (err) {
2997 				ret = err;
2998 				goto done;
2999 			}
3000 
3001 			level = btrfs_header_level(b);
3002 			err = btrfs_tree_read_lock_atomic(b);
3003 			if (!err) {
3004 				btrfs_set_path_blocking(p);
3005 				btrfs_tree_read_lock(b);
3006 				btrfs_clear_path_blocking(p, b,
3007 							  BTRFS_READ_LOCK);
3008 			}
3009 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3010 			if (!b) {
3011 				ret = -ENOMEM;
3012 				goto done;
3013 			}
3014 			p->locks[level] = BTRFS_READ_LOCK;
3015 			p->nodes[level] = b;
3016 		} else {
3017 			p->slots[level] = slot;
3018 			unlock_up(p, level, lowest_unlock, 0, NULL);
3019 			goto done;
3020 		}
3021 	}
3022 	ret = 1;
3023 done:
3024 	if (!p->leave_spinning)
3025 		btrfs_set_path_blocking(p);
3026 	if (ret < 0)
3027 		btrfs_release_path(p);
3028 
3029 	return ret;
3030 }
3031 
3032 /*
3033  * helper to use instead of search slot if no exact match is needed but
3034  * instead the next or previous item should be returned.
3035  * When find_higher is true, the next higher item is returned, the next lower
3036  * otherwise.
3037  * When return_any and find_higher are both true, and no higher item is found,
3038  * return the next lower instead.
3039  * When return_any is true and find_higher is false, and no lower item is found,
3040  * return the next higher instead.
3041  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3042  * < 0 on error
3043  */
3044 int btrfs_search_slot_for_read(struct btrfs_root *root,
3045 			       const struct btrfs_key *key,
3046 			       struct btrfs_path *p, int find_higher,
3047 			       int return_any)
3048 {
3049 	int ret;
3050 	struct extent_buffer *leaf;
3051 
3052 again:
3053 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3054 	if (ret <= 0)
3055 		return ret;
3056 	/*
3057 	 * a return value of 1 means the path is at the position where the
3058 	 * item should be inserted. Normally this is the next bigger item,
3059 	 * but in case the previous item is the last in a leaf, path points
3060 	 * to the first free slot in the previous leaf, i.e. at an invalid
3061 	 * item.
3062 	 */
3063 	leaf = p->nodes[0];
3064 
3065 	if (find_higher) {
3066 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3067 			ret = btrfs_next_leaf(root, p);
3068 			if (ret <= 0)
3069 				return ret;
3070 			if (!return_any)
3071 				return 1;
3072 			/*
3073 			 * no higher item found, return the next
3074 			 * lower instead
3075 			 */
3076 			return_any = 0;
3077 			find_higher = 0;
3078 			btrfs_release_path(p);
3079 			goto again;
3080 		}
3081 	} else {
3082 		if (p->slots[0] == 0) {
3083 			ret = btrfs_prev_leaf(root, p);
3084 			if (ret < 0)
3085 				return ret;
3086 			if (!ret) {
3087 				leaf = p->nodes[0];
3088 				if (p->slots[0] == btrfs_header_nritems(leaf))
3089 					p->slots[0]--;
3090 				return 0;
3091 			}
3092 			if (!return_any)
3093 				return 1;
3094 			/*
3095 			 * no lower item found, return the next
3096 			 * higher instead
3097 			 */
3098 			return_any = 0;
3099 			find_higher = 1;
3100 			btrfs_release_path(p);
3101 			goto again;
3102 		} else {
3103 			--p->slots[0];
3104 		}
3105 	}
3106 	return 0;
3107 }
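
/*
 * Illustrative sketch: finding the first item at or after @key with
 * btrfs_search_slot_for_read(); the helper is hypothetical.  Passing
 * find_higher == 1 and return_any == 0 means an exhausted tree simply
 * returns 1 rather than falling back to the next lower item.
 */
static inline int example_find_item_or_next(struct btrfs_root *root,
					    const struct btrfs_key *key,
					    struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

	btrfs_free_path(path);
	return ret;	/* 0: item in *found, 1: nothing at/after key */
}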
3108 
3109 /*
3110  * adjust the pointers going up the tree, starting at level
3111  * making sure the right key of each node points to 'key'.
3112  * This is used after shifting pointers to the left, so it stops
3113  * fixing up pointers when a given leaf/node is not in slot 0 of the
3114  * higher levels
3115  *
3116  */
3117 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3118 			   struct btrfs_path *path,
3119 			   struct btrfs_disk_key *key, int level)
3120 {
3121 	int i;
3122 	struct extent_buffer *t;
3123 	int ret;
3124 
3125 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3126 		int tslot = path->slots[i];
3127 
3128 		if (!path->nodes[i])
3129 			break;
3130 		t = path->nodes[i];
3131 		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3132 				GFP_ATOMIC);
3133 		BUG_ON(ret < 0);
3134 		btrfs_set_node_key(t, key, tslot);
3135 		btrfs_mark_buffer_dirty(path->nodes[i]);
3136 		if (tslot != 0)
3137 			break;
3138 	}
3139 }
3140 
3141 /*
3142  * update item key.
3143  *
3144  * This function isn't completely safe. It's the caller's responsibility
3145  * to ensure that the new key won't break the key ordering
3146  */
3147 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3148 			     struct btrfs_path *path,
3149 			     const struct btrfs_key *new_key)
3150 {
3151 	struct btrfs_disk_key disk_key;
3152 	struct extent_buffer *eb;
3153 	int slot;
3154 
3155 	eb = path->nodes[0];
3156 	slot = path->slots[0];
3157 	if (slot > 0) {
3158 		btrfs_item_key(eb, &disk_key, slot - 1);
3159 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3160 	}
3161 	if (slot < btrfs_header_nritems(eb) - 1) {
3162 		btrfs_item_key(eb, &disk_key, slot + 1);
3163 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3164 	}
3165 
3166 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3167 	btrfs_set_item_key(eb, &disk_key, slot);
3168 	btrfs_mark_buffer_dirty(eb);
3169 	if (slot == 0)
3170 		fixup_low_keys(fs_info, path, &disk_key, 1);
3171 }
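
/*
 * Worked example for the ordering rule in btrfs_set_item_key_safe()
 * (sketch with made-up keys): if a leaf holds (256 EXTENT_DATA 0)
 * followed by (256 EXTENT_DATA 8192), rewriting the first key to
 * (256 EXTENT_DATA 4096) keeps the order and is fine, while rewriting
 * it to (256 EXTENT_DATA 12288) would sort after its right neighbor
 * and trip the BUG_ON() checks above.
 */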
3172 
3173 /*
3174  * try to push data from one node into the next node left in the
3175  * tree.
3176  *
3177  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3178  * error, and > 0 if there was no room in the left hand block.
3179  */
3180 static int push_node_left(struct btrfs_trans_handle *trans,
3181 			  struct btrfs_fs_info *fs_info,
3182 			  struct extent_buffer *dst,
3183 			  struct extent_buffer *src, int empty)
3184 {
3185 	int push_items = 0;
3186 	int src_nritems;
3187 	int dst_nritems;
3188 	int ret = 0;
3189 
3190 	src_nritems = btrfs_header_nritems(src);
3191 	dst_nritems = btrfs_header_nritems(dst);
3192 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3193 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3194 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3195 
3196 	if (!empty && src_nritems <= 8)
3197 		return 1;
3198 
3199 	if (push_items <= 0)
3200 		return 1;
3201 
3202 	if (empty) {
3203 		push_items = min(src_nritems, push_items);
3204 		if (push_items < src_nritems) {
3205 			/* leave at least 8 pointers in the node if
3206 			 * we aren't going to empty it
3207 			 */
3208 			if (src_nritems - push_items < 8) {
3209 				if (push_items <= 8)
3210 					return 1;
3211 				push_items -= 8;
3212 			}
3213 		}
3214 	} else
3215 		push_items = min(src_nritems - 8, push_items);
3216 
3217 	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3218 				   push_items);
3219 	if (ret) {
3220 		btrfs_abort_transaction(trans, ret);
3221 		return ret;
3222 	}
3223 	copy_extent_buffer(dst, src,
3224 			   btrfs_node_key_ptr_offset(dst_nritems),
3225 			   btrfs_node_key_ptr_offset(0),
3226 			   push_items * sizeof(struct btrfs_key_ptr));
3227 
3228 	if (push_items < src_nritems) {
3229 		/*
3230 		 * Don't call tree_mod_log_insert_move here, key removal was
3231 		 * already fully logged by tree_mod_log_eb_copy above.
3232 		 */
3233 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3234 				      btrfs_node_key_ptr_offset(push_items),
3235 				      (src_nritems - push_items) *
3236 				      sizeof(struct btrfs_key_ptr));
3237 	}
3238 	btrfs_set_header_nritems(src, src_nritems - push_items);
3239 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3240 	btrfs_mark_buffer_dirty(src);
3241 	btrfs_mark_buffer_dirty(dst);
3242 
3243 	return ret;
3244 }
3245 
3246 /*
3247  * try to push data from one node into the next node right in the
3248  * tree.
3249  *
3250  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3251  * error, and > 0 if there was no room in the right hand block.
3252  *
3253  * this will only push up to 1/2 the contents of the left node over
3254  */
3255 static int balance_node_right(struct btrfs_trans_handle *trans,
3256 			      struct btrfs_fs_info *fs_info,
3257 			      struct extent_buffer *dst,
3258 			      struct extent_buffer *src)
3259 {
3260 	int push_items = 0;
3261 	int max_push;
3262 	int src_nritems;
3263 	int dst_nritems;
3264 	int ret = 0;
3265 
3266 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3267 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3268 
3269 	src_nritems = btrfs_header_nritems(src);
3270 	dst_nritems = btrfs_header_nritems(dst);
3271 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3272 	if (push_items <= 0)
3273 		return 1;
3274 
3275 	if (src_nritems < 4)
3276 		return 1;
3277 
3278 	max_push = src_nritems / 2 + 1;
3279 	/* don't try to empty the node */
3280 	if (max_push >= src_nritems)
3281 		return 1;
3282 
3283 	if (max_push < push_items)
3284 		push_items = max_push;
3285 
3286 	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3287 	BUG_ON(ret < 0);
3288 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3289 				      btrfs_node_key_ptr_offset(0),
3290 				      (dst_nritems) *
3291 				      sizeof(struct btrfs_key_ptr));
3292 
3293 	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3294 				   src_nritems - push_items, push_items);
3295 	if (ret) {
3296 		btrfs_abort_transaction(trans, ret);
3297 		return ret;
3298 	}
3299 	copy_extent_buffer(dst, src,
3300 			   btrfs_node_key_ptr_offset(0),
3301 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3302 			   push_items * sizeof(struct btrfs_key_ptr));
3303 
3304 	btrfs_set_header_nritems(src, src_nritems - push_items);
3305 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3306 
3307 	btrfs_mark_buffer_dirty(src);
3308 	btrfs_mark_buffer_dirty(dst);
3309 
3310 	return ret;
3311 }
3312 
3313 /*
3314  * helper function to insert a new root level in the tree.
3315  * A new node is allocated, and a single item is inserted to
3316  * point to the existing root
3317  *
3318  * returns zero on success or < 0 on failure.
3319  */
3320 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3321 			   struct btrfs_root *root,
3322 			   struct btrfs_path *path, int level)
3323 {
3324 	struct btrfs_fs_info *fs_info = root->fs_info;
3325 	u64 lower_gen;
3326 	struct extent_buffer *lower;
3327 	struct extent_buffer *c;
3328 	struct extent_buffer *old;
3329 	struct btrfs_disk_key lower_key;
3330 	int ret;
3331 
3332 	BUG_ON(path->nodes[level]);
3333 	BUG_ON(path->nodes[level-1] != root->node);
3334 
3335 	lower = path->nodes[level-1];
3336 	if (level == 1)
3337 		btrfs_item_key(lower, &lower_key, 0);
3338 	else
3339 		btrfs_node_key(lower, &lower_key, 0);
3340 
3341 	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3342 				   &lower_key, level, root->node->start, 0);
3343 	if (IS_ERR(c))
3344 		return PTR_ERR(c);
3345 
3346 	root_add_used(root, fs_info->nodesize);
3347 
3348 	memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
3349 	btrfs_set_header_nritems(c, 1);
3350 	btrfs_set_header_level(c, level);
3351 	btrfs_set_header_bytenr(c, c->start);
3352 	btrfs_set_header_generation(c, trans->transid);
3353 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3354 	btrfs_set_header_owner(c, root->root_key.objectid);
3355 
3356 	write_extent_buffer_fsid(c, fs_info->fsid);
3357 	write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
3358 
3359 	btrfs_set_node_key(c, &lower_key, 0);
3360 	btrfs_set_node_blockptr(c, 0, lower->start);
3361 	lower_gen = btrfs_header_generation(lower);
3362 	WARN_ON(lower_gen != trans->transid);
3363 
3364 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3365 
3366 	btrfs_mark_buffer_dirty(c);
3367 
3368 	old = root->node;
3369 	ret = tree_mod_log_insert_root(root->node, c, 0);
3370 	BUG_ON(ret < 0);
3371 	rcu_assign_pointer(root->node, c);
3372 
3373 	/* the super has an extra ref to root->node */
3374 	free_extent_buffer(old);
3375 
3376 	add_root_to_dirty_list(root);
3377 	extent_buffer_get(c);
3378 	path->nodes[level] = c;
3379 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3380 	path->slots[level] = 0;
3381 	return 0;
3382 }
3383 
3384 /*
3385  * worker function to insert a single pointer in a node.
3386  * the node should have enough room for the pointer already
3387  *
3388  * slot and level indicate where you want the key to go, and
3389  * bytenr is the block the key points to.
3390  */
3391 static void insert_ptr(struct btrfs_trans_handle *trans,
3392 		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3393 		       struct btrfs_disk_key *key, u64 bytenr,
3394 		       int slot, int level)
3395 {
3396 	struct extent_buffer *lower;
3397 	int nritems;
3398 	int ret;
3399 
3400 	BUG_ON(!path->nodes[level]);
3401 	btrfs_assert_tree_locked(path->nodes[level]);
3402 	lower = path->nodes[level];
3403 	nritems = btrfs_header_nritems(lower);
3404 	BUG_ON(slot > nritems);
3405 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3406 	if (slot != nritems) {
3407 		if (level) {
3408 			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3409 					nritems - slot);
3410 			BUG_ON(ret < 0);
3411 		}
3412 		memmove_extent_buffer(lower,
3413 			      btrfs_node_key_ptr_offset(slot + 1),
3414 			      btrfs_node_key_ptr_offset(slot),
3415 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3416 	}
3417 	if (level) {
3418 		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3419 				GFP_NOFS);
3420 		BUG_ON(ret < 0);
3421 	}
3422 	btrfs_set_node_key(lower, key, slot);
3423 	btrfs_set_node_blockptr(lower, slot, bytenr);
3424 	WARN_ON(trans->transid == 0);
3425 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3426 	btrfs_set_header_nritems(lower, nritems + 1);
3427 	btrfs_mark_buffer_dirty(lower);
3428 }
3429 
3430 /*
3431  * split the node at the specified level in path in two.
3432  * The path is corrected to point to the appropriate node after the split
3433  *
3434  * Before splitting this tries to make some room in the node by pushing
3435  * left and right, if either one works, it returns right away.
3436  *
3437  * returns 0 on success and < 0 on failure
3438  */
3439 static noinline int split_node(struct btrfs_trans_handle *trans,
3440 			       struct btrfs_root *root,
3441 			       struct btrfs_path *path, int level)
3442 {
3443 	struct btrfs_fs_info *fs_info = root->fs_info;
3444 	struct extent_buffer *c;
3445 	struct extent_buffer *split;
3446 	struct btrfs_disk_key disk_key;
3447 	int mid;
3448 	int ret;
3449 	u32 c_nritems;
3450 
3451 	c = path->nodes[level];
3452 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3453 	if (c == root->node) {
3454 		/*
3455 		 * trying to split the root, let's make a new one
3456 		 *
3457 		 * tree mod log: We don't log the removal of the old root in
3458 		 * insert_new_root, because that root buffer will be kept as a
3459 		 * normal node. We are going to log removal of half of the
3460 		 * elements below with tree_mod_log_eb_copy. We're holding a
3461 		 * tree lock on the buffer, which is why we cannot race with
3462 		 * other tree_mod_log users.
3463 		 */
3464 		ret = insert_new_root(trans, root, path, level + 1);
3465 		if (ret)
3466 			return ret;
3467 	} else {
3468 		ret = push_nodes_for_insert(trans, root, path, level);
3469 		c = path->nodes[level];
3470 		if (!ret && btrfs_header_nritems(c) <
3471 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3472 			return 0;
3473 		if (ret < 0)
3474 			return ret;
3475 	}
3476 
3477 	c_nritems = btrfs_header_nritems(c);
3478 	mid = (c_nritems + 1) / 2;
3479 	btrfs_node_key(c, &disk_key, mid);
3480 
3481 	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3482 			&disk_key, level, c->start, 0);
3483 	if (IS_ERR(split))
3484 		return PTR_ERR(split);
3485 
3486 	root_add_used(root, fs_info->nodesize);
3487 
3488 	memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
3489 	btrfs_set_header_level(split, btrfs_header_level(c));
3490 	btrfs_set_header_bytenr(split, split->start);
3491 	btrfs_set_header_generation(split, trans->transid);
3492 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3493 	btrfs_set_header_owner(split, root->root_key.objectid);
3494 	write_extent_buffer_fsid(split, fs_info->fsid);
3495 	write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
3496 
3497 	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3498 	if (ret) {
3499 		btrfs_abort_transaction(trans, ret);
3500 		return ret;
3501 	}
3502 	copy_extent_buffer(split, c,
3503 			   btrfs_node_key_ptr_offset(0),
3504 			   btrfs_node_key_ptr_offset(mid),
3505 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3506 	btrfs_set_header_nritems(split, c_nritems - mid);
3507 	btrfs_set_header_nritems(c, mid);
3508 	ret = 0;
3509 
3510 	btrfs_mark_buffer_dirty(c);
3511 	btrfs_mark_buffer_dirty(split);
3512 
3513 	insert_ptr(trans, fs_info, path, &disk_key, split->start,
3514 		   path->slots[level + 1] + 1, level + 1);
3515 
3516 	if (path->slots[level] >= mid) {
3517 		path->slots[level] -= mid;
3518 		btrfs_tree_unlock(c);
3519 		free_extent_buffer(c);
3520 		path->nodes[level] = split;
3521 		path->slots[level + 1] += 1;
3522 	} else {
3523 		btrfs_tree_unlock(split);
3524 		free_extent_buffer(split);
3525 	}
3526 	return ret;
3527 }
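
/*
 * Worked example for the split point in split_node() (sketch, assuming
 * 493 pointers in a full 16KiB node): mid = (493 + 1) / 2 = 247, so 247
 * pointers stay in 'c' and 493 - 247 = 246 move to 'split'; the path is
 * then fixed up to follow whichever half contains the original slot.
 */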
3528 
3529 /*
3530  * how many bytes are required to store the items in a leaf.  start
3531  * and nr indicate which items in the leaf to check.  This totals up the
3532  * space used both by the item structs and the item data
3533  */
3534 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3535 {
3536 	struct btrfs_item *start_item;
3537 	struct btrfs_item *end_item;
3538 	struct btrfs_map_token token;
3539 	int data_len;
3540 	int nritems = btrfs_header_nritems(l);
3541 	int end = min(nritems, start + nr) - 1;
3542 
3543 	if (!nr)
3544 		return 0;
3545 	btrfs_init_map_token(&token);
3546 	start_item = btrfs_item_nr(start);
3547 	end_item = btrfs_item_nr(end);
3548 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3549 		btrfs_token_item_size(l, start_item, &token);
3550 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3551 	data_len += sizeof(struct btrfs_item) * nr;
3552 	WARN_ON(data_len < 0);
3553 	return data_len;
3554 }
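
/*
 * Worked example for leaf_space_used() (sketch with made-up offsets):
 * leaf data grows downward, so for item[start] at offset 15000 with
 * size 200 and item[end] at offset 14800 with size 200, data_len is
 * (15000 + 200) - 14800 = 400 bytes of item data plus
 * 2 * sizeof(struct btrfs_item) = 50 bytes of item headers, 450 total.
 */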
3555 
3556 /*
3557  * The space between the end of the leaf items and
3558  * the start of the leaf data.  IOW, how much room
3559  * the leaf has left for both items and data
3560  */
3561 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3562 				   struct extent_buffer *leaf)
3563 {
3564 	int nritems = btrfs_header_nritems(leaf);
3565 	int ret;
3566 
3567 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3568 	if (ret < 0) {
3569 		btrfs_crit(fs_info,
3570 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3571 			   ret,
3572 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3573 			   leaf_space_used(leaf, 0, nritems), nritems);
3574 	}
3575 	return ret;
3576 }
3577 
3578 /*
3579  * min slot controls the lowest index we're willing to push to the
3580  * right.  We'll push up to and including min_slot, but no lower
3581  */
3582 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3583 				      struct btrfs_path *path,
3584 				      int data_size, int empty,
3585 				      struct extent_buffer *right,
3586 				      int free_space, u32 left_nritems,
3587 				      u32 min_slot)
3588 {
3589 	struct extent_buffer *left = path->nodes[0];
3590 	struct extent_buffer *upper = path->nodes[1];
3591 	struct btrfs_map_token token;
3592 	struct btrfs_disk_key disk_key;
3593 	int slot;
3594 	u32 i;
3595 	int push_space = 0;
3596 	int push_items = 0;
3597 	struct btrfs_item *item;
3598 	u32 nr;
3599 	u32 right_nritems;
3600 	u32 data_end;
3601 	u32 this_item_size;
3602 
3603 	btrfs_init_map_token(&token);
3604 
3605 	if (empty)
3606 		nr = 0;
3607 	else
3608 		nr = max_t(u32, 1, min_slot);
3609 
3610 	if (path->slots[0] >= left_nritems)
3611 		push_space += data_size;
3612 
3613 	slot = path->slots[1];
3614 	i = left_nritems - 1;
3615 	while (i >= nr) {
3616 		item = btrfs_item_nr(i);
3617 
3618 		if (!empty && push_items > 0) {
3619 			if (path->slots[0] > i)
3620 				break;
3621 			if (path->slots[0] == i) {
3622 				int space = btrfs_leaf_free_space(fs_info, left);
3623 				if (space + push_space * 2 > free_space)
3624 					break;
3625 			}
3626 		}
3627 
3628 		if (path->slots[0] == i)
3629 			push_space += data_size;
3630 
3631 		this_item_size = btrfs_item_size(left, item);
3632 		if (this_item_size + sizeof(*item) + push_space > free_space)
3633 			break;
3634 
3635 		push_items++;
3636 		push_space += this_item_size + sizeof(*item);
3637 		if (i == 0)
3638 			break;
3639 		i--;
3640 	}
3641 
3642 	if (push_items == 0)
3643 		goto out_unlock;
3644 
3645 	WARN_ON(!empty && push_items == left_nritems);
3646 
3647 	/* push left to right */
3648 	right_nritems = btrfs_header_nritems(right);
3649 
3650 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3651 	push_space -= leaf_data_end(fs_info, left);
3652 
3653 	/* make room in the right data area */
3654 	data_end = leaf_data_end(fs_info, right);
3655 	memmove_extent_buffer(right,
3656 			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3657 			      BTRFS_LEAF_DATA_OFFSET + data_end,
3658 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3659 
3660 	/* copy from the left data area */
3661 	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3662 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3663 		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3664 		     push_space);
3665 
3666 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3667 			      btrfs_item_nr_offset(0),
3668 			      right_nritems * sizeof(struct btrfs_item));
3669 
3670 	/* copy the items from left to right */
3671 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3672 		   btrfs_item_nr_offset(left_nritems - push_items),
3673 		   push_items * sizeof(struct btrfs_item));
3674 
3675 	/* update the item pointers */
3676 	right_nritems += push_items;
3677 	btrfs_set_header_nritems(right, right_nritems);
3678 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3679 	for (i = 0; i < right_nritems; i++) {
3680 		item = btrfs_item_nr(i);
3681 		push_space -= btrfs_token_item_size(right, item, &token);
3682 		btrfs_set_token_item_offset(right, item, push_space, &token);
3683 	}
3684 
3685 	left_nritems -= push_items;
3686 	btrfs_set_header_nritems(left, left_nritems);
3687 
3688 	if (left_nritems)
3689 		btrfs_mark_buffer_dirty(left);
3690 	else
3691 		clean_tree_block(fs_info, left);
3692 
3693 	btrfs_mark_buffer_dirty(right);
3694 
3695 	btrfs_item_key(right, &disk_key, 0);
3696 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3697 	btrfs_mark_buffer_dirty(upper);
3698 
3699 	/* then fixup the leaf pointer in the path */
3700 	if (path->slots[0] >= left_nritems) {
3701 		path->slots[0] -= left_nritems;
3702 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3703 			clean_tree_block(fs_info, path->nodes[0]);
3704 		btrfs_tree_unlock(path->nodes[0]);
3705 		free_extent_buffer(path->nodes[0]);
3706 		path->nodes[0] = right;
3707 		path->slots[1] += 1;
3708 	} else {
3709 		btrfs_tree_unlock(right);
3710 		free_extent_buffer(right);
3711 	}
3712 	return 0;
3713 
3714 out_unlock:
3715 	btrfs_tree_unlock(right);
3716 	free_extent_buffer(right);
3717 	return 1;
3718 }
3719 
3720 /*
3721  * push some data in the path leaf to the right, trying to free up at
3722  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3723  *
3724  * returns 1 if the push failed because the other node didn't have enough
3725  * room, 0 if everything worked out and < 0 if there were major errors.
3726  *
3727  * this will push starting from min_slot to the end of the leaf.  It won't
3728  * push any slot lower than min_slot
3729  */
3730 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3731 			   *root, struct btrfs_path *path,
3732 			   int min_data_size, int data_size,
3733 			   int empty, u32 min_slot)
3734 {
3735 	struct btrfs_fs_info *fs_info = root->fs_info;
3736 	struct extent_buffer *left = path->nodes[0];
3737 	struct extent_buffer *right;
3738 	struct extent_buffer *upper;
3739 	int slot;
3740 	int free_space;
3741 	u32 left_nritems;
3742 	int ret;
3743 
3744 	if (!path->nodes[1])
3745 		return 1;
3746 
3747 	slot = path->slots[1];
3748 	upper = path->nodes[1];
3749 	if (slot >= btrfs_header_nritems(upper) - 1)
3750 		return 1;
3751 
3752 	btrfs_assert_tree_locked(path->nodes[1]);
3753 
3754 	right = read_node_slot(fs_info, upper, slot + 1);
3755 	/*
3756 	 * If slot + 1 is not valid or we failed to read the right node,
3757 	 * it's no big deal, just return.
3758 	 */
3759 	if (IS_ERR(right))
3760 		return 1;
3761 
3762 	btrfs_tree_lock(right);
3763 	btrfs_set_lock_blocking(right);
3764 
3765 	free_space = btrfs_leaf_free_space(fs_info, right);
3766 	if (free_space < data_size)
3767 		goto out_unlock;
3768 
3769 	/* cow and double check */
3770 	ret = btrfs_cow_block(trans, root, right, upper,
3771 			      slot + 1, &right);
3772 	if (ret)
3773 		goto out_unlock;
3774 
3775 	free_space = btrfs_leaf_free_space(fs_info, right);
3776 	if (free_space < data_size)
3777 		goto out_unlock;
3778 
3779 	left_nritems = btrfs_header_nritems(left);
3780 	if (left_nritems == 0)
3781 		goto out_unlock;
3782 
3783 	if (path->slots[0] == left_nritems && !empty) {
3784 		/* Key greater than all keys in the leaf, right neighbor has
3785 		 * enough room for it and we're not emptying our leaf to delete
3786 		 * it, therefore use right neighbor to insert the new item and
3787 		 * no need to touch/dirty our left leaf. */
3788 		btrfs_tree_unlock(left);
3789 		free_extent_buffer(left);
3790 		path->nodes[0] = right;
3791 		path->slots[0] = 0;
3792 		path->slots[1]++;
3793 		return 0;
3794 	}
3795 
3796 	return __push_leaf_right(fs_info, path, min_data_size, empty,
3797 				right, free_space, left_nritems, min_slot);
3798 out_unlock:
3799 	btrfs_tree_unlock(right);
3800 	free_extent_buffer(right);
3801 	return 1;
3802 }
3803 
3804 /*
3805  * push some data in the path leaf to the left, trying to free up at
3806  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3807  *
3808  * max_slot can put a limit on how far into the leaf we'll push items.  The
3809  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
3810  * items
3811  */
3812 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3813 				     struct btrfs_path *path, int data_size,
3814 				     int empty, struct extent_buffer *left,
3815 				     int free_space, u32 right_nritems,
3816 				     u32 max_slot)
3817 {
3818 	struct btrfs_disk_key disk_key;
3819 	struct extent_buffer *right = path->nodes[0];
3820 	int i;
3821 	int push_space = 0;
3822 	int push_items = 0;
3823 	struct btrfs_item *item;
3824 	u32 old_left_nritems;
3825 	u32 nr;
3826 	int ret = 0;
3827 	u32 this_item_size;
3828 	u32 old_left_item_size;
3829 	struct btrfs_map_token token;
3830 
3831 	btrfs_init_map_token(&token);
3832 
3833 	if (empty)
3834 		nr = min(right_nritems, max_slot);
3835 	else
3836 		nr = min(right_nritems - 1, max_slot);
3837 
3838 	for (i = 0; i < nr; i++) {
3839 		item = btrfs_item_nr(i);
3840 
3841 		if (!empty && push_items > 0) {
3842 			if (path->slots[0] < i)
3843 				break;
3844 			if (path->slots[0] == i) {
3845 				int space = btrfs_leaf_free_space(fs_info, right);
3846 				if (space + push_space * 2 > free_space)
3847 					break;
3848 			}
3849 		}
3850 
3851 		if (path->slots[0] == i)
3852 			push_space += data_size;
3853 
3854 		this_item_size = btrfs_item_size(right, item);
3855 		if (this_item_size + sizeof(*item) + push_space > free_space)
3856 			break;
3857 
3858 		push_items++;
3859 		push_space += this_item_size + sizeof(*item);
3860 	}
3861 
3862 	if (push_items == 0) {
3863 		ret = 1;
3864 		goto out;
3865 	}
3866 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3867 
3868 	/* push data from right to left */
3869 	copy_extent_buffer(left, right,
3870 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3871 			   btrfs_item_nr_offset(0),
3872 			   push_items * sizeof(struct btrfs_item));
3873 
3874 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3875 		     btrfs_item_offset_nr(right, push_items - 1);
3876 
3877 	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3878 		     leaf_data_end(fs_info, left) - push_space,
3879 		     BTRFS_LEAF_DATA_OFFSET +
3880 		     btrfs_item_offset_nr(right, push_items - 1),
3881 		     push_space);
3882 	old_left_nritems = btrfs_header_nritems(left);
3883 	BUG_ON(old_left_nritems <= 0);
3884 
3885 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3886 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3887 		u32 ioff;
3888 
3889 		item = btrfs_item_nr(i);
3890 
3891 		ioff = btrfs_token_item_offset(left, item, &token);
3892 		btrfs_set_token_item_offset(left, item,
3893 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3894 		      &token);
3895 	}
3896 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3897 
3898 	/* fixup right node */
3899 	if (push_items > right_nritems)
3900 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3901 		       right_nritems);
3902 
3903 	if (push_items < right_nritems) {
3904 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3905 						  leaf_data_end(fs_info, right);
3906 		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3907 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3908 				      BTRFS_LEAF_DATA_OFFSET +
3909 				      leaf_data_end(fs_info, right), push_space);
3910 
3911 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3912 			      btrfs_item_nr_offset(push_items),
3913 			     (btrfs_header_nritems(right) - push_items) *
3914 			     sizeof(struct btrfs_item));
3915 	}
3916 	right_nritems -= push_items;
3917 	btrfs_set_header_nritems(right, right_nritems);
3918 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3919 	for (i = 0; i < right_nritems; i++) {
3920 		item = btrfs_item_nr(i);
3921 
3922 		push_space = push_space - btrfs_token_item_size(right,
3923 								item, &token);
3924 		btrfs_set_token_item_offset(right, item, push_space, &token);
3925 	}
3926 
3927 	btrfs_mark_buffer_dirty(left);
3928 	if (right_nritems)
3929 		btrfs_mark_buffer_dirty(right);
3930 	else
3931 		clean_tree_block(fs_info, right);
3932 
3933 	btrfs_item_key(right, &disk_key, 0);
3934 	fixup_low_keys(fs_info, path, &disk_key, 1);
3935 
3936 	/* then fixup the leaf pointer in the path */
3937 	if (path->slots[0] < push_items) {
3938 		path->slots[0] += old_left_nritems;
3939 		btrfs_tree_unlock(path->nodes[0]);
3940 		free_extent_buffer(path->nodes[0]);
3941 		path->nodes[0] = left;
3942 		path->slots[1] -= 1;
3943 	} else {
3944 		btrfs_tree_unlock(left);
3945 		free_extent_buffer(left);
3946 		path->slots[0] -= push_items;
3947 	}
3948 	BUG_ON(path->slots[0] < 0);
3949 	return ret;
3950 out:
3951 	btrfs_tree_unlock(left);
3952 	free_extent_buffer(left);
3953 	return ret;
3954 }
3955 
3956 /*
3957  * push some data in the path leaf to the left, trying to free up at
3958  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3959  *
3960  * max_slot can put a limit on how far into the leaf we'll push items.  The
3961  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3962  * items
3963  */
3964 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3965 			  *root, struct btrfs_path *path, int min_data_size,
3966 			  int data_size, int empty, u32 max_slot)
3967 {
3968 	struct btrfs_fs_info *fs_info = root->fs_info;
3969 	struct extent_buffer *right = path->nodes[0];
3970 	struct extent_buffer *left;
3971 	int slot;
3972 	int free_space;
3973 	u32 right_nritems;
3974 	int ret = 0;
3975 
3976 	slot = path->slots[1];
3977 	if (slot == 0)
3978 		return 1;
3979 	if (!path->nodes[1])
3980 		return 1;
3981 
3982 	right_nritems = btrfs_header_nritems(right);
3983 	if (right_nritems == 0)
3984 		return 1;
3985 
3986 	btrfs_assert_tree_locked(path->nodes[1]);
3987 
3988 	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
3989 	/*
3990 	 * If slot - 1 is not valid or we failed to read the left node,
3991 	 * it's no big deal, just return.
3992 	 */
3993 	if (IS_ERR(left))
3994 		return 1;
3995 
3996 	btrfs_tree_lock(left);
3997 	btrfs_set_lock_blocking(left);
3998 
3999 	free_space = btrfs_leaf_free_space(fs_info, left);
4000 	if (free_space < data_size) {
4001 		ret = 1;
4002 		goto out;
4003 	}
4004 
4005 	/* cow and double check */
4006 	ret = btrfs_cow_block(trans, root, left,
4007 			      path->nodes[1], slot - 1, &left);
4008 	if (ret) {
4009 		/* we hit -ENOSPC, but it isn't fatal here */
4010 		if (ret == -ENOSPC)
4011 			ret = 1;
4012 		goto out;
4013 	}
4014 
4015 	free_space = btrfs_leaf_free_space(fs_info, left);
4016 	if (free_space < data_size) {
4017 		ret = 1;
4018 		goto out;
4019 	}
4020 
4021 	return __push_leaf_left(fs_info, path, min_data_size,
4022 			       empty, left, free_space, right_nritems,
4023 			       max_slot);
4024 out:
4025 	btrfs_tree_unlock(left);
4026 	free_extent_buffer(left);
4027 	return ret;
4028 }
4029 
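/*
 * Illustrative sketch: the usual caller pattern for the two push
 * helpers above.  A return of 1 only means "that side had no room",
 * so callers try the other direction before splitting; < 0 is a hard
 * error.  try_push_both() is a hypothetical wrapper; split_leaf()
 * below implements this pattern for real.
 */
static int try_push_both(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct btrfs_path *path,
			 int data_size)
{
	int ret;

	ret = push_leaf_right(trans, root, path, data_size, data_size, 0, 0);
	if (ret <= 0)
		return ret;	/* pushed (0) or hard error (< 0) */

	/* the right neighbor was full, try the left one */
	return push_leaf_left(trans, root, path, data_size, data_size, 0,
			      (u32)-1);
}
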
4030 /*
4031  * split the path's leaf in two, making sure there is at least data_size
4032  * available for the resulting leaf level of the path.
4033  */
4034 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4035 				    struct btrfs_fs_info *fs_info,
4036 				    struct btrfs_path *path,
4037 				    struct extent_buffer *l,
4038 				    struct extent_buffer *right,
4039 				    int slot, int mid, int nritems)
4040 {
4041 	int data_copy_size;
4042 	int rt_data_off;
4043 	int i;
4044 	struct btrfs_disk_key disk_key;
4045 	struct btrfs_map_token token;
4046 
4047 	btrfs_init_map_token(&token);
4048 
4049 	nritems = nritems - mid;
4050 	btrfs_set_header_nritems(right, nritems);
4051 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4052 
4053 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4054 			   btrfs_item_nr_offset(mid),
4055 			   nritems * sizeof(struct btrfs_item));
4056 
4057 	copy_extent_buffer(right, l,
4058 		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4059 		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4060 		     leaf_data_end(fs_info, l), data_copy_size);
4061 
4062 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4063 
4064 	for (i = 0; i < nritems; i++) {
4065 		struct btrfs_item *item = btrfs_item_nr(i);
4066 		u32 ioff;
4067 
4068 		ioff = btrfs_token_item_offset(right, item, &token);
4069 		btrfs_set_token_item_offset(right, item,
4070 					    ioff + rt_data_off, &token);
4071 	}
4072 
4073 	btrfs_set_header_nritems(l, mid);
4074 	btrfs_item_key(right, &disk_key, 0);
4075 	insert_ptr(trans, fs_info, path, &disk_key, right->start,
4076 		   path->slots[1] + 1, 1);
4077 
4078 	btrfs_mark_buffer_dirty(right);
4079 	btrfs_mark_buffer_dirty(l);
4080 	BUG_ON(path->slots[0] != slot);
4081 
4082 	if (mid <= slot) {
4083 		btrfs_tree_unlock(path->nodes[0]);
4084 		free_extent_buffer(path->nodes[0]);
4085 		path->nodes[0] = right;
4086 		path->slots[0] -= mid;
4087 		path->slots[1] += 1;
4088 	} else {
4089 		btrfs_tree_unlock(right);
4090 		free_extent_buffer(right);
4091 	}
4092 
4093 	BUG_ON(path->slots[0] < 0);
4094 }
4095 
4096 /*
4097  * double splits happen when we need to insert a big item in the middle
4098  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4099  * leaf: [ slots 0 - N ] [ our target ] [ N + 1 - total in leaf ]
4100  *          A                 B                 C
4101  *
4102  * We avoid this by trying to push the items on either side of our target
4103  * into the adjacent leaves.  If all goes well we can avoid the double split
4104  * completely.
4105  */
4106 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4107 					  struct btrfs_root *root,
4108 					  struct btrfs_path *path,
4109 					  int data_size)
4110 {
4111 	struct btrfs_fs_info *fs_info = root->fs_info;
4112 	int ret;
4113 	int progress = 0;
4114 	int slot;
4115 	u32 nritems;
4116 	int space_needed = data_size;
4117 
4118 	slot = path->slots[0];
4119 	if (slot < btrfs_header_nritems(path->nodes[0]))
4120 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4121 
4122 	/*
4123 	 * try to push all the items after our slot into the
4124 	 * right leaf
4125 	 */
4126 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4127 	if (ret < 0)
4128 		return ret;
4129 
4130 	if (ret == 0)
4131 		progress++;
4132 
4133 	nritems = btrfs_header_nritems(path->nodes[0]);
4134 	/*
4135 	 * our goal is to get our slot at the start or end of a leaf.  If
4136 	 * we've done so, we're done
4137 	 */
4138 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4139 		return 0;
4140 
4141 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4142 		return 0;
4143 
4144 	/* try to push all the items before our slot into the left leaf */
4145 	slot = path->slots[0];
4146 	space_needed = data_size;
4147 	if (slot > 0)
4148 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4149 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4150 	if (ret < 0)
4151 		return ret;
4152 
4153 	if (ret == 0)
4154 		progress++;
4155 
4156 	if (progress)
4157 		return 0;
4158 	return 1;
4159 }
4160 
4161 /*
4162  * split the path's leaf in two, making sure there is at least data_size
4163  * available for the resulting leaf level of the path.
4164  *
4165  * returns 0 if all went well and < 0 on failure.
4166  */
4167 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4168 			       struct btrfs_root *root,
4169 			       const struct btrfs_key *ins_key,
4170 			       struct btrfs_path *path, int data_size,
4171 			       int extend)
4172 {
4173 	struct btrfs_disk_key disk_key;
4174 	struct extent_buffer *l;
4175 	u32 nritems;
4176 	int mid;
4177 	int slot;
4178 	struct extent_buffer *right;
4179 	struct btrfs_fs_info *fs_info = root->fs_info;
4180 	int ret = 0;
4181 	int wret;
4182 	int split;
4183 	int num_doubles = 0;
4184 	int tried_avoid_double = 0;
4185 
4186 	l = path->nodes[0];
4187 	slot = path->slots[0];
4188 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4189 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4190 		return -EOVERFLOW;
4191 
4192 	/* first try to make some room by pushing left and right */
4193 	if (data_size && path->nodes[1]) {
4194 		int space_needed = data_size;
4195 
4196 		if (slot < btrfs_header_nritems(l))
4197 			space_needed -= btrfs_leaf_free_space(fs_info, l);
4198 
4199 		wret = push_leaf_right(trans, root, path, space_needed,
4200 				       space_needed, 0, 0);
4201 		if (wret < 0)
4202 			return wret;
4203 		if (wret) {
4204 			space_needed = data_size;
4205 			if (slot > 0)
4206 				space_needed -= btrfs_leaf_free_space(fs_info,
4207 								      l);
4208 			wret = push_leaf_left(trans, root, path, space_needed,
4209 					      space_needed, 0, (u32)-1);
4210 			if (wret < 0)
4211 				return wret;
4212 		}
4213 		l = path->nodes[0];
4214 
4215 		/* did the pushes work? */
4216 		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4217 			return 0;
4218 	}
4219 
4220 	if (!path->nodes[1]) {
4221 		ret = insert_new_root(trans, root, path, 1);
4222 		if (ret)
4223 			return ret;
4224 	}
4225 again:
4226 	split = 1;
4227 	l = path->nodes[0];
4228 	slot = path->slots[0];
4229 	nritems = btrfs_header_nritems(l);
4230 	mid = (nritems + 1) / 2;
4231 
4232 	if (mid <= slot) {
4233 		if (nritems == 1 ||
4234 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4235 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4236 			if (slot >= nritems) {
4237 				split = 0;
4238 			} else {
4239 				mid = slot;
4240 				if (mid != nritems &&
4241 				    leaf_space_used(l, mid, nritems - mid) +
4242 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4243 					if (data_size && !tried_avoid_double)
4244 						goto push_for_double;
4245 					split = 2;
4246 				}
4247 			}
4248 		}
4249 	} else {
4250 		if (leaf_space_used(l, 0, mid) + data_size >
4251 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4252 			if (!extend && data_size && slot == 0) {
4253 				split = 0;
4254 			} else if ((extend || !data_size) && slot == 0) {
4255 				mid = 1;
4256 			} else {
4257 				mid = slot;
4258 				if (mid != nritems &&
4259 				    leaf_space_used(l, mid, nritems - mid) +
4260 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4261 					if (data_size && !tried_avoid_double)
4262 						goto push_for_double;
4263 					split = 2;
4264 				}
4265 			}
4266 		}
4267 	}
4268 
4269 	if (split == 0)
4270 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4271 	else
4272 		btrfs_item_key(l, &disk_key, mid);
4273 
4274 	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4275 			&disk_key, 0, l->start, 0);
4276 	if (IS_ERR(right))
4277 		return PTR_ERR(right);
4278 
4279 	root_add_used(root, fs_info->nodesize);
4280 
4281 	memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
4282 	btrfs_set_header_bytenr(right, right->start);
4283 	btrfs_set_header_generation(right, trans->transid);
4284 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4285 	btrfs_set_header_owner(right, root->root_key.objectid);
4286 	btrfs_set_header_level(right, 0);
4287 	write_extent_buffer_fsid(right, fs_info->fsid);
4288 	write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
4289 
4290 	if (split == 0) {
4291 		if (mid <= slot) {
4292 			btrfs_set_header_nritems(right, 0);
4293 			insert_ptr(trans, fs_info, path, &disk_key,
4294 				   right->start, path->slots[1] + 1, 1);
4295 			btrfs_tree_unlock(path->nodes[0]);
4296 			free_extent_buffer(path->nodes[0]);
4297 			path->nodes[0] = right;
4298 			path->slots[0] = 0;
4299 			path->slots[1] += 1;
4300 		} else {
4301 			btrfs_set_header_nritems(right, 0);
4302 			insert_ptr(trans, fs_info, path, &disk_key,
4303 				   right->start, path->slots[1], 1);
4304 			btrfs_tree_unlock(path->nodes[0]);
4305 			free_extent_buffer(path->nodes[0]);
4306 			path->nodes[0] = right;
4307 			path->slots[0] = 0;
4308 			if (path->slots[1] == 0)
4309 				fixup_low_keys(fs_info, path, &disk_key, 1);
4310 		}
4311 		/*
4312 		 * We created the new leaf 'right' for the required ins_len;
4313 		 * the caller will btrfs_mark_buffer_dirty() it after copying
4314 		 * the new item's content into it.
4315 		 */
4316 		return ret;
4317 	}
4318 
4319 	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4320 
4321 	if (split == 2) {
4322 		BUG_ON(num_doubles != 0);
4323 		num_doubles++;
4324 		goto again;
4325 	}
4326 
4327 	return 0;
4328 
4329 push_for_double:
4330 	push_for_double_split(trans, root, path, data_size);
4331 	tried_avoid_double = 1;
4332 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4333 		return 0;
4334 	goto again;
4335 }
4336 
4337 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4338 					 struct btrfs_root *root,
4339 					 struct btrfs_path *path, int ins_len)
4340 {
4341 	struct btrfs_fs_info *fs_info = root->fs_info;
4342 	struct btrfs_key key;
4343 	struct extent_buffer *leaf;
4344 	struct btrfs_file_extent_item *fi;
4345 	u64 extent_len = 0;
4346 	u32 item_size;
4347 	int ret;
4348 
4349 	leaf = path->nodes[0];
4350 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4351 
4352 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4353 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4354 
4355 	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4356 		return 0;
4357 
4358 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4359 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4360 		fi = btrfs_item_ptr(leaf, path->slots[0],
4361 				    struct btrfs_file_extent_item);
4362 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4363 	}
4364 	btrfs_release_path(path);
4365 
4366 	path->keep_locks = 1;
4367 	path->search_for_split = 1;
4368 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4369 	path->search_for_split = 0;
4370 	if (ret > 0)
4371 		ret = -EAGAIN;
4372 	if (ret < 0)
4373 		goto err;
4374 
4375 	ret = -EAGAIN;
4376 	leaf = path->nodes[0];
4377 	/* if our item isn't there, return now */
4378 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4379 		goto err;
4380 
4381 	/* the leaf has changed, it now has room.  return now */
4382 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4383 		goto err;
4384 
4385 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4386 		fi = btrfs_item_ptr(leaf, path->slots[0],
4387 				    struct btrfs_file_extent_item);
4388 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4389 			goto err;
4390 	}
4391 
4392 	btrfs_set_path_blocking(path);
4393 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4394 	if (ret)
4395 		goto err;
4396 
4397 	path->keep_locks = 0;
4398 	btrfs_unlock_up_safe(path, 1);
4399 	return 0;
4400 err:
4401 	path->keep_locks = 0;
4402 	return ret;
4403 }
4404 
4405 static noinline int split_item(struct btrfs_fs_info *fs_info,
4406 			       struct btrfs_path *path,
4407 			       const struct btrfs_key *new_key,
4408 			       unsigned long split_offset)
4409 {
4410 	struct extent_buffer *leaf;
4411 	struct btrfs_item *item;
4412 	struct btrfs_item *new_item;
4413 	int slot;
4414 	char *buf;
4415 	u32 nritems;
4416 	u32 item_size;
4417 	u32 orig_offset;
4418 	struct btrfs_disk_key disk_key;
4419 
4420 	leaf = path->nodes[0];
4421 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4422 
4423 	btrfs_set_path_blocking(path);
4424 
4425 	item = btrfs_item_nr(path->slots[0]);
4426 	orig_offset = btrfs_item_offset(leaf, item);
4427 	item_size = btrfs_item_size(leaf, item);
4428 
4429 	buf = kmalloc(item_size, GFP_NOFS);
4430 	if (!buf)
4431 		return -ENOMEM;
4432 
4433 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4434 			    path->slots[0]), item_size);
4435 
4436 	slot = path->slots[0] + 1;
4437 	nritems = btrfs_header_nritems(leaf);
4438 	if (slot != nritems) {
4439 		/* shift the items */
4440 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4441 				btrfs_item_nr_offset(slot),
4442 				(nritems - slot) * sizeof(struct btrfs_item));
4443 	}
4444 
4445 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4446 	btrfs_set_item_key(leaf, &disk_key, slot);
4447 
4448 	new_item = btrfs_item_nr(slot);
4449 
4450 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4451 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4452 
4453 	btrfs_set_item_offset(leaf, item,
4454 			      orig_offset + item_size - split_offset);
4455 	btrfs_set_item_size(leaf, item, split_offset);
4456 
4457 	btrfs_set_header_nritems(leaf, nritems + 1);
4458 
4459 	/* write the data for the start of the original item */
4460 	write_extent_buffer(leaf, buf,
4461 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4462 			    split_offset);
4463 
4464 	/* write the data for the new item */
4465 	write_extent_buffer(leaf, buf + split_offset,
4466 			    btrfs_item_ptr_offset(leaf, slot),
4467 			    item_size - split_offset);
4468 	btrfs_mark_buffer_dirty(leaf);
4469 
4470 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4471 	kfree(buf);
4472 	return 0;
4473 }
4474 
4475 /*
4476  * This function splits a single item into two items,
4477  * giving 'new_key' to the new item and splitting the
4478  * old one at split_offset (from the start of the item).
4479  *
4480  * The path may be released by this operation.  After
4481  * the split, the path is pointing to the old item.  The
4482  * new item is going to be in the same node as the old one.
4483  *
4484  * Note, the item being split must be small enough to live alone on
4485  * a tree block with room for one extra struct btrfs_item
4486  *
4487  * This allows us to split the item in place, keeping a lock on the
4488  * leaf the entire time.
4489  */
4490 int btrfs_split_item(struct btrfs_trans_handle *trans,
4491 		     struct btrfs_root *root,
4492 		     struct btrfs_path *path,
4493 		     const struct btrfs_key *new_key,
4494 		     unsigned long split_offset)
4495 {
4496 	int ret;
4497 	ret = setup_leaf_for_split(trans, root, path,
4498 				   sizeof(struct btrfs_item));
4499 	if (ret)
4500 		return ret;
4501 
4502 	ret = split_item(root->fs_info, path, new_key, split_offset);
4503 	return ret;
4504 }
4505 
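/*
 * Illustrative sketch: splitting a checksum item in two with
 * btrfs_split_item().  Assumes 'path' already points at a
 * BTRFS_EXTENT_CSUM_KEY item found via btrfs_search_slot(); the new
 * key offset and the halfway split point are made up for the example,
 * and split_csum_item_example() is a hypothetical name.
 */
static int split_csum_item_example(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;	/* key of the second half */

	/* split at half of the item payload, as an example */
	return btrfs_split_item(trans, root, path, &new_key,
			btrfs_item_size_nr(path->nodes[0],
					   path->slots[0]) / 2);
}
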
4506 /*
4507  * This function duplicates an item, giving 'new_key' to the new item.
4508  * It guarantees both items live in the same tree leaf and the new item
4509  * is contiguous with the original item.
4510  *
4511  * This allows us to split a file extent in place, keeping a lock on the
4512  * leaf the entire time.
4513  */
4514 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4515 			 struct btrfs_root *root,
4516 			 struct btrfs_path *path,
4517 			 const struct btrfs_key *new_key)
4518 {
4519 	struct extent_buffer *leaf;
4520 	int ret;
4521 	u32 item_size;
4522 
4523 	leaf = path->nodes[0];
4524 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4525 	ret = setup_leaf_for_split(trans, root, path,
4526 				   item_size + sizeof(struct btrfs_item));
4527 	if (ret)
4528 		return ret;
4529 
4530 	path->slots[0]++;
4531 	setup_items_for_insert(root, path, new_key, &item_size,
4532 			       item_size, item_size +
4533 			       sizeof(struct btrfs_item), 1);
4534 	leaf = path->nodes[0];
4535 	memcpy_extent_buffer(leaf,
4536 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4537 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4538 			     item_size);
4539 	return 0;
4540 }
4541 
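/*
 * Illustrative sketch: duplicating a file extent item so the copy can
 * be trimmed afterwards, which is how in-place extent splits work.
 * Assumes 'path' points at a BTRFS_EXTENT_DATA_KEY item; only the key
 * offset of the duplicate changes.  dup_file_extent_example() is a
 * hypothetical name.
 */
static int dup_file_extent_example(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, u64 split_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = split_offset;	/* file offset of the second piece */

	/* on success, path->slots[0] points at the new (second) item */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}
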
4542 /*
4543  * make the item pointed to by the path smaller.  new_size indicates
4544  * how small to make it, and from_end tells us if we just chop bytes
4545  * off the end of the item or if we shift the item to chop bytes off
4546  * the front.
4547  */
4548 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4549 			 struct btrfs_path *path, u32 new_size, int from_end)
4550 {
4551 	int slot;
4552 	struct extent_buffer *leaf;
4553 	struct btrfs_item *item;
4554 	u32 nritems;
4555 	unsigned int data_end;
4556 	unsigned int old_data_start;
4557 	unsigned int old_size;
4558 	unsigned int size_diff;
4559 	int i;
4560 	struct btrfs_map_token token;
4561 
4562 	btrfs_init_map_token(&token);
4563 
4564 	leaf = path->nodes[0];
4565 	slot = path->slots[0];
4566 
4567 	old_size = btrfs_item_size_nr(leaf, slot);
4568 	if (old_size == new_size)
4569 		return;
4570 
4571 	nritems = btrfs_header_nritems(leaf);
4572 	data_end = leaf_data_end(fs_info, leaf);
4573 
4574 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4575 
4576 	size_diff = old_size - new_size;
4577 
4578 	BUG_ON(slot < 0);
4579 	BUG_ON(slot >= nritems);
4580 
4581 	/*
4582 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4583 	 */
4584 	/* first correct the data pointers */
4585 	for (i = slot; i < nritems; i++) {
4586 		u32 ioff;
4587 		item = btrfs_item_nr(i);
4588 
4589 		ioff = btrfs_token_item_offset(leaf, item, &token);
4590 		btrfs_set_token_item_offset(leaf, item,
4591 					    ioff + size_diff, &token);
4592 	}
4593 
4594 	/* shift the data */
4595 	if (from_end) {
4596 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4597 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4598 			      data_end, old_data_start + new_size - data_end);
4599 	} else {
4600 		struct btrfs_disk_key disk_key;
4601 		u64 offset;
4602 
4603 		btrfs_item_key(leaf, &disk_key, slot);
4604 
4605 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4606 			unsigned long ptr;
4607 			struct btrfs_file_extent_item *fi;
4608 
4609 			fi = btrfs_item_ptr(leaf, slot,
4610 					    struct btrfs_file_extent_item);
4611 			fi = (struct btrfs_file_extent_item *)(
4612 			     (unsigned long)fi - size_diff);
4613 
4614 			if (btrfs_file_extent_type(leaf, fi) ==
4615 			    BTRFS_FILE_EXTENT_INLINE) {
4616 				ptr = btrfs_item_ptr_offset(leaf, slot);
4617 				memmove_extent_buffer(leaf, ptr,
4618 				      (unsigned long)fi,
4619 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4620 			}
4621 		}
4622 
4623 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4624 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4625 			      data_end, old_data_start - data_end);
4626 
4627 		offset = btrfs_disk_key_offset(&disk_key);
4628 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4629 		btrfs_set_item_key(leaf, &disk_key, slot);
4630 		if (slot == 0)
4631 			fixup_low_keys(fs_info, path, &disk_key, 1);
4632 	}
4633 
4634 	item = btrfs_item_nr(slot);
4635 	btrfs_set_item_size(leaf, item, new_size);
4636 	btrfs_mark_buffer_dirty(leaf);
4637 
4638 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4639 		btrfs_print_leaf(leaf);
4640 		BUG();
4641 	}
4642 }
4643 
4644 /*
4645  * make the item pointed to by the path bigger, data_size is the added size.
4646  */
4647 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4648 		       u32 data_size)
4649 {
4650 	int slot;
4651 	struct extent_buffer *leaf;
4652 	struct btrfs_item *item;
4653 	u32 nritems;
4654 	unsigned int data_end;
4655 	unsigned int old_data;
4656 	unsigned int old_size;
4657 	int i;
4658 	struct btrfs_map_token token;
4659 
4660 	btrfs_init_map_token(&token);
4661 
4662 	leaf = path->nodes[0];
4663 
4664 	nritems = btrfs_header_nritems(leaf);
4665 	data_end = leaf_data_end(fs_info, leaf);
4666 
4667 	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4668 		btrfs_print_leaf(leaf);
4669 		BUG();
4670 	}
4671 	slot = path->slots[0];
4672 	old_data = btrfs_item_end_nr(leaf, slot);
4673 
4674 	BUG_ON(slot < 0);
4675 	if (slot >= nritems) {
4676 		btrfs_print_leaf(leaf);
4677 		btrfs_crit(fs_info, "slot %d too large, nritems %d",
4678 			   slot, nritems);
4679 		BUG_ON(1);
4680 	}
4681 
4682 	/*
4683 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4684 	 */
4685 	/* first correct the data pointers */
4686 	for (i = slot; i < nritems; i++) {
4687 		u32 ioff;
4688 		item = btrfs_item_nr(i);
4689 
4690 		ioff = btrfs_token_item_offset(leaf, item, &token);
4691 		btrfs_set_token_item_offset(leaf, item,
4692 					    ioff - data_size, &token);
4693 	}
4694 
4695 	/* shift the data */
4696 	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4697 		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4698 		      data_end, old_data - data_end);
4699 
4700 	data_end = old_data;
4701 	old_size = btrfs_item_size_nr(leaf, slot);
4702 	item = btrfs_item_nr(slot);
4703 	btrfs_set_item_size(leaf, item, old_size + data_size);
4704 	btrfs_mark_buffer_dirty(leaf);
4705 
4706 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4707 		btrfs_print_leaf(leaf);
4708 		BUG();
4709 	}
4710 }
4711 
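/*
 * Illustrative sketch: growing an item in place and writing into the
 * newly added tail.  The caller must have searched to the item with
 * enough reserved space in the leaf, otherwise btrfs_extend_item()
 * will BUG on insufficient room.  The u64 growth and the name
 * extend_item_example() are made up for the example.
 */
static void extend_item_example(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	unsigned long ptr;
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	btrfs_extend_item(fs_info, path, sizeof(extra));

	/* the old payload keeps its relative start; new bytes are the tail */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]) + old_size;
	write_extent_buffer(leaf, &extra, ptr, sizeof(extra));
	btrfs_mark_buffer_dirty(leaf);
}
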
4712 /*
4713  * this is a helper for btrfs_insert_empty_items, the main goal here is
4714  * to save stack depth by doing the bulk of the work in a function
4715  * that doesn't call btrfs_search_slot
4716  */
4717 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4718 			    const struct btrfs_key *cpu_key, u32 *data_size,
4719 			    u32 total_data, u32 total_size, int nr)
4720 {
4721 	struct btrfs_fs_info *fs_info = root->fs_info;
4722 	struct btrfs_item *item;
4723 	int i;
4724 	u32 nritems;
4725 	unsigned int data_end;
4726 	struct btrfs_disk_key disk_key;
4727 	struct extent_buffer *leaf;
4728 	int slot;
4729 	struct btrfs_map_token token;
4730 
4731 	if (path->slots[0] == 0) {
4732 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4733 		fixup_low_keys(fs_info, path, &disk_key, 1);
4734 	}
4735 	btrfs_unlock_up_safe(path, 1);
4736 
4737 	btrfs_init_map_token(&token);
4738 
4739 	leaf = path->nodes[0];
4740 	slot = path->slots[0];
4741 
4742 	nritems = btrfs_header_nritems(leaf);
4743 	data_end = leaf_data_end(fs_info, leaf);
4744 
4745 	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4746 		btrfs_print_leaf(leaf);
4747 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4748 			   total_size, btrfs_leaf_free_space(fs_info, leaf));
4749 		BUG();
4750 	}
4751 
4752 	if (slot != nritems) {
4753 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4754 
4755 		if (old_data < data_end) {
4756 			btrfs_print_leaf(leaf);
4757 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4758 				   slot, old_data, data_end);
4759 			BUG_ON(1);
4760 		}
4761 		/*
4762 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4763 		 */
4764 		/* first correct the data pointers */
4765 		for (i = slot; i < nritems; i++) {
4766 			u32 ioff;
4767 
4768 			item = btrfs_item_nr(i);
4769 			ioff = btrfs_token_item_offset(leaf, item, &token);
4770 			btrfs_set_token_item_offset(leaf, item,
4771 						    ioff - total_data, &token);
4772 		}
4773 		/* shift the items */
4774 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4775 			      btrfs_item_nr_offset(slot),
4776 			      (nritems - slot) * sizeof(struct btrfs_item));
4777 
4778 		/* shift the data */
4779 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4780 			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4781 			      data_end, old_data - data_end);
4782 		data_end = old_data;
4783 	}
4784 
4785 	/* setup the item for the new data */
4786 	for (i = 0; i < nr; i++) {
4787 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4788 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4789 		item = btrfs_item_nr(slot + i);
4790 		btrfs_set_token_item_offset(leaf, item,
4791 					    data_end - data_size[i], &token);
4792 		data_end -= data_size[i];
4793 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4794 	}
4795 
4796 	btrfs_set_header_nritems(leaf, nritems + nr);
4797 	btrfs_mark_buffer_dirty(leaf);
4798 
4799 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4800 		btrfs_print_leaf(leaf);
4801 		BUG();
4802 	}
4803 }
4804 
4805 /*
4806  * Given a key and some data, insert items into the tree.
4807  * This does all the path init required, making room in the tree if needed.
4808  */
4809 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4810 			    struct btrfs_root *root,
4811 			    struct btrfs_path *path,
4812 			    const struct btrfs_key *cpu_key, u32 *data_size,
4813 			    int nr)
4814 {
4815 	int ret = 0;
4816 	int slot;
4817 	int i;
4818 	u32 total_size = 0;
4819 	u32 total_data = 0;
4820 
4821 	for (i = 0; i < nr; i++)
4822 		total_data += data_size[i];
4823 
4824 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4825 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4826 	if (ret == 0)
4827 		return -EEXIST;
4828 	if (ret < 0)
4829 		return ret;
4830 
4831 	slot = path->slots[0];
4832 	BUG_ON(slot < 0);
4833 
4834 	setup_items_for_insert(root, path, cpu_key, data_size,
4835 			       total_data, total_size, nr);
4836 	return 0;
4837 }
4838 
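/*
 * Illustrative sketch: reserving room for two items in a single search
 * with btrfs_insert_empty_items() and then filling their payloads.
 * The keys must be sorted and must not already exist; the payload
 * sizes and the name insert_two_items_example() are made up.
 */
static int insert_two_items_example(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    const struct btrfs_key *keys,
				    void *data0, void *data1)
{
	u32 sizes[2] = { 8, 16 };	/* example payload sizes */
	struct extent_buffer *leaf;
	int ret;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		return ret;

	/* path->slots[0] points at the first of the reserved items */
	leaf = path->nodes[0];
	write_extent_buffer(leaf, data0,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    sizes[0]);
	write_extent_buffer(leaf, data1,
			    btrfs_item_ptr_offset(leaf, path->slots[0] + 1),
			    sizes[1]);
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
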
4839 /*
4840  * Given a key and some data, insert an item into the tree.
4841  * This does all the path init required, making room in the tree if needed.
4842  */
4843 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4844 		      const struct btrfs_key *cpu_key, void *data,
4845 		      u32 data_size)
4846 {
4847 	int ret = 0;
4848 	struct btrfs_path *path;
4849 	struct extent_buffer *leaf;
4850 	unsigned long ptr;
4851 
4852 	path = btrfs_alloc_path();
4853 	if (!path)
4854 		return -ENOMEM;
4855 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4856 	if (!ret) {
4857 		leaf = path->nodes[0];
4858 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4859 		write_extent_buffer(leaf, data, ptr, data_size);
4860 		btrfs_mark_buffer_dirty(leaf);
4861 	}
4862 	btrfs_free_path(path);
4863 	return ret;
4864 }
4865 
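/*
 * Illustrative sketch: the one-shot helper above allocates and frees
 * the path itself, so a caller only needs a key and a payload buffer.
 * The key values, the choice of BTRFS_XATTR_ITEM_KEY, and the name
 * insert_blob_example() are arbitrary examples.
 */
static int insert_blob_example(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 objectid,
			       const void *blob, u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;	/* arbitrary example type */
	key.offset = 0;

	return btrfs_insert_item(trans, root, &key, (void *)blob, blob_len);
}
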
4866 /*
4867  * delete the pointer from a given node.
4868  *
4869  * the tree should have been previously balanced so the deletion does not
4870  * empty a node.
4871  */
4872 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4873 		    int level, int slot)
4874 {
4875 	struct btrfs_fs_info *fs_info = root->fs_info;
4876 	struct extent_buffer *parent = path->nodes[level];
4877 	u32 nritems;
4878 	int ret;
4879 
4880 	nritems = btrfs_header_nritems(parent);
4881 	if (slot != nritems - 1) {
4882 		if (level) {
4883 			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4884 					nritems - slot - 1);
4885 			BUG_ON(ret < 0);
4886 		}
4887 		memmove_extent_buffer(parent,
4888 			      btrfs_node_key_ptr_offset(slot),
4889 			      btrfs_node_key_ptr_offset(slot + 1),
4890 			      sizeof(struct btrfs_key_ptr) *
4891 			      (nritems - slot - 1));
4892 	} else if (level) {
4893 		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4894 				GFP_NOFS);
4895 		BUG_ON(ret < 0);
4896 	}
4897 
4898 	nritems--;
4899 	btrfs_set_header_nritems(parent, nritems);
4900 	if (nritems == 0 && parent == root->node) {
4901 		BUG_ON(btrfs_header_level(root->node) != 1);
4902 		/* just turn the root into a leaf and break */
4903 		btrfs_set_header_level(root->node, 0);
4904 	} else if (slot == 0) {
4905 		struct btrfs_disk_key disk_key;
4906 
4907 		btrfs_node_key(parent, &disk_key, 0);
4908 		fixup_low_keys(fs_info, path, &disk_key, level + 1);
4909 	}
4910 	btrfs_mark_buffer_dirty(parent);
4911 }
4912 
4913 /*
4914  * a helper function to delete the leaf pointed to by path->slots[1] and
4915  * path->nodes[1].
4916  *
4917  * This deletes the pointer in path->nodes[1] and frees the leaf
4918  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4919  *
4920  * The path must have already been setup for deleting the leaf, including
4921  * all the proper balancing.  path->nodes[1] must be locked.
4922  */
4923 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4924 				    struct btrfs_root *root,
4925 				    struct btrfs_path *path,
4926 				    struct extent_buffer *leaf)
4927 {
4928 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4929 	del_ptr(root, path, 1, path->slots[1]);
4930 
4931 	/*
4932 	 * btrfs_free_extent is expensive, we want to make sure we
4933 	 * aren't holding any locks when we call it
4934 	 */
4935 	btrfs_unlock_up_safe(path, 0);
4936 
4937 	root_sub_used(root, leaf->len);
4938 
4939 	extent_buffer_get(leaf);
4940 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4941 	free_extent_buffer_stale(leaf);
4942 }
4943 /*
4944  * delete the item at the leaf level in path.  If that empties
4945  * the leaf, remove it from the tree
4946  */
4947 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4948 		    struct btrfs_path *path, int slot, int nr)
4949 {
4950 	struct btrfs_fs_info *fs_info = root->fs_info;
4951 	struct extent_buffer *leaf;
4952 	struct btrfs_item *item;
4953 	u32 last_off;
4954 	u32 dsize = 0;
4955 	int ret = 0;
4956 	int wret;
4957 	int i;
4958 	u32 nritems;
4959 	struct btrfs_map_token token;
4960 
4961 	btrfs_init_map_token(&token);
4962 
4963 	leaf = path->nodes[0];
4964 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4965 
4966 	for (i = 0; i < nr; i++)
4967 		dsize += btrfs_item_size_nr(leaf, slot + i);
4968 
4969 	nritems = btrfs_header_nritems(leaf);
4970 
4971 	if (slot + nr != nritems) {
4972 		int data_end = leaf_data_end(fs_info, leaf);
4973 
4974 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4975 			      data_end + dsize,
4976 			      BTRFS_LEAF_DATA_OFFSET + data_end,
4977 			      last_off - data_end);
4978 
4979 		for (i = slot + nr; i < nritems; i++) {
4980 			u32 ioff;
4981 
4982 			item = btrfs_item_nr(i);
4983 			ioff = btrfs_token_item_offset(leaf, item, &token);
4984 			btrfs_set_token_item_offset(leaf, item,
4985 						    ioff + dsize, &token);
4986 		}
4987 
4988 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4989 			      btrfs_item_nr_offset(slot + nr),
4990 			      sizeof(struct btrfs_item) *
4991 			      (nritems - slot - nr));
4992 	}
4993 	btrfs_set_header_nritems(leaf, nritems - nr);
4994 	nritems -= nr;
4995 
4996 	/* delete the leaf if we've emptied it */
4997 	if (nritems == 0) {
4998 		if (leaf == root->node) {
4999 			btrfs_set_header_level(leaf, 0);
5000 		} else {
5001 			btrfs_set_path_blocking(path);
5002 			clean_tree_block(fs_info, leaf);
5003 			btrfs_del_leaf(trans, root, path, leaf);
5004 		}
5005 	} else {
5006 		int used = leaf_space_used(leaf, 0, nritems);
5007 		if (slot == 0) {
5008 			struct btrfs_disk_key disk_key;
5009 
5010 			btrfs_item_key(leaf, &disk_key, 0);
5011 			fixup_low_keys(fs_info, path, &disk_key, 1);
5012 		}
5013 
5014 		/* delete the leaf if it is mostly empty */
5015 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5016 			/* push_leaf_left fixes the path.
5017 			 * make sure the path still points to our leaf
5018 			 * for possible call to del_ptr below
5019 			 */
5020 			slot = path->slots[1];
5021 			extent_buffer_get(leaf);
5022 
5023 			btrfs_set_path_blocking(path);
5024 			wret = push_leaf_left(trans, root, path, 1, 1,
5025 					      1, (u32)-1);
5026 			if (wret < 0 && wret != -ENOSPC)
5027 				ret = wret;
5028 
5029 			if (path->nodes[0] == leaf &&
5030 			    btrfs_header_nritems(leaf)) {
5031 				wret = push_leaf_right(trans, root, path, 1,
5032 						       1, 1, 0);
5033 				if (wret < 0 && wret != -ENOSPC)
5034 					ret = wret;
5035 			}
5036 
5037 			if (btrfs_header_nritems(leaf) == 0) {
5038 				path->slots[1] = slot;
5039 				btrfs_del_leaf(trans, root, path, leaf);
5040 				free_extent_buffer(leaf);
5041 				ret = 0;
5042 			} else {
5043 				/* if we're still in the path, make sure
5044 				 * we're dirty.  Otherwise, one of the
5045 				 * push_leaf functions must have already
5046 				 * dirtied this buffer
5047 				 */
5048 				if (path->nodes[0] == leaf)
5049 					btrfs_mark_buffer_dirty(leaf);
5050 				free_extent_buffer(leaf);
5051 			}
5052 		} else {
5053 			btrfs_mark_buffer_dirty(leaf);
5054 		}
5055 	}
5056 	return ret;
5057 }
5058 
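/*
 * Illustrative sketch: deleting the single item the path currently
 * points at.  Assumes the caller holds a write-locked path obtained
 * from btrfs_search_slot() with cow = 1; del_current_item_example()
 * is a hypothetical name.
 */
static int del_current_item_example(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
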
5059 /*
5060  * search the tree again to find a leaf with lesser keys
5061  * returns 0 if it found something or 1 if there are no lesser leaves.
5062  * returns < 0 on io errors.
5063  *
5064  * This may release the path, and so you may lose any locks held at the
5065  * time you call it.
5066  */
5067 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5068 {
5069 	struct btrfs_key key;
5070 	struct btrfs_disk_key found_key;
5071 	int ret;
5072 
5073 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5074 
5075 	if (key.offset > 0) {
5076 		key.offset--;
5077 	} else if (key.type > 0) {
5078 		key.type--;
5079 		key.offset = (u64)-1;
5080 	} else if (key.objectid > 0) {
5081 		key.objectid--;
5082 		key.type = (u8)-1;
5083 		key.offset = (u64)-1;
5084 	} else {
5085 		return 1;
5086 	}
5087 
5088 	btrfs_release_path(path);
5089 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5090 	if (ret < 0)
5091 		return ret;
5092 	btrfs_item_key(path->nodes[0], &found_key, 0);
5093 	ret = comp_keys(&found_key, &key);
5094 	/*
5095 	 * We might have had an item with the previous key in the tree right
5096 	 * before we released our path. And after we released our path, that
5097 	 * item might have been pushed to the first slot (0) of the leaf we
5098 	 * were holding due to a tree balance. Alternatively, an item with the
5099 	 * previous key can exist as the only element of a leaf (big fat item).
5100 	 * Therefore account for these 2 cases, so that our callers (like
5101 	 * btrfs_previous_item) don't miss an existing item with a key matching
5102 	 * the previous key we computed above.
5103 	 */
5104 	if (ret <= 0)
5105 		return 0;
5106 	return 1;
5107 }
5108 
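/*
 * Illustrative sketch: walking leaves backwards with btrfs_prev_leaf()
 * until the start of the tree.  Because the helper may release the
 * path, nothing from the old leaf can be used after the call.  The
 * name walk_leaves_backwards_example() is hypothetical.
 */
static int walk_leaves_backwards_example(struct btrfs_root *root,
					 struct btrfs_path *path)
{
	int ret;

	while ((ret = btrfs_prev_leaf(root, path)) == 0) {
		/* process items 0..nritems-1 of path->nodes[0] here */
	}
	return ret < 0 ? ret : 0;	/* 1 just means "no lesser leaves" */
}
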
5109 /*
5110  * A helper function to walk down the tree starting at min_key, and looking
5111  * for nodes or leaves that have a minimum transaction id.
5112  * This is used by the btree defrag code and by tree logging.
5113  *
5114  * This does not cow, but it does stuff the starting key it finds back
5115  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5116  * key and get a writable path.
5117  *
5118  * This honors path->lowest_level to prevent descent past a given level
5119  * of the tree.
5120  *
5121  * min_trans indicates the oldest transaction that you are interested
5122  * in walking through.  Any nodes or leaves older than min_trans are
5123  * skipped over (without reading them).
5124  *
5125  * returns zero if something useful was found, < 0 on error and 1 if there
5126  * was nothing in the tree that matched the search criteria.
5127  */
5128 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5129 			 struct btrfs_path *path,
5130 			 u64 min_trans)
5131 {
5132 	struct btrfs_fs_info *fs_info = root->fs_info;
5133 	struct extent_buffer *cur;
5134 	struct btrfs_key found_key;
5135 	int slot;
5136 	int sret;
5137 	u32 nritems;
5138 	int level;
5139 	int ret = 1;
5140 	int keep_locks = path->keep_locks;
5141 
5142 	path->keep_locks = 1;
5143 again:
5144 	cur = btrfs_read_lock_root_node(root);
5145 	level = btrfs_header_level(cur);
5146 	WARN_ON(path->nodes[level]);
5147 	path->nodes[level] = cur;
5148 	path->locks[level] = BTRFS_READ_LOCK;
5149 
5150 	if (btrfs_header_generation(cur) < min_trans) {
5151 		ret = 1;
5152 		goto out;
5153 	}
5154 	while (1) {
5155 		nritems = btrfs_header_nritems(cur);
5156 		level = btrfs_header_level(cur);
5157 		sret = btrfs_bin_search(cur, min_key, level, &slot);
5158 
5159 		/* at the lowest level, we're done, setup the path and exit */
5160 		if (level == path->lowest_level) {
5161 			if (slot >= nritems)
5162 				goto find_next_key;
5163 			ret = 0;
5164 			path->slots[level] = slot;
5165 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5166 			goto out;
5167 		}
5168 		if (sret && slot > 0)
5169 			slot--;
5170 		/*
5171 		 * check this node pointer against the min_trans parameter.
5172 		 * If it is too old, skip to the next one.
5173 		 */
5174 		while (slot < nritems) {
5175 			u64 gen;
5176 
5177 			gen = btrfs_node_ptr_generation(cur, slot);
5178 			if (gen < min_trans) {
5179 				slot++;
5180 				continue;
5181 			}
5182 			break;
5183 		}
5184 find_next_key:
5185 		/*
5186 		 * we didn't find a candidate key in this node, walk forward
5187 		 * and find another one
5188 		 */
5189 		if (slot >= nritems) {
5190 			path->slots[level] = slot;
5191 			btrfs_set_path_blocking(path);
5192 			sret = btrfs_find_next_key(root, path, min_key, level,
5193 						  min_trans);
5194 			if (sret == 0) {
5195 				btrfs_release_path(path);
5196 				goto again;
5197 			} else {
5198 				goto out;
5199 			}
5200 		}
5201 		/* save our key for returning back */
5202 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5203 		path->slots[level] = slot;
5204 		if (level == path->lowest_level) {
5205 			ret = 0;
5206 			goto out;
5207 		}
5208 		btrfs_set_path_blocking(path);
5209 		cur = read_node_slot(fs_info, cur, slot);
5210 		if (IS_ERR(cur)) {
5211 			ret = PTR_ERR(cur);
5212 			goto out;
5213 		}
5214 
5215 		btrfs_tree_read_lock(cur);
5216 
5217 		path->locks[level - 1] = BTRFS_READ_LOCK;
5218 		path->nodes[level - 1] = cur;
5219 		unlock_up(path, level, 1, 0, NULL);
5220 		btrfs_clear_path_blocking(path, NULL, 0);
5221 	}
5222 out:
5223 	path->keep_locks = keep_locks;
5224 	if (ret == 0) {
5225 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5226 		btrfs_set_path_blocking(path);
5227 		memcpy(min_key, &found_key, sizeof(found_key));
5228 	}
5229 	return ret;
5230 }
5231 
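/*
 * Illustrative sketch: visiting every leaf item modified in or after
 * transaction 'min_trans', the way defrag and tree logging consume
 * btrfs_search_forward().  The key advance here is simplified; real
 * callers step the key components with overflow handling.  The name
 * scan_since_example() is hypothetical.
 */
static int scan_since_example(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key key = { .objectid = 0, .type = 0, .offset = 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while ((ret = btrfs_search_forward(root, &key, path,
					   min_trans)) == 0) {
		/* process the item at path->nodes[0], path->slots[0] */
		btrfs_release_path(path);
		key.offset++;	/* simplified: advance past returned key */
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;	/* 1 means nothing (more) matched */
}
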
5232 static int tree_move_down(struct btrfs_fs_info *fs_info,
5233 			   struct btrfs_path *path,
5234 			   int *level)
5235 {
5236 	struct extent_buffer *eb;
5237 
5238 	BUG_ON(*level == 0);
5239 	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5240 	if (IS_ERR(eb))
5241 		return PTR_ERR(eb);
5242 
5243 	path->nodes[*level - 1] = eb;
5244 	path->slots[*level - 1] = 0;
5245 	(*level)--;
5246 	return 0;
5247 }
5248 
5249 static int tree_move_next_or_upnext(struct btrfs_path *path,
5250 				    int *level, int root_level)
5251 {
5252 	int ret = 0;
5253 	int nritems;
5254 	nritems = btrfs_header_nritems(path->nodes[*level]);
5255 
5256 	path->slots[*level]++;
5257 
5258 	while (path->slots[*level] >= nritems) {
5259 		if (*level == root_level)
5260 			return -1;
5261 
5262 		/* move upnext */
5263 		path->slots[*level] = 0;
5264 		free_extent_buffer(path->nodes[*level]);
5265 		path->nodes[*level] = NULL;
5266 		(*level)++;
5267 		path->slots[*level]++;
5268 
5269 		nritems = btrfs_header_nritems(path->nodes[*level]);
5270 		ret = 1;
5271 	}
5272 	return ret;
5273 }
5274 
5275 /*
5276  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5277  * or down.
5278  */
5279 static int tree_advance(struct btrfs_fs_info *fs_info,
5280 			struct btrfs_path *path,
5281 			int *level, int root_level,
5282 			int allow_down,
5283 			struct btrfs_key *key)
5284 {
5285 	int ret;
5286 
5287 	if (*level == 0 || !allow_down) {
5288 		ret = tree_move_next_or_upnext(path, level, root_level);
5289 	} else {
5290 		ret = tree_move_down(fs_info, path, level);
5291 	}
5292 	if (ret >= 0) {
5293 		if (*level == 0)
5294 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5295 					path->slots[*level]);
5296 		else
5297 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5298 					path->slots[*level]);
5299 	}
5300 	return ret;
5301 }
5302 
5303 static int tree_compare_item(struct btrfs_path *left_path,
5304 			     struct btrfs_path *right_path,
5305 			     char *tmp_buf)
5306 {
5307 	int cmp;
5308 	int len1, len2;
5309 	unsigned long off1, off2;
5310 
5311 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5312 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5313 	if (len1 != len2)
5314 		return 1;
5315 
5316 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5317 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5318 				right_path->slots[0]);
5319 
5320 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5321 
5322 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5323 	if (cmp)
5324 		return 1;
5325 	return 0;
5326 }
5327 
5328 #define ADVANCE 1
5329 #define ADVANCE_ONLY_NEXT -1
5330 
5331 /*
5332  * This function compares two trees and calls the provided callback for
5333  * every changed/new/deleted item it finds.
5334  * If shared tree blocks are encountered, whole subtrees are skipped, making
5335  * the compare pretty fast on snapshotted subvolumes.
5336  *
5337  * This currently works on commit roots only. As commit roots are read only,
5338  * we don't do any locking. The commit roots are protected with transactions.
5339  * Transactions are ended and rejoined when a commit is tried in between.
5340  *
5341  * This function checks for modifications done to the trees while comparing.
5342  * If it detects a change, it aborts immediately.
5343  */
5344 int btrfs_compare_trees(struct btrfs_root *left_root,
5345 			struct btrfs_root *right_root,
5346 			btrfs_changed_cb_t changed_cb, void *ctx)
5347 {
5348 	struct btrfs_fs_info *fs_info = left_root->fs_info;
5349 	int ret;
5350 	int cmp;
5351 	struct btrfs_path *left_path = NULL;
5352 	struct btrfs_path *right_path = NULL;
5353 	struct btrfs_key left_key;
5354 	struct btrfs_key right_key;
5355 	char *tmp_buf = NULL;
5356 	int left_root_level;
5357 	int right_root_level;
5358 	int left_level;
5359 	int right_level;
5360 	int left_end_reached;
5361 	int right_end_reached;
5362 	int advance_left;
5363 	int advance_right;
5364 	u64 left_blockptr;
5365 	u64 right_blockptr;
5366 	u64 left_gen;
5367 	u64 right_gen;
5368 
5369 	left_path = btrfs_alloc_path();
5370 	if (!left_path) {
5371 		ret = -ENOMEM;
5372 		goto out;
5373 	}
5374 	right_path = btrfs_alloc_path();
5375 	if (!right_path) {
5376 		ret = -ENOMEM;
5377 		goto out;
5378 	}
5379 
5380 	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5381 	if (!tmp_buf) {
5382 		ret = -ENOMEM;
5383 		goto out;
5384 	}
5385 
5386 	left_path->search_commit_root = 1;
5387 	left_path->skip_locking = 1;
5388 	right_path->search_commit_root = 1;
5389 	right_path->skip_locking = 1;
5390 
5391 	/*
5392 	 * Strategy: Go to the first items of both trees. Then do
5393 	 *
5394 	 * If both trees are at level 0
5395 	 *   Compare keys of current items
5396 	 *     If left < right treat left item as new, advance left tree
5397 	 *       and repeat
5398 	 *     If left > right treat right item as deleted, advance right tree
5399 	 *       and repeat
5400 	 *     If left == right do deep compare of items, treat as changed if
5401 	 *       needed, advance both trees and repeat
5402 	 * If both trees are at the same level but not at level 0
5403 	 *   Compare keys of current nodes/leaves
5404 	 *     If left < right advance left tree and repeat
5405 	 *     If left > right advance right tree and repeat
5406 	 *     If left == right compare blockptrs of the next nodes/leaves
5407 	 *       If they match advance both trees but stay at the same level
5408 	 *         and repeat
5409 	 *       If they don't match advance both trees while allowing to go
5410 	 *         deeper and repeat
5411 	 * If tree levels are different
5412 	 *   Advance the tree that needs it and repeat
5413 	 *
5414 	 * Advancing a tree means:
5415 	 *   If we are at level 0, try to go to the next slot. If that's not
5416 	 *   possible, go one level up and repeat. Stop when we found a level
5417 	 *   possible, go one level up and repeat. Stop when we find a level
5418 	 *   where we can go to the next slot. We may at this point be on a
5419 	 *
5420 	 *   If we are not at level 0 and not on shared tree blocks, go one
5421 	 *   level deeper.
5422 	 *
5423 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5424 	 *   the right if possible or go up and right.
5425 	 */
5426 
5427 	down_read(&fs_info->commit_root_sem);
5428 	left_level = btrfs_header_level(left_root->commit_root);
5429 	left_root_level = left_level;
5430 	left_path->nodes[left_level] = left_root->commit_root;
5431 	extent_buffer_get(left_path->nodes[left_level]);
5432 
5433 	right_level = btrfs_header_level(right_root->commit_root);
5434 	right_root_level = right_level;
5435 	right_path->nodes[right_level] = right_root->commit_root;
5436 	extent_buffer_get(right_path->nodes[right_level]);
5437 	up_read(&fs_info->commit_root_sem);
5438 
5439 	if (left_level == 0)
5440 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5441 				&left_key, left_path->slots[left_level]);
5442 	else
5443 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5444 				&left_key, left_path->slots[left_level]);
5445 	if (right_level == 0)
5446 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5447 				&right_key, right_path->slots[right_level]);
5448 	else
5449 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5450 				&right_key, right_path->slots[right_level]);
5451 
5452 	left_end_reached = right_end_reached = 0;
5453 	advance_left = advance_right = 0;
5454 
5455 	while (1) {
5456 		if (advance_left && !left_end_reached) {
5457 			ret = tree_advance(fs_info, left_path, &left_level,
5458 					left_root_level,
5459 					advance_left != ADVANCE_ONLY_NEXT,
5460 					&left_key);
5461 			if (ret == -1)
5462 				left_end_reached = ADVANCE;
5463 			else if (ret < 0)
5464 				goto out;
5465 			advance_left = 0;
5466 		}
5467 		if (advance_right && !right_end_reached) {
5468 			ret = tree_advance(fs_info, right_path, &right_level,
5469 					right_root_level,
5470 					advance_right != ADVANCE_ONLY_NEXT,
5471 					&right_key);
5472 			if (ret == -1)
5473 				right_end_reached = ADVANCE;
5474 			else if (ret < 0)
5475 				goto out;
5476 			advance_right = 0;
5477 		}
5478 
5479 		if (left_end_reached && right_end_reached) {
5480 			ret = 0;
5481 			goto out;
5482 		} else if (left_end_reached) {
5483 			if (right_level == 0) {
5484 				ret = changed_cb(left_path, right_path,
5485 						&right_key,
5486 						BTRFS_COMPARE_TREE_DELETED,
5487 						ctx);
5488 				if (ret < 0)
5489 					goto out;
5490 			}
5491 			advance_right = ADVANCE;
5492 			continue;
5493 		} else if (right_end_reached) {
5494 			if (left_level == 0) {
5495 				ret = changed_cb(left_path, right_path,
5496 						&left_key,
5497 						BTRFS_COMPARE_TREE_NEW,
5498 						ctx);
5499 				if (ret < 0)
5500 					goto out;
5501 			}
5502 			advance_left = ADVANCE;
5503 			continue;
5504 		}
5505 
5506 		if (left_level == 0 && right_level == 0) {
5507 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5508 			if (cmp < 0) {
5509 				ret = changed_cb(left_path, right_path,
5510 						&left_key,
5511 						BTRFS_COMPARE_TREE_NEW,
5512 						ctx);
5513 				if (ret < 0)
5514 					goto out;
5515 				advance_left = ADVANCE;
5516 			} else if (cmp > 0) {
5517 				ret = changed_cb(left_path, right_path,
5518 						&right_key,
5519 						BTRFS_COMPARE_TREE_DELETED,
5520 						ctx);
5521 				if (ret < 0)
5522 					goto out;
5523 				advance_right = ADVANCE;
5524 			} else {
5525 				enum btrfs_compare_tree_result result;
5526 
5527 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5528 				ret = tree_compare_item(left_path, right_path,
5529 							tmp_buf);
5530 				if (ret)
5531 					result = BTRFS_COMPARE_TREE_CHANGED;
5532 				else
5533 					result = BTRFS_COMPARE_TREE_SAME;
5534 				ret = changed_cb(left_path, right_path,
5535 						 &left_key, result, ctx);
5536 				if (ret < 0)
5537 					goto out;
5538 				advance_left = ADVANCE;
5539 				advance_right = ADVANCE;
5540 			}
5541 		} else if (left_level == right_level) {
5542 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5543 			if (cmp < 0) {
5544 				advance_left = ADVANCE;
5545 			} else if (cmp > 0) {
5546 				advance_right = ADVANCE;
5547 			} else {
5548 				left_blockptr = btrfs_node_blockptr(
5549 						left_path->nodes[left_level],
5550 						left_path->slots[left_level]);
5551 				right_blockptr = btrfs_node_blockptr(
5552 						right_path->nodes[right_level],
5553 						right_path->slots[right_level]);
5554 				left_gen = btrfs_node_ptr_generation(
5555 						left_path->nodes[left_level],
5556 						left_path->slots[left_level]);
5557 				right_gen = btrfs_node_ptr_generation(
5558 						right_path->nodes[right_level],
5559 						right_path->slots[right_level]);
5560 				if (left_blockptr == right_blockptr &&
5561 				    left_gen == right_gen) {
5562 					/*
5563 					 * As we're on a shared block, don't
5564 					 * allow going any deeper.
5565 					 */
5566 					advance_left = ADVANCE_ONLY_NEXT;
5567 					advance_right = ADVANCE_ONLY_NEXT;
5568 				} else {
5569 					advance_left = ADVANCE;
5570 					advance_right = ADVANCE;
5571 				}
5572 			}
5573 		} else if (left_level < right_level) {
5574 			advance_right = ADVANCE;
5575 		} else {
5576 			advance_left = ADVANCE;
5577 		}
5578 	}
5579 
5580 out:
5581 	btrfs_free_path(left_path);
5582 	btrfs_free_path(right_path);
5583 	kvfree(tmp_buf);
5584 	return ret;
5585 }
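
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal btrfs_changed_cb_t implementation that merely counts the
 * results reported by btrfs_compare_trees. The context struct and the
 * function are hypothetical; the callback signature is taken from the
 * changed_cb() invocations above. A caller would pass the function as
 * the changed_cb argument and a zeroed stats struct as ctx.
 */
struct example_compare_stats {
	u64 new_items;
	u64 deleted_items;
	u64 changed_items;
	u64 same_items;
};

static int example_changed_cb(struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_compare_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->deleted_items++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->changed_items++;
		break;
	case BTRFS_COMPARE_TREE_SAME:
		stats->same_items++;
		break;
	}
	/* returning a negative value would abort the comparison */
	return 0;
}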
5586 
5587 /*
5588  * This is similar to btrfs_next_leaf, but does not try to preserve
5589  * and fix up the path.  It looks for and returns the next key in the
5590  * tree based on the current path and the min_trans parameter.
5591  *
5592  * 0 is returned if another key is found, < 0 if there are any errors
5593  * and 1 is returned if there are no higher keys in the tree.
5594  *
5595  * path->keep_locks should be set to 1 on the search made before
5596  * calling this function.
5597  */
5598 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5599 			struct btrfs_key *key, int level, u64 min_trans)
5600 {
5601 	int slot;
5602 	struct extent_buffer *c;
5603 
5604 	WARN_ON(!path->keep_locks);
5605 	while (level < BTRFS_MAX_LEVEL) {
5606 		if (!path->nodes[level])
5607 			return 1;
5608 
5609 		slot = path->slots[level] + 1;
5610 		c = path->nodes[level];
5611 next:
5612 		if (slot >= btrfs_header_nritems(c)) {
5613 			int ret;
5614 			int orig_lowest;
5615 			struct btrfs_key cur_key;
5616 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5617 			    !path->nodes[level + 1])
5618 				return 1;
5619 
5620 			if (path->locks[level + 1]) {
5621 				level++;
5622 				continue;
5623 			}
5624 
5625 			slot = btrfs_header_nritems(c) - 1;
5626 			if (level == 0)
5627 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5628 			else
5629 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5630 
5631 			orig_lowest = path->lowest_level;
5632 			btrfs_release_path(path);
5633 			path->lowest_level = level;
5634 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5635 						0, 0);
5636 			path->lowest_level = orig_lowest;
5637 			if (ret < 0)
5638 				return ret;
5639 
5640 			c = path->nodes[level];
5641 			slot = path->slots[level];
5642 			if (ret == 0)
5643 				slot++;
5644 			goto next;
5645 		}
5646 
5647 		if (level == 0)
5648 			btrfs_item_key_to_cpu(c, key, slot);
5649 		else {
5650 			u64 gen = btrfs_node_ptr_generation(c, slot);
5651 
5652 			if (gen < min_trans) {
5653 				slot++;
5654 				goto next;
5655 			}
5656 			btrfs_node_key_to_cpu(c, key, slot);
5657 		}
5658 		return 0;
5659 	}
5660 	return 1;
5661 }
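
/*
 * Usage sketch (editor's addition, not part of the original file):
 * peek at the key that follows 'start' at the leaf level. Note that
 * keep_locks is set before the search, as the comment above
 * btrfs_find_next_key requires. example_peek_next_key() is a
 * hypothetical helper.
 */
static int example_peek_next_key(struct btrfs_root *root,
				 const struct btrfs_key *start,
				 struct btrfs_key *next)
{
	struct btrfs_path *path;
	struct btrfs_key search = *start;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, &search, path, 0, 0);
	if (ret >= 0)
		/* level 0, min_trans 0: accept any next key in the leaves */
		ret = btrfs_find_next_key(root, path, next, 0, 0);

	btrfs_free_path(path);
	return ret;
}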
5662 
5663 /*
5664  * Search the tree again to find a leaf with greater keys.
5665  * Returns 0 if it found something, 1 if there are no greater leaves,
5666  * or < 0 on IO errors.
5667  */
5668 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5669 {
5670 	return btrfs_next_old_leaf(root, path, 0);
5671 }
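
/*
 * Usage sketch (editor's addition, not part of the original file):
 * the canonical pattern for walking every item of a tree with
 * btrfs_search_slot() and btrfs_next_leaf(). All helpers used here
 * appear in this file or in ctree.h; example_walk_tree() itself is
 * hypothetical.
 */
static int example_walk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };	/* start at the smallest key */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item behind 'key' here ... */
		path->slots[0]++;
	}
	if (ret > 0)	/* ran off the last leaf: not an error */
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}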
5672 
5673 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5674 			u64 time_seq)
5675 {
5676 	int slot;
5677 	int level;
5678 	struct extent_buffer *c;
5679 	struct extent_buffer *next;
5680 	struct btrfs_key key;
5681 	u32 nritems;
5682 	int ret;
5683 	int old_spinning = path->leave_spinning;
5684 	int next_rw_lock = 0;
5685 
5686 	nritems = btrfs_header_nritems(path->nodes[0]);
5687 	if (nritems == 0)
5688 		return 1;
5689 
5690 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5691 again:
5692 	level = 1;
5693 	next = NULL;
5694 	next_rw_lock = 0;
5695 	btrfs_release_path(path);
5696 
5697 	path->keep_locks = 1;
5698 	path->leave_spinning = 1;
5699 
5700 	if (time_seq)
5701 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5702 	else
5703 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5704 	path->keep_locks = 0;
5705 
5706 	if (ret < 0)
5707 		return ret;
5708 
5709 	nritems = btrfs_header_nritems(path->nodes[0]);
5710 	/*
5711 	 * by releasing the path above we dropped all our locks.  A balance
5712 	 * could have added more items next to the key that used to be
5713 	 * at the very end of the block.  So, check again here and
5714 	 * advance the path if there are now more items available.
5715 	 */
5716 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5717 		if (ret == 0)
5718 			path->slots[0]++;
5719 		ret = 0;
5720 		goto done;
5721 	}
5722 	/*
5723 	 * So the above check misses one case:
5724 	 * - after releasing the path above, someone has removed the item that
5725 	 *   used to be at the very end of the block, and balance between leaves
5726 	 *   gets another one with a bigger key.offset to replace it.
5727 	 *
5728 	 * This one should be returned as well, or we can get leaf corruption
5729 	 * later (especially in __btrfs_drop_extents()).
5730 	 *
5731 	 * A bit more explanation of this check:
5732 	 * with ret > 0, the key isn't found, the path points to the slot
5733 	 * where it should be inserted, so the path->slots[0] item must be the
5734 	 * bigger one.
5735 	 */
5736 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5737 		ret = 0;
5738 		goto done;
5739 	}
5740 
5741 	while (level < BTRFS_MAX_LEVEL) {
5742 		if (!path->nodes[level]) {
5743 			ret = 1;
5744 			goto done;
5745 		}
5746 
5747 		slot = path->slots[level] + 1;
5748 		c = path->nodes[level];
5749 		if (slot >= btrfs_header_nritems(c)) {
5750 			level++;
5751 			if (level == BTRFS_MAX_LEVEL) {
5752 				ret = 1;
5753 				goto done;
5754 			}
5755 			continue;
5756 		}
5757 
5758 		if (next) {
5759 			btrfs_tree_unlock_rw(next, next_rw_lock);
5760 			free_extent_buffer(next);
5761 		}
5762 
5763 		next = c;
5764 		next_rw_lock = path->locks[level];
5765 		ret = read_block_for_search(root, path, &next, level,
5766 					    slot, &key);
5767 		if (ret == -EAGAIN)
5768 			goto again;
5769 
5770 		if (ret < 0) {
5771 			btrfs_release_path(path);
5772 			goto done;
5773 		}
5774 
5775 		if (!path->skip_locking) {
5776 			ret = btrfs_try_tree_read_lock(next);
5777 			if (!ret && time_seq) {
5778 				/*
5779 				 * If we don't get the lock, we may be racing
5780 				 * with push_leaf_left, which holds that lock
5781 				 * while itself waiting for the leaf we have
5782 				 * currently locked. To resolve this, we give
5783 				 * up our lock and cycle back to retry.
5784 				 */
5785 				free_extent_buffer(next);
5786 				btrfs_release_path(path);
5787 				cond_resched();
5788 				goto again;
5789 			}
5790 			if (!ret) {
5791 				btrfs_set_path_blocking(path);
5792 				btrfs_tree_read_lock(next);
5793 				btrfs_clear_path_blocking(path, next,
5794 							  BTRFS_READ_LOCK);
5795 			}
5796 			next_rw_lock = BTRFS_READ_LOCK;
5797 		}
5798 		break;
5799 	}
5800 	path->slots[level] = slot;
5801 	while (1) {
5802 		level--;
5803 		c = path->nodes[level];
5804 		if (path->locks[level])
5805 			btrfs_tree_unlock_rw(c, path->locks[level]);
5806 
5807 		free_extent_buffer(c);
5808 		path->nodes[level] = next;
5809 		path->slots[level] = 0;
5810 		if (!path->skip_locking)
5811 			path->locks[level] = next_rw_lock;
5812 		if (!level)
5813 			break;
5814 
5815 		ret = read_block_for_search(root, path, &next, level,
5816 					    0, &key);
5817 		if (ret == -EAGAIN)
5818 			goto again;
5819 
5820 		if (ret < 0) {
5821 			btrfs_release_path(path);
5822 			goto done;
5823 		}
5824 
5825 		if (!path->skip_locking) {
5826 			ret = btrfs_try_tree_read_lock(next);
5827 			if (!ret) {
5828 				btrfs_set_path_blocking(path);
5829 				btrfs_tree_read_lock(next);
5830 				btrfs_clear_path_blocking(path, next,
5831 							  BTRFS_READ_LOCK);
5832 			}
5833 			next_rw_lock = BTRFS_READ_LOCK;
5834 		}
5835 	}
5836 	ret = 0;
5837 done:
5838 	unlock_up(path, 0, 1, 0, NULL);
5839 	path->leave_spinning = old_spinning;
5840 	if (!old_spinning)
5841 		btrfs_set_path_blocking(path);
5842 
5843 	return ret;
5844 }
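
/*
 * Note (editor's addition, not part of the original file): with
 * time_seq == 0 the re-search above goes through btrfs_search_slot()
 * and this behaves exactly like btrfs_next_leaf(); with a non-zero
 * time_seq it goes through btrfs_search_old_slot(), so the walk sees
 * the tree as it looked at that tree-modification sequence number.
 * Callers are expected to have obtained such a sequence number (and
 * thereby pinned the tree mod log) before starting the walk.
 */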
5845 
5846 /*
5847  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5848  * searching until it gets past min_objectid or finds an item of 'type'
5849  *
5850  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5851  */
5852 int btrfs_previous_item(struct btrfs_root *root,
5853 			struct btrfs_path *path, u64 min_objectid,
5854 			int type)
5855 {
5856 	struct btrfs_key found_key;
5857 	struct extent_buffer *leaf;
5858 	u32 nritems;
5859 	int ret;
5860 
5861 	while (1) {
5862 		if (path->slots[0] == 0) {
5863 			btrfs_set_path_blocking(path);
5864 			ret = btrfs_prev_leaf(root, path);
5865 			if (ret != 0)
5866 				return ret;
5867 		} else {
5868 			path->slots[0]--;
5869 		}
5870 		leaf = path->nodes[0];
5871 		nritems = btrfs_header_nritems(leaf);
5872 		if (nritems == 0)
5873 			return 1;
5874 		if (path->slots[0] == nritems)
5875 			path->slots[0]--;
5876 
5877 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5878 		if (found_key.objectid < min_objectid)
5879 			break;
5880 		if (found_key.type == type)
5881 			return 0;
5882 		if (found_key.objectid == min_objectid &&
5883 		    found_key.type < type)
5884 			break;
5885 	}
5886 	return 1;
5887 }
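
/*
 * Usage sketch (editor's addition, not part of the original file):
 * position past all keys of an objectid, then walk backwards until an
 * item of the wanted type shows up. btrfs_previous_extent_item() below
 * follows the same calling pattern, just hard-wired to extent items.
 * example_find_prev_item() is a hypothetical helper.
 */
static int example_find_prev_item(struct btrfs_root *root, u64 objectid,
				  int type)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* largest possible key for this objectid */
	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret >= 0)
		/* 0: found at path->slots[0]; 1: nothing; < 0: error */
		ret = btrfs_previous_item(root, path, objectid, type);

	btrfs_free_path(path);
	return ret;
}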
5888 
5889 /*
5890  * search in the extent tree to find a previous Metadata/Data extent item
5891  * with min objectid.
5892  *
5893  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5894  */
5895 int btrfs_previous_extent_item(struct btrfs_root *root,
5896 			struct btrfs_path *path, u64 min_objectid)
5897 {
5898 	struct btrfs_key found_key;
5899 	struct extent_buffer *leaf;
5900 	u32 nritems;
5901 	int ret;
5902 
5903 	while (1) {
5904 		if (path->slots[0] == 0) {
5905 			btrfs_set_path_blocking(path);
5906 			ret = btrfs_prev_leaf(root, path);
5907 			if (ret != 0)
5908 				return ret;
5909 		} else {
5910 			path->slots[0]--;
5911 		}
5912 		leaf = path->nodes[0];
5913 		nritems = btrfs_header_nritems(leaf);
5914 		if (nritems == 0)
5915 			return 1;
5916 		if (path->slots[0] == nritems)
5917 			path->slots[0]--;
5918 
5919 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5920 		if (found_key.objectid < min_objectid)
5921 			break;
5922 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5923 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5924 			return 0;
5925 		if (found_key.objectid == min_objectid &&
5926 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5927 			break;
5928 	}
5929 	return 1;
5930 }
5931