// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
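
/*
 * A minimal usage sketch (illustrative only, not called anywhere) for the
 * path API above: allocate a path, use it for a read-only search and free
 * it again.  btrfs_free_path() drops all locks and extent buffer
 * references held by the path before returning it to the slab cache.
 */
static inline int example_path_lifecycle(struct btrfs_root *root,
					 const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* NULL trans and cow == 0: read-only lookup */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);

	/* unlocks and releases every level, then frees the path itself */
	btrfs_free_path(path);
	return ret;
}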

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root.  A read
 * locked buffer is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything that is not a reference counted cow subvolume)
 * just get put onto a simple dirty list.  transaction.c walks this list to
 * make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure that elem->seq is zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}
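
/*
 * A minimal sketch (illustrative only, not called anywhere) of the blocker
 * pattern used by callers such as the backref walking code: while the
 * element sits on tree_mod_seq_list, no tree mod log entry with a sequence
 * number at or above elem.seq will be thrown away by
 * btrfs_put_tree_mod_seq().
 */
static inline void example_tree_mod_seq_blocker(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = SEQ_LIST_INIT(elem);

	btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... rewind/search trees with elem.seq as the time_seq value ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);
}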

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}

static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}
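
/*
 * Worked example: if btrfs_root_last_snapshot() returns 100, a block with
 * header generation 95 was created before the most recent snapshot and may
 * still be referenced by it, so it can be shared; a block with generation
 * 101 and without the RELOC flag cannot be shared.
 */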

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as dropping
	 * a tree) are only allowed on blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	/*
	 * If we are COWing a node/leaf from the extent, chunk, device or free
	 * space trees, make sure that we do not finish block group creation of
	 * pending block groups. We do this to avoid a deadlock.
	 * COWing can result in allocation of a new chunk, and flushing pending
	 * block groups (btrfs_create_pending_block_groups()) can be triggered
	 * when finishing allocation of a new chunk. Creation of a pending block
	 * group modifies the extent, chunk, device and free space trees,
	 * therefore we could deadlock with ourselves since we are holding a
	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
	 * try to COW later.
	 */
	if (root == fs_info->extent_root ||
	    root == fs_info->chunk_root ||
	    root == fs_info->dev_root ||
	    root == fs_info->free_space_root)
		trans->can_flush_pending_bgs = false;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	trans->can_flush_pending_bgs = true;
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the tree mod log element describing the oldest predecessor of
 * the given root. entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
		struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0, level, NULL);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			"COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
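
/*
 * A minimal sketch (illustrative only, not called anywhere) of the caller
 * contract for btrfs_cow_block(): the buffer must be write locked and must
 * be COWed inside the current transaction before it may be modified.  On
 * success *cow points at a writable copy, which may be buf itself if no
 * COW was needed.
 */
static inline int example_cow_before_modify(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct extent_buffer *buf,
					    struct extent_buffer *parent,
					    int slot,
					    struct extent_buffer **cow)
{
	int ret;

	ret = btrfs_cow_block(trans, root, buf, parent, slot, cow);
	if (ret)
		return ret;

	/* ... modify *cow, then btrfs_mark_buffer_dirty(*cow) ... */
	return 0;
}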

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
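
/*
 * Worked example: with a 16KiB nodesize, blocknr 0 and other 32768 give a
 * gap of other - (blocknr + blocksize) = 16384, which is below the 32768
 * byte threshold, so the blocks count as close; at other 65536 the gap is
 * 49152 and they do not.
 */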

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
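
/*
 * Example of the resulting ordering, comparing objectid first, then type,
 * then offset:
 *   (256 DIR_ITEM 0) < (256 EXTENT_DATA 0) < (256 EXTENT_DATA 4096)
 *   < (257 DIR_ITEM 0)
 */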

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		struct btrfs_key first_key;
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		btrfs_node_key_to_cpu(parent, &first_key, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen,
						      parent_level - 1,
						      &first_key);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen,
						parent_level - 1, &first_key);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}
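
/*
 * A minimal sketch (illustrative only, not called anywhere) of interpreting
 * the result of btrfs_bin_search(): 0 means the key was found at *slot,
 * 1 means it was not found and *slot is the insertion point (possibly equal
 * to nritems), negative values are errors.
 */
static inline int example_bin_search(struct extent_buffer *eb,
				     const struct btrfs_key *key, int level)
{
	int slot;
	int ret;

	ret = btrfs_bin_search(eb, key, level, &slot);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return slot;	/* exact match */
	/* ret == 1: key absent, slot is where it would be inserted */
	return -ENOENT;
}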

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
static noinline struct extent_buffer *
read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
	       int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
1788 static noinline int balance_level(struct btrfs_trans_handle *trans,
1789 			 struct btrfs_root *root,
1790 			 struct btrfs_path *path, int level)
1791 {
1792 	struct btrfs_fs_info *fs_info = root->fs_info;
1793 	struct extent_buffer *right = NULL;
1794 	struct extent_buffer *mid;
1795 	struct extent_buffer *left = NULL;
1796 	struct extent_buffer *parent = NULL;
1797 	int ret = 0;
1798 	int wret;
1799 	int pslot;
1800 	int orig_slot = path->slots[level];
1801 	u64 orig_ptr;
1802 
1803 	ASSERT(level > 0);
1804 
1805 	mid = path->nodes[level];
1806 
1807 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1808 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1809 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1810 
1811 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1812 
1813 	if (level < BTRFS_MAX_LEVEL - 1) {
1814 		parent = path->nodes[level + 1];
1815 		pslot = path->slots[level + 1];
1816 	}
1817 
1818 	/*
1819 	 * deal with the case where there is only one pointer in the root
1820 	 * by promoting the node below to a root
1821 	 */
1822 	if (!parent) {
1823 		struct extent_buffer *child;
1824 
1825 		if (btrfs_header_nritems(mid) != 1)
1826 			return 0;
1827 
1828 		/* promote the child to a root */
1829 		child = read_node_slot(fs_info, mid, 0);
1830 		if (IS_ERR(child)) {
1831 			ret = PTR_ERR(child);
1832 			btrfs_handle_fs_error(fs_info, ret, NULL);
1833 			goto enospc;
1834 		}
1835 
1836 		btrfs_tree_lock(child);
1837 		btrfs_set_lock_blocking(child);
1838 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1839 		if (ret) {
1840 			btrfs_tree_unlock(child);
1841 			free_extent_buffer(child);
1842 			goto enospc;
1843 		}
1844 
1845 		ret = tree_mod_log_insert_root(root->node, child, 1);
1846 		BUG_ON(ret < 0);
1847 		rcu_assign_pointer(root->node, child);
1848 
1849 		add_root_to_dirty_list(root);
1850 		btrfs_tree_unlock(child);
1851 
1852 		path->locks[level] = 0;
1853 		path->nodes[level] = NULL;
1854 		clean_tree_block(fs_info, mid);
1855 		btrfs_tree_unlock(mid);
1856 		/* once for the path */
1857 		free_extent_buffer(mid);
1858 
1859 		root_sub_used(root, mid->len);
1860 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1861 		/* once for the root ptr */
1862 		free_extent_buffer_stale(mid);
1863 		return 0;
1864 	}
1865 	if (btrfs_header_nritems(mid) >
1866 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1867 		return 0;
1868 
1869 	left = read_node_slot(fs_info, parent, pslot - 1);
1870 	if (IS_ERR(left))
1871 		left = NULL;
1872 
1873 	if (left) {
1874 		btrfs_tree_lock(left);
1875 		btrfs_set_lock_blocking(left);
1876 		wret = btrfs_cow_block(trans, root, left,
1877 				       parent, pslot - 1, &left);
1878 		if (wret) {
1879 			ret = wret;
1880 			goto enospc;
1881 		}
1882 	}
1883 
1884 	right = read_node_slot(fs_info, parent, pslot + 1);
1885 	if (IS_ERR(right))
1886 		right = NULL;
1887 
1888 	if (right) {
1889 		btrfs_tree_lock(right);
1890 		btrfs_set_lock_blocking(right);
1891 		wret = btrfs_cow_block(trans, root, right,
1892 				       parent, pslot + 1, &right);
1893 		if (wret) {
1894 			ret = wret;
1895 			goto enospc;
1896 		}
1897 	}
1898 
1899 	/* first, try to make some room in the middle buffer */
1900 	if (left) {
1901 		orig_slot += btrfs_header_nritems(left);
1902 		wret = push_node_left(trans, fs_info, left, mid, 1);
1903 		if (wret < 0)
1904 			ret = wret;
1905 	}
1906 
1907 	/*
1908 	 * then try to empty the rightmost buffer into the middle
1909 	 */
1910 	if (right) {
1911 		wret = push_node_left(trans, fs_info, mid, right, 1);
1912 		if (wret < 0 && wret != -ENOSPC)
1913 			ret = wret;
1914 		if (btrfs_header_nritems(right) == 0) {
1915 			clean_tree_block(fs_info, right);
1916 			btrfs_tree_unlock(right);
1917 			del_ptr(root, path, level + 1, pslot + 1);
1918 			root_sub_used(root, right->len);
1919 			btrfs_free_tree_block(trans, root, right, 0, 1);
1920 			free_extent_buffer_stale(right);
1921 			right = NULL;
1922 		} else {
1923 			struct btrfs_disk_key right_key;
1924 			btrfs_node_key(right, &right_key, 0);
1925 			ret = tree_mod_log_insert_key(parent, pslot + 1,
1926 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1927 			BUG_ON(ret < 0);
1928 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1929 			btrfs_mark_buffer_dirty(parent);
1930 		}
1931 	}
1932 	if (btrfs_header_nritems(mid) == 1) {
1933 		/*
1934 		 * we're not allowed to leave a node with one item in the
1935 		 * tree during a delete.  A deletion from lower in the tree
1936 		 * could try to delete the only pointer in this node.
1937 		 * So, pull some keys from the left.
1938 		 * There has to be a left pointer at this point because
1939 		 * otherwise we would have pulled some pointers from the
1940 		 * right
1941 		 */
1942 		if (!left) {
1943 			ret = -EROFS;
1944 			btrfs_handle_fs_error(fs_info, ret, NULL);
1945 			goto enospc;
1946 		}
1947 		wret = balance_node_right(trans, fs_info, mid, left);
1948 		if (wret < 0) {
1949 			ret = wret;
1950 			goto enospc;
1951 		}
1952 		if (wret == 1) {
1953 			wret = push_node_left(trans, fs_info, left, mid, 1);
1954 			if (wret < 0)
1955 				ret = wret;
1956 		}
1957 		BUG_ON(wret == 1);
1958 	}
1959 	if (btrfs_header_nritems(mid) == 0) {
1960 		clean_tree_block(fs_info, mid);
1961 		btrfs_tree_unlock(mid);
1962 		del_ptr(root, path, level + 1, pslot);
1963 		root_sub_used(root, mid->len);
1964 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1965 		free_extent_buffer_stale(mid);
1966 		mid = NULL;
1967 	} else {
1968 		/* update the parent key to reflect our changes */
1969 		struct btrfs_disk_key mid_key;
1970 		btrfs_node_key(mid, &mid_key, 0);
1971 		ret = tree_mod_log_insert_key(parent, pslot,
1972 				MOD_LOG_KEY_REPLACE, GFP_NOFS);
1973 		BUG_ON(ret < 0);
1974 		btrfs_set_node_key(parent, &mid_key, pslot);
1975 		btrfs_mark_buffer_dirty(parent);
1976 	}
1977 
1978 	/* update the path */
1979 	if (left) {
1980 		if (btrfs_header_nritems(left) > orig_slot) {
1981 			extent_buffer_get(left);
1982 			/* left was locked after cow */
1983 			path->nodes[level] = left;
1984 			path->slots[level + 1] -= 1;
1985 			path->slots[level] = orig_slot;
1986 			if (mid) {
1987 				btrfs_tree_unlock(mid);
1988 				free_extent_buffer(mid);
1989 			}
1990 		} else {
1991 			orig_slot -= btrfs_header_nritems(left);
1992 			path->slots[level] = orig_slot;
1993 		}
1994 	}
1995 	/* double check we haven't messed things up */
1996 	if (orig_ptr !=
1997 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1998 		BUG();
1999 enospc:
2000 	if (right) {
2001 		btrfs_tree_unlock(right);
2002 		free_extent_buffer(right);
2003 	}
2004 	if (left) {
2005 		if (path->nodes[level] != left)
2006 			btrfs_tree_unlock(left);
2007 		free_extent_buffer(left);
2008 	}
2009 	return ret;
2010 }
2011 
2012 /* Node balancing for insertion.  Here we only split or push nodes around
2013  * when they are completely full.  This is also done top down, so we
2014  * have to be pessimistic.
2015  */
2016 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2017 					  struct btrfs_root *root,
2018 					  struct btrfs_path *path, int level)
2019 {
2020 	struct btrfs_fs_info *fs_info = root->fs_info;
2021 	struct extent_buffer *right = NULL;
2022 	struct extent_buffer *mid;
2023 	struct extent_buffer *left = NULL;
2024 	struct extent_buffer *parent = NULL;
2025 	int ret = 0;
2026 	int wret;
2027 	int pslot;
2028 	int orig_slot = path->slots[level];
2029 
2030 	if (level == 0)
2031 		return 1;
2032 
2033 	mid = path->nodes[level];
2034 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2035 
2036 	if (level < BTRFS_MAX_LEVEL - 1) {
2037 		parent = path->nodes[level + 1];
2038 		pslot = path->slots[level + 1];
2039 	}
2040 
2041 	if (!parent)
2042 		return 1;
2043 
2044 	left = read_node_slot(fs_info, parent, pslot - 1);
2045 	if (IS_ERR(left))
2046 		left = NULL;
2047 
2048 	/* first, try to make some room in the middle buffer */
2049 	if (left) {
2050 		u32 left_nr;
2051 
2052 		btrfs_tree_lock(left);
2053 		btrfs_set_lock_blocking(left);
2054 
2055 		left_nr = btrfs_header_nritems(left);
2056 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2057 			wret = 1;
2058 		} else {
2059 			ret = btrfs_cow_block(trans, root, left, parent,
2060 					      pslot - 1, &left);
2061 			if (ret)
2062 				wret = 1;
2063 			else {
2064 				wret = push_node_left(trans, fs_info,
2065 						      left, mid, 0);
2066 			}
2067 		}
2068 		if (wret < 0)
2069 			ret = wret;
2070 		if (wret == 0) {
2071 			struct btrfs_disk_key disk_key;
2072 			orig_slot += left_nr;
2073 			btrfs_node_key(mid, &disk_key, 0);
2074 			ret = tree_mod_log_insert_key(parent, pslot,
2075 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2076 			BUG_ON(ret < 0);
2077 			btrfs_set_node_key(parent, &disk_key, pslot);
2078 			btrfs_mark_buffer_dirty(parent);
2079 			if (btrfs_header_nritems(left) > orig_slot) {
2080 				path->nodes[level] = left;
2081 				path->slots[level + 1] -= 1;
2082 				path->slots[level] = orig_slot;
2083 				btrfs_tree_unlock(mid);
2084 				free_extent_buffer(mid);
2085 			} else {
2086 				orig_slot -=
2087 					btrfs_header_nritems(left);
2088 				path->slots[level] = orig_slot;
2089 				btrfs_tree_unlock(left);
2090 				free_extent_buffer(left);
2091 			}
2092 			return 0;
2093 		}
2094 		btrfs_tree_unlock(left);
2095 		free_extent_buffer(left);
2096 	}
2097 	right = read_node_slot(fs_info, parent, pslot + 1);
2098 	if (IS_ERR(right))
2099 		right = NULL;
2100 
2101 	/*
2102 	 * then try to push some data from the middle buffer into the right one
2103 	 */
2104 	if (right) {
2105 		u32 right_nr;
2106 
2107 		btrfs_tree_lock(right);
2108 		btrfs_set_lock_blocking(right);
2109 
2110 		right_nr = btrfs_header_nritems(right);
2111 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2112 			wret = 1;
2113 		} else {
2114 			ret = btrfs_cow_block(trans, root, right,
2115 					      parent, pslot + 1,
2116 					      &right);
2117 			if (ret)
2118 				wret = 1;
2119 			else {
2120 				wret = balance_node_right(trans, fs_info,
2121 							  right, mid);
2122 			}
2123 		}
2124 		if (wret < 0)
2125 			ret = wret;
2126 		if (wret == 0) {
2127 			struct btrfs_disk_key disk_key;
2128 
2129 			btrfs_node_key(right, &disk_key, 0);
2130 			ret = tree_mod_log_insert_key(parent, pslot + 1,
2131 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2132 			BUG_ON(ret < 0);
2133 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2134 			btrfs_mark_buffer_dirty(parent);
2135 
2136 			if (btrfs_header_nritems(mid) <= orig_slot) {
2137 				path->nodes[level] = right;
2138 				path->slots[level + 1] += 1;
2139 				path->slots[level] = orig_slot -
2140 					btrfs_header_nritems(mid);
2141 				btrfs_tree_unlock(mid);
2142 				free_extent_buffer(mid);
2143 			} else {
2144 				btrfs_tree_unlock(right);
2145 				free_extent_buffer(right);
2146 			}
2147 			return 0;
2148 		}
2149 		btrfs_tree_unlock(right);
2150 		free_extent_buffer(right);
2151 	}
2152 	return 1;
2153 }
2154 
2155 /*
2156  * readahead one full node of leaves, finding things that are close
2157  * to the block in 'slot', and triggering ra on them.
2158  */
2159 static void reada_for_search(struct btrfs_fs_info *fs_info,
2160 			     struct btrfs_path *path,
2161 			     int level, int slot, u64 objectid)
2162 {
2163 	struct extent_buffer *node;
2164 	struct btrfs_disk_key disk_key;
2165 	u32 nritems;
2166 	u64 search;
2167 	u64 target;
2168 	u64 nread = 0;
2169 	struct extent_buffer *eb;
2170 	u32 nr;
2171 	u32 blocksize;
2172 	u32 nscan = 0;
2173 
2174 	if (level != 1)
2175 		return;
2176 
2177 	if (!path->nodes[level])
2178 		return;
2179 
2180 	node = path->nodes[level];
2181 
2182 	search = btrfs_node_blockptr(node, slot);
2183 	blocksize = fs_info->nodesize;
2184 	eb = find_extent_buffer(fs_info, search);
2185 	if (eb) {
2186 		free_extent_buffer(eb);
2187 		return;
2188 	}
2189 
2190 	target = search;
2191 
2192 	nritems = btrfs_header_nritems(node);
2193 	nr = slot;
2194 
2195 	while (1) {
2196 		if (path->reada == READA_BACK) {
2197 			if (nr == 0)
2198 				break;
2199 			nr--;
2200 		} else if (path->reada == READA_FORWARD) {
2201 			nr++;
2202 			if (nr >= nritems)
2203 				break;
2204 		}
2205 		if (path->reada == READA_BACK && objectid) {
2206 			btrfs_node_key(node, &disk_key, nr);
2207 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2208 				break;
2209 		}
2210 		search = btrfs_node_blockptr(node, nr);
2211 		if ((search <= target && target - search <= 65536) ||
2212 		    (search > target && search - target <= 65536)) {
2213 			readahead_tree_block(fs_info, search);
2214 			nread += blocksize;
2215 		}
2216 		nscan++;
2217 		if ((nread > 65536 || nscan > 32))
2218 			break;
2219 	}
2220 }
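
/*
 * Illustrative sketch (hypothetical snippet): readahead is opt-in per
 * path, so a caller about to scan forward through many leaves would set
 * it before searching:
 *
 *	path->reada = READA_FORWARD;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *
 * after which read_block_for_search() invokes reada_for_search() on each
 * level-1 node it descends through.
 */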
2221 
2222 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2223 				       struct btrfs_path *path, int level)
2224 {
2225 	int slot;
2226 	int nritems;
2227 	struct extent_buffer *parent;
2228 	struct extent_buffer *eb;
2229 	u64 gen;
2230 	u64 block1 = 0;
2231 	u64 block2 = 0;
2232 
2233 	parent = path->nodes[level + 1];
2234 	if (!parent)
2235 		return;
2236 
2237 	nritems = btrfs_header_nritems(parent);
2238 	slot = path->slots[level + 1];
2239 
2240 	if (slot > 0) {
2241 		block1 = btrfs_node_blockptr(parent, slot - 1);
2242 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2243 		eb = find_extent_buffer(fs_info, block1);
2244 		/*
2245 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2246 		 * don't want to return -EAGAIN here.  That would loop
2247 		 * forever.
2248 		 */
2249 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2250 			block1 = 0;
2251 		free_extent_buffer(eb);
2252 	}
2253 	if (slot + 1 < nritems) {
2254 		block2 = btrfs_node_blockptr(parent, slot + 1);
2255 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2256 		eb = find_extent_buffer(fs_info, block2);
2257 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2258 			block2 = 0;
2259 		free_extent_buffer(eb);
2260 	}
2261 
2262 	if (block1)
2263 		readahead_tree_block(fs_info, block1);
2264 	if (block2)
2265 		readahead_tree_block(fs_info, block2);
2266 }
2267 
2268 
2269 /*
2270  * when we walk down the tree, it is usually safe to unlock the higher layers
2271  * in the tree.  The exceptions are when our path goes through slot 0, because
2272  * operations on the tree might require changing key pointers higher up in the
2273  * tree.
2274  *
2275  * callers might also have set path->keep_locks, which tells this code to keep
2276  * the lock if the path points to the last slot in the block.  This is part of
2277  * walking through the tree, and selecting the next slot in the higher block.
2278  *
2279  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2280  * if lowest_unlock is 1, level 0 won't be unlocked.
2281  */
2282 static noinline void unlock_up(struct btrfs_path *path, int level,
2283 			       int lowest_unlock, int min_write_lock_level,
2284 			       int *write_lock_level)
2285 {
2286 	int i;
2287 	int skip_level = level;
2288 	int no_skips = 0;
2289 	struct extent_buffer *t;
2290 
2291 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2292 		if (!path->nodes[i])
2293 			break;
2294 		if (!path->locks[i])
2295 			break;
2296 		if (!no_skips && path->slots[i] == 0) {
2297 			skip_level = i + 1;
2298 			continue;
2299 		}
2300 		if (!no_skips && path->keep_locks) {
2301 			u32 nritems;
2302 			t = path->nodes[i];
2303 			nritems = btrfs_header_nritems(t);
2304 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2305 				skip_level = i + 1;
2306 				continue;
2307 			}
2308 		}
2309 		if (skip_level < i && i >= lowest_unlock)
2310 			no_skips = 1;
2311 
2312 		t = path->nodes[i];
2313 		if (i >= lowest_unlock && i > skip_level) {
2314 			btrfs_tree_unlock_rw(t, path->locks[i]);
2315 			path->locks[i] = 0;
2316 			if (write_lock_level &&
2317 			    i > min_write_lock_level &&
2318 			    i <= *write_lock_level) {
2319 				*write_lock_level = i - 1;
2320 			}
2321 		}
2322 	}
2323 }
2324 
2325 /*
2326  * This releases any locks held in the path starting at level and
2327  * going all the way up to the root.
2328  *
2329  * btrfs_search_slot will keep the lock held on higher nodes in a few
2330  * corner cases, such as COW of the block at slot zero in the node.  This
2331  * ignores those rules, and it should only be called when there are no
2332  * more updates to be done higher up in the tree.
2333  */
2334 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2335 {
2336 	int i;
2337 
2338 	if (path->keep_locks)
2339 		return;
2340 
2341 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2342 		if (!path->nodes[i])
2343 			continue;
2344 		if (!path->locks[i])
2345 			continue;
2346 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2347 		path->locks[i] = 0;
2348 	}
2349 }
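
/*
 * Illustrative sketch (hypothetical snippet): a typical use is to keep
 * only the leaf locked once no more ancestor updates can happen:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	if (ret == 0)
 *		btrfs_unlock_up_safe(path, 1);
 *
 * which drops the locks on levels 1 and above while level 0 stays locked
 * for the item update itself.
 */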
2350 
2351 /*
2352  * helper function for btrfs_search_slot.  The goal is to find a block
2353  * in cache without setting the path to blocking.  If we find the block
2354  * we return zero and the path is unchanged.
2355  *
2356  * If we can't find the block, we set the path blocking and do some
2357  * reada.  -EAGAIN is returned and the search must be repeated.
2358  */
2359 static int
2360 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2361 		      struct extent_buffer **eb_ret, int level, int slot,
2362 		      const struct btrfs_key *key)
2363 {
2364 	struct btrfs_fs_info *fs_info = root->fs_info;
2365 	u64 blocknr;
2366 	u64 gen;
2367 	struct extent_buffer *b = *eb_ret;
2368 	struct extent_buffer *tmp;
2369 	struct btrfs_key first_key;
2370 	int ret;
2371 	int parent_level;
2372 
2373 	blocknr = btrfs_node_blockptr(b, slot);
2374 	gen = btrfs_node_ptr_generation(b, slot);
2375 	parent_level = btrfs_header_level(b);
2376 	btrfs_node_key_to_cpu(b, &first_key, slot);
2377 
2378 	tmp = find_extent_buffer(fs_info, blocknr);
2379 	if (tmp) {
2380 		/* first we do an atomic uptodate check */
2381 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2382 			*eb_ret = tmp;
2383 			return 0;
2384 		}
2385 
2386 		/* the pages were up to date, but we failed
2387 		 * the generation number check.  Do a full
2388 		 * read for the generation number that is correct.
2389 		 * We must do this without dropping locks so
2390 		 * we can trust our generation number
2391 		 */
2392 		btrfs_set_path_blocking(p);
2393 
2394 		/* now we're allowed to do a blocking uptodate check */
2395 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2396 		if (!ret) {
2397 			*eb_ret = tmp;
2398 			return 0;
2399 		}
2400 		free_extent_buffer(tmp);
2401 		btrfs_release_path(p);
2402 		return -EIO;
2403 	}
2404 
2405 	/*
2406 	 * reduce lock contention at high levels
2407 	 * of the btree by dropping locks before
2408 	 * we read.  Don't release the lock on the current
2409 	 * level because we need to walk this node to figure
2410 	 * out which blocks to read.
2411 	 */
2412 	btrfs_unlock_up_safe(p, level + 1);
2413 	btrfs_set_path_blocking(p);
2414 
2415 	if (p->reada != READA_NONE)
2416 		reada_for_search(fs_info, p, level, slot, key->objectid);
2417 
2418 	ret = -EAGAIN;
2419 	tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2420 			      &first_key);
2421 	if (!IS_ERR(tmp)) {
2422 		/*
2423 		 * If the read above didn't mark this buffer up to date,
2424 		 * it will never end up being up to date.  Set ret to EIO now
2425 		 * and give up so that our caller doesn't loop forever
2426 		 * on our EAGAINs.
2427 		 */
2428 		if (!extent_buffer_uptodate(tmp))
2429 			ret = -EIO;
2430 		free_extent_buffer(tmp);
2431 	} else {
2432 		ret = PTR_ERR(tmp);
2433 	}
2434 
2435 	btrfs_release_path(p);
2436 	return ret;
2437 }
2438 
2439 /*
2440  * helper function for btrfs_search_slot.  This does all of the checks
2441  * for node-level blocks and does any balancing required based on
2442  * the ins_len.
2443  *
2444  * If no extra work was required, zero is returned.  If we had to
2445  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2446  * start over
2447  */
2448 static int
2449 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2450 		       struct btrfs_root *root, struct btrfs_path *p,
2451 		       struct extent_buffer *b, int level, int ins_len,
2452 		       int *write_lock_level)
2453 {
2454 	struct btrfs_fs_info *fs_info = root->fs_info;
2455 	int ret;
2456 
2457 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2458 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2459 		int sret;
2460 
2461 		if (*write_lock_level < level + 1) {
2462 			*write_lock_level = level + 1;
2463 			btrfs_release_path(p);
2464 			goto again;
2465 		}
2466 
2467 		btrfs_set_path_blocking(p);
2468 		reada_for_balance(fs_info, p, level);
2469 		sret = split_node(trans, root, p, level);
2470 
2471 		BUG_ON(sret > 0);
2472 		if (sret) {
2473 			ret = sret;
2474 			goto done;
2475 		}
2476 		b = p->nodes[level];
2477 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2478 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2479 		int sret;
2480 
2481 		if (*write_lock_level < level + 1) {
2482 			*write_lock_level = level + 1;
2483 			btrfs_release_path(p);
2484 			goto again;
2485 		}
2486 
2487 		btrfs_set_path_blocking(p);
2488 		reada_for_balance(fs_info, p, level);
2489 		sret = balance_level(trans, root, p, level);
2490 
2491 		if (sret) {
2492 			ret = sret;
2493 			goto done;
2494 		}
2495 		b = p->nodes[level];
2496 		if (!b) {
2497 			btrfs_release_path(p);
2498 			goto again;
2499 		}
2500 		BUG_ON(btrfs_header_nritems(b) == 1);
2501 	}
2502 	return 0;
2503 
2504 again:
2505 	ret = -EAGAIN;
2506 done:
2507 	return ret;
2508 }
2509 
2510 static void key_search_validate(struct extent_buffer *b,
2511 				const struct btrfs_key *key,
2512 				int level)
2513 {
2514 #ifdef CONFIG_BTRFS_ASSERT
2515 	struct btrfs_disk_key disk_key;
2516 
2517 	btrfs_cpu_key_to_disk(&disk_key, key);
2518 
2519 	if (level == 0)
2520 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2521 		    offsetof(struct btrfs_leaf, items[0].key),
2522 		    sizeof(disk_key)));
2523 	else
2524 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2525 		    offsetof(struct btrfs_node, ptrs[0].key),
2526 		    sizeof(disk_key)));
2527 #endif
2528 }
2529 
2530 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2531 		      int level, int *prev_cmp, int *slot)
2532 {
2533 	if (*prev_cmp != 0) {
2534 		*prev_cmp = btrfs_bin_search(b, key, level, slot);
2535 		return *prev_cmp;
2536 	}
2537 
2538 	key_search_validate(b, key, level);
2539 	*slot = 0;
2540 
2541 	return 0;
2542 }
2543 
2544 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2545 		u64 iobjectid, u64 ioff, u8 key_type,
2546 		struct btrfs_key *found_key)
2547 {
2548 	int ret;
2549 	struct btrfs_key key;
2550 	struct extent_buffer *eb;
2551 
2552 	ASSERT(path);
2553 	ASSERT(found_key);
2554 
2555 	key.type = key_type;
2556 	key.objectid = iobjectid;
2557 	key.offset = ioff;
2558 
2559 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2560 	if (ret < 0)
2561 		return ret;
2562 
2563 	eb = path->nodes[0];
2564 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2565 		ret = btrfs_next_leaf(fs_root, path);
2566 		if (ret)
2567 			return ret;
2568 		eb = path->nodes[0];
2569 	}
2570 
2571 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2572 	if (found_key->type != key.type ||
2573 			found_key->objectid != key.objectid)
2574 		return 1;
2575 
2576 	return 0;
2577 }
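
/*
 * Illustrative sketch (hypothetical caller): looking up an inode item by
 * objectid via btrfs_find_item(), which hides the search plus next-leaf
 * dance above:
 */
static int __maybe_unused example_find_inode_item(struct btrfs_root *root,
						  u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* 0 == exact hit, 1 == not found, < 0 == error */
	ret = btrfs_find_item(root, path, ino, 0, BTRFS_INODE_ITEM_KEY,
			      &found_key);
	btrfs_free_path(path);
	return ret;
}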
2578 
2579 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2580 							struct btrfs_path *p,
2581 							int write_lock_level)
2582 {
2583 	struct btrfs_fs_info *fs_info = root->fs_info;
2584 	struct extent_buffer *b;
2585 	int root_lock;
2586 	int level = 0;
2587 
2588 	/* We try very hard to do read locks on the root */
2589 	root_lock = BTRFS_READ_LOCK;
2590 
2591 	if (p->search_commit_root) {
2592 		/*
2593 		 * The commit roots are read only so we always do read locks,
2594 		 * and we always must hold the commit_root_sem when doing
2595 		 * searches on them, the only exception is send where we don't
2596 		 * want to block transaction commits for a long time, so
2597 		 * we need to clone the commit root in order to avoid races
2598 		 * with transaction commits that create a snapshot of one of
2599 		 * the roots used by a send operation.
2600 		 */
2601 		if (p->need_commit_sem) {
2602 			down_read(&fs_info->commit_root_sem);
2603 			b = btrfs_clone_extent_buffer(root->commit_root);
2604 			up_read(&fs_info->commit_root_sem);
2605 			if (!b)
2606 				return ERR_PTR(-ENOMEM);
2607 
2608 		} else {
2609 			b = root->commit_root;
2610 			extent_buffer_get(b);
2611 		}
2612 		level = btrfs_header_level(b);
2613 		/*
2614 		 * Ensure that all callers have set skip_locking when
2615 		 * p->search_commit_root = 1.
2616 		 */
2617 		ASSERT(p->skip_locking == 1);
2618 
2619 		goto out;
2620 	}
2621 
2622 	if (p->skip_locking) {
2623 		b = btrfs_root_node(root);
2624 		level = btrfs_header_level(b);
2625 		goto out;
2626 	}
2627 
2628 	/*
2629 	 * If write_lock_level is set to the maximum level, we can skip trying
2630 	 * to get the read lock.
2631 	 */
2632 	if (write_lock_level < BTRFS_MAX_LEVEL) {
2633 		/*
2634 		 * We don't know the level of the root node until we actually
2635 		 * have it read locked
2636 		 */
2637 		b = btrfs_read_lock_root_node(root);
2638 		level = btrfs_header_level(b);
2639 		if (level > write_lock_level)
2640 			goto out;
2641 
2642 		/* Whoops, must trade for write lock */
2643 		btrfs_tree_read_unlock(b);
2644 		free_extent_buffer(b);
2645 	}
2646 
2647 	b = btrfs_lock_root_node(root);
2648 	root_lock = BTRFS_WRITE_LOCK;
2649 
2650 	/* The level might have changed, check again */
2651 	level = btrfs_header_level(b);
2652 
2653 out:
2654 	p->nodes[level] = b;
2655 	if (!p->skip_locking)
2656 		p->locks[level] = root_lock;
2657 	/*
2658 	 * Callers are responsible for dropping b's references.
2659 	 */
2660 	return b;
2661 }
2662 
2663 
2664 /*
2665  * btrfs_search_slot - look for a key in a tree and perform necessary
2666  * modifications to preserve tree invariants.
2667  *
2668  * @trans:	Handle of transaction, used when modifying the tree
2669  * @p:		Holds all btree nodes along the search path
2670  * @root:	The root node of the tree
2671  * @key:	The key we are looking for
2672  * @ins_len:	Indicates purpose of search, for inserts it is 1, for
2673  *		deletions it's -1. 0 for plain searches
2674  * @cow:	whether CoW operations should be performed. Must always be 1
2675  *		when modifying the tree.
2676  *
2677  * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2678  * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2679  *
2680  * If @key is found, 0 is returned and you can find the item in the leaf level
2681  * of the path (level 0)
2682  *
2683  * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2684  * points to the slot where it should be inserted
2685  *
2686  * If an error is encountered while searching the tree a negative error number
2687  * is returned
2688  */
2689 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2690 		      const struct btrfs_key *key, struct btrfs_path *p,
2691 		      int ins_len, int cow)
2692 {
2693 	struct btrfs_fs_info *fs_info = root->fs_info;
2694 	struct extent_buffer *b;
2695 	int slot;
2696 	int ret;
2697 	int err;
2698 	int level;
2699 	int lowest_unlock = 1;
2700 	/* everything at write_lock_level or lower must be write locked */
2701 	int write_lock_level = 0;
2702 	u8 lowest_level = 0;
2703 	int min_write_lock_level;
2704 	int prev_cmp;
2705 
2706 	lowest_level = p->lowest_level;
2707 	WARN_ON(lowest_level && ins_len > 0);
2708 	WARN_ON(p->nodes[0] != NULL);
2709 	BUG_ON(!cow && ins_len);
2710 
2711 	if (ins_len < 0) {
2712 		lowest_unlock = 2;
2713 
2714 		/* when we are removing items, we might have to go up to level
2715 		 * two as we update tree pointers.  Make sure we keep the write
2716 		 * lock on those levels as well
2717 		 */
2718 		write_lock_level = 2;
2719 	} else if (ins_len > 0) {
2720 		/*
2721 		 * for inserting items, make sure we have a write lock on
2722 		 * level 1 so we can update keys
2723 		 */
2724 		write_lock_level = 1;
2725 	}
2726 
2727 	if (!cow)
2728 		write_lock_level = -1;
2729 
2730 	if (cow && (p->keep_locks || p->lowest_level))
2731 		write_lock_level = BTRFS_MAX_LEVEL;
2732 
2733 	min_write_lock_level = write_lock_level;
2734 
2735 again:
2736 	prev_cmp = -1;
2737 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
2738 	if (IS_ERR(b)) {
2739 		ret = PTR_ERR(b);
2740 		goto done;
2741 	}
2742 
2743 	while (b) {
2744 		level = btrfs_header_level(b);
2745 
2746 		/*
2747 		 * setup the path here so we can release it under lock
2748 		 * contention with the cow code
2749 		 */
2750 		if (cow) {
2751 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2752 
2753 			/*
2754 			 * if we don't really need to cow this block
2755 			 * then we don't want to set the path blocking,
2756 			 * so we test it here
2757 			 */
2758 			if (!should_cow_block(trans, root, b)) {
2759 				trans->dirty = true;
2760 				goto cow_done;
2761 			}
2762 
2763 			/*
2764 			 * must have write locks on this node and the
2765 			 * parent
2766 			 */
2767 			if (level > write_lock_level ||
2768 			    (level + 1 > write_lock_level &&
2769 			    level + 1 < BTRFS_MAX_LEVEL &&
2770 			    p->nodes[level + 1])) {
2771 				write_lock_level = level + 1;
2772 				btrfs_release_path(p);
2773 				goto again;
2774 			}
2775 
2776 			btrfs_set_path_blocking(p);
2777 			if (last_level)
2778 				err = btrfs_cow_block(trans, root, b, NULL, 0,
2779 						      &b);
2780 			else
2781 				err = btrfs_cow_block(trans, root, b,
2782 						      p->nodes[level + 1],
2783 						      p->slots[level + 1], &b);
2784 			if (err) {
2785 				ret = err;
2786 				goto done;
2787 			}
2788 		}
2789 cow_done:
2790 		p->nodes[level] = b;
2791 		/*
2792 		 * Leave path with blocking locks to avoid massive
2793 		 * lock context switches, this is done on purpose.
2794 		 */
2795 
2796 		/*
2797 		 * we have a lock on b and as long as we aren't changing
2798 		 * the tree, there is no way for the items in b to change.
2799 		 * It is safe to drop the lock on our parent before we
2800 		 * go through the expensive btree search on b.
2801 		 *
2802 		 * If we're inserting or deleting (ins_len != 0), then we might
2803 		 * be changing slot zero, which may require changing the parent.
2804 		 * So, we can't drop the lock until after we know which slot
2805 		 * we're operating on.
2806 		 */
2807 		if (!ins_len && !p->keep_locks) {
2808 			int u = level + 1;
2809 
2810 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2811 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2812 				p->locks[u] = 0;
2813 			}
2814 		}
2815 
2816 		ret = key_search(b, key, level, &prev_cmp, &slot);
2817 		if (ret < 0)
2818 			goto done;
2819 
2820 		if (level != 0) {
2821 			int dec = 0;
2822 			if (ret && slot > 0) {
2823 				dec = 1;
2824 				slot -= 1;
2825 			}
2826 			p->slots[level] = slot;
2827 			err = setup_nodes_for_search(trans, root, p, b, level,
2828 					     ins_len, &write_lock_level);
2829 			if (err == -EAGAIN)
2830 				goto again;
2831 			if (err) {
2832 				ret = err;
2833 				goto done;
2834 			}
2835 			b = p->nodes[level];
2836 			slot = p->slots[level];
2837 
2838 			/*
2839 			 * slot 0 is special, if we change the key
2840 			 * we have to update the parent pointer
2841 			 * which means we must have a write lock
2842 			 * on the parent
2843 			 */
2844 			if (slot == 0 && ins_len &&
2845 			    write_lock_level < level + 1) {
2846 				write_lock_level = level + 1;
2847 				btrfs_release_path(p);
2848 				goto again;
2849 			}
2850 
2851 			unlock_up(p, level, lowest_unlock,
2852 				  min_write_lock_level, &write_lock_level);
2853 
2854 			if (level == lowest_level) {
2855 				if (dec)
2856 					p->slots[level]++;
2857 				goto done;
2858 			}
2859 
2860 			err = read_block_for_search(root, p, &b, level,
2861 						    slot, key);
2862 			if (err == -EAGAIN)
2863 				goto again;
2864 			if (err) {
2865 				ret = err;
2866 				goto done;
2867 			}
2868 
2869 			if (!p->skip_locking) {
2870 				level = btrfs_header_level(b);
2871 				if (level <= write_lock_level) {
2872 					err = btrfs_try_tree_write_lock(b);
2873 					if (!err) {
2874 						btrfs_set_path_blocking(p);
2875 						btrfs_tree_lock(b);
2876 					}
2877 					p->locks[level] = BTRFS_WRITE_LOCK;
2878 				} else {
2879 					err = btrfs_tree_read_lock_atomic(b);
2880 					if (!err) {
2881 						btrfs_set_path_blocking(p);
2882 						btrfs_tree_read_lock(b);
2883 					}
2884 					p->locks[level] = BTRFS_READ_LOCK;
2885 				}
2886 				p->nodes[level] = b;
2887 			}
2888 		} else {
2889 			p->slots[level] = slot;
2890 			if (ins_len > 0 &&
2891 			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
2892 				if (write_lock_level < 1) {
2893 					write_lock_level = 1;
2894 					btrfs_release_path(p);
2895 					goto again;
2896 				}
2897 
2898 				btrfs_set_path_blocking(p);
2899 				err = split_leaf(trans, root, key,
2900 						 p, ins_len, ret == 0);
2901 
2902 				BUG_ON(err > 0);
2903 				if (err) {
2904 					ret = err;
2905 					goto done;
2906 				}
2907 			}
2908 			if (!p->search_for_split)
2909 				unlock_up(p, level, lowest_unlock,
2910 					  min_write_lock_level, NULL);
2911 			goto done;
2912 		}
2913 	}
2914 	ret = 1;
2915 done:
2916 	/*
2917 	 * we don't really know what they plan on doing with the path
2918 	 * from here on, so for now just mark it as blocking
2919 	 */
2920 	if (!p->leave_spinning)
2921 		btrfs_set_path_blocking(p);
2922 	if (ret < 0 && !p->skip_release_on_error)
2923 		btrfs_release_path(p);
2924 	return ret;
2925 }
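
/*
 * Illustrative sketch (hypothetical caller): the return value convention
 * documented above, for a plain read-only lookup with no transaction, no
 * cow and nothing reserved:
 */
static int __maybe_unused example_lookup(struct btrfs_root *root,
					 const struct btrfs_key *key,
					 struct btrfs_path *path)
{
	int ret;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;	/* search error */
	if (ret == 0) {
		/* exact match at path->nodes[0], path->slots[0] */
	} else {
		/* ret == 1: slot points at the would-be insert position */
	}
	btrfs_release_path(path);
	return 0;
}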
2926 
2927 /*
2928  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2929  * current state of the tree together with the operations recorded in the tree
2930  * modification log to search for the key in a previous version of this tree, as
2931  * denoted by the time_seq parameter.
2932  *
2933  * Naturally, there is no support for insert, delete or cow operations.
2934  *
2935  * The resulting path and return value will be set up as if we called
2936  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2937  */
2938 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2939 			  struct btrfs_path *p, u64 time_seq)
2940 {
2941 	struct btrfs_fs_info *fs_info = root->fs_info;
2942 	struct extent_buffer *b;
2943 	int slot;
2944 	int ret;
2945 	int err;
2946 	int level;
2947 	int lowest_unlock = 1;
2948 	u8 lowest_level = 0;
2949 	int prev_cmp = -1;
2950 
2951 	lowest_level = p->lowest_level;
2952 	WARN_ON(p->nodes[0] != NULL);
2953 
2954 	if (p->search_commit_root) {
2955 		BUG_ON(time_seq);
2956 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2957 	}
2958 
2959 again:
2960 	b = get_old_root(root, time_seq);
2961 	if (!b) {
2962 		ret = -EIO;
2963 		goto done;
2964 	}
2965 	level = btrfs_header_level(b);
2966 	p->locks[level] = BTRFS_READ_LOCK;
2967 
2968 	while (b) {
2969 		level = btrfs_header_level(b);
2970 		p->nodes[level] = b;
2971 
2972 		/*
2973 		 * we have a lock on b and as long as we aren't changing
2974 		 * the tree, there is no way for the items in b to change.
2975 		 * It is safe to drop the lock on our parent before we
2976 		 * go through the expensive btree search on b.
2977 		 */
2978 		btrfs_unlock_up_safe(p, level + 1);
2979 
2980 		/*
2981 		 * Since we can unwind ebs we want to do a real search every
2982 		 * Since we can unwind extent buffers we want to do a real search every
2983 		 */
2984 		prev_cmp = -1;
2985 		ret = key_search(b, key, level, &prev_cmp, &slot);
2986 
2987 		if (level != 0) {
2988 			int dec = 0;
2989 			if (ret && slot > 0) {
2990 				dec = 1;
2991 				slot -= 1;
2992 			}
2993 			p->slots[level] = slot;
2994 			unlock_up(p, level, lowest_unlock, 0, NULL);
2995 
2996 			if (level == lowest_level) {
2997 				if (dec)
2998 					p->slots[level]++;
2999 				goto done;
3000 			}
3001 
3002 			err = read_block_for_search(root, p, &b, level,
3003 						    slot, key);
3004 			if (err == -EAGAIN)
3005 				goto again;
3006 			if (err) {
3007 				ret = err;
3008 				goto done;
3009 			}
3010 
3011 			level = btrfs_header_level(b);
3012 			err = btrfs_tree_read_lock_atomic(b);
3013 			if (!err) {
3014 				btrfs_set_path_blocking(p);
3015 				btrfs_tree_read_lock(b);
3016 			}
3017 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3018 			if (!b) {
3019 				ret = -ENOMEM;
3020 				goto done;
3021 			}
3022 			p->locks[level] = BTRFS_READ_LOCK;
3023 			p->nodes[level] = b;
3024 		} else {
3025 			p->slots[level] = slot;
3026 			unlock_up(p, level, lowest_unlock, 0, NULL);
3027 			goto done;
3028 		}
3029 	}
3030 	ret = 1;
3031 done:
3032 	if (!p->leave_spinning)
3033 		btrfs_set_path_blocking(p);
3034 	if (ret < 0)
3035 		btrfs_release_path(p);
3036 
3037 	return ret;
3038 }
3039 
3040 /*
3041  * helper to use instead of search slot if no exact match is needed but
3042  * instead the next or previous item should be returned.
3043  * When find_higher is true, the next higher item is returned, the next lower
3044  * otherwise.
3045  * When return_any and find_higher are both true, and no higher item is found,
3046  * return the next lower instead.
3047  * When return_any is true and find_higher is false, and no lower item is found,
3048  * return the next higher instead.
3049  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3050  * < 0 on error
3051  */
3052 int btrfs_search_slot_for_read(struct btrfs_root *root,
3053 			       const struct btrfs_key *key,
3054 			       struct btrfs_path *p, int find_higher,
3055 			       int return_any)
3056 {
3057 	int ret;
3058 	struct extent_buffer *leaf;
3059 
3060 again:
3061 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3062 	if (ret <= 0)
3063 		return ret;
3064 	/*
3065 	 * a return value of 1 means the path is at the position where the
3066 	 * item should be inserted. Normally this is the next bigger item,
3067 	 * but in case the previous item is the last in a leaf, path points
3068 	 * to the first free slot in the previous leaf, i.e. at an invalid
3069 	 * item.
3070 	 */
3071 	leaf = p->nodes[0];
3072 
3073 	if (find_higher) {
3074 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3075 			ret = btrfs_next_leaf(root, p);
3076 			if (ret <= 0)
3077 				return ret;
3078 			if (!return_any)
3079 				return 1;
3080 			/*
3081 			 * no higher item found, return the next
3082 			 * lower instead
3083 			 */
3084 			return_any = 0;
3085 			find_higher = 0;
3086 			btrfs_release_path(p);
3087 			goto again;
3088 		}
3089 	} else {
3090 		if (p->slots[0] == 0) {
3091 			ret = btrfs_prev_leaf(root, p);
3092 			if (ret < 0)
3093 				return ret;
3094 			if (!ret) {
3095 				leaf = p->nodes[0];
3096 				if (p->slots[0] == btrfs_header_nritems(leaf))
3097 					p->slots[0]--;
3098 				return 0;
3099 			}
3100 			if (!return_any)
3101 				return 1;
3102 			/*
3103 			 * no lower item found, return the next
3104 			 * higher instead
3105 			 */
3106 			return_any = 0;
3107 			find_higher = 1;
3108 			btrfs_release_path(p);
3109 			goto again;
3110 		} else {
3111 			--p->slots[0];
3112 		}
3113 	}
3114 	return 0;
3115 }
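
/*
 * Illustrative sketch (hypothetical caller): finding the first item at or
 * after a given objectid, falling back to the next lower one if nothing
 * higher exists:
 */
static int __maybe_unused example_find_nearest(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	/* find_higher == 1, return_any == 1: only fails on an empty tree */
	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	/* the caller releases the path when done with the item */
	return ret;
}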
3116 
3117 /*
3118  * adjust the pointers going up the tree, starting at level
3119  * making sure the right key of each node points to 'key'.
3120  * This is used after shifting pointers to the left, so it stops
3121  * fixing up pointers when a given leaf/node is not in slot 0 of the
3122  * higher levels.
3124  */
3125 static void fixup_low_keys(struct btrfs_path *path,
3126 			   struct btrfs_disk_key *key, int level)
3127 {
3128 	int i;
3129 	struct extent_buffer *t;
3130 	int ret;
3131 
3132 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3133 		int tslot = path->slots[i];
3134 
3135 		if (!path->nodes[i])
3136 			break;
3137 		t = path->nodes[i];
3138 		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3139 				GFP_ATOMIC);
3140 		BUG_ON(ret < 0);
3141 		btrfs_set_node_key(t, key, tslot);
3142 		btrfs_mark_buffer_dirty(path->nodes[i]);
3143 		if (tslot != 0)
3144 			break;
3145 	}
3146 }
3147 
3148 /*
3149  * update item key.
3150  *
3151  * This function isn't completely safe. It's the caller's responsibility
3152  * to ensure that the new key won't break the sort order.
3153  */
3154 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3155 			     struct btrfs_path *path,
3156 			     const struct btrfs_key *new_key)
3157 {
3158 	struct btrfs_disk_key disk_key;
3159 	struct extent_buffer *eb;
3160 	int slot;
3161 
3162 	eb = path->nodes[0];
3163 	slot = path->slots[0];
3164 	if (slot > 0) {
3165 		btrfs_item_key(eb, &disk_key, slot - 1);
3166 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3167 	}
3168 	if (slot < btrfs_header_nritems(eb) - 1) {
3169 		btrfs_item_key(eb, &disk_key, slot + 1);
3170 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3171 	}
3172 
3173 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3174 	btrfs_set_item_key(eb, &disk_key, slot);
3175 	btrfs_mark_buffer_dirty(eb);
3176 	if (slot == 0)
3177 		fixup_low_keys(path, &disk_key, 1);
3178 }
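
/*
 * Illustrative sketch (hypothetical snippet): bumping an item key's
 * offset, which is only legal because the new key still sorts between
 * the neighbours checked above:
 *
 *	struct btrfs_key new_key = found_key;
 *
 *	new_key.offset = new_start;
 *	btrfs_set_item_key_safe(fs_info, path, &new_key);
 *
 * The BUG_ONs fire if the caller gets the ordering wrong.
 */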
3179 
3180 /*
3181  * try to push data from one node into the next node left in the
3182  * tree.
3183  *
3184  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3185  * error, and > 0 if there was no room in the left hand block.
3186  */
3187 static int push_node_left(struct btrfs_trans_handle *trans,
3188 			  struct btrfs_fs_info *fs_info,
3189 			  struct extent_buffer *dst,
3190 			  struct extent_buffer *src, int empty)
3191 {
3192 	int push_items = 0;
3193 	int src_nritems;
3194 	int dst_nritems;
3195 	int ret = 0;
3196 
3197 	src_nritems = btrfs_header_nritems(src);
3198 	dst_nritems = btrfs_header_nritems(dst);
3199 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3200 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3201 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3202 
3203 	if (!empty && src_nritems <= 8)
3204 		return 1;
3205 
3206 	if (push_items <= 0)
3207 		return 1;
3208 
3209 	if (empty) {
3210 		push_items = min(src_nritems, push_items);
3211 		if (push_items < src_nritems) {
3212 			/* leave at least 8 pointers in the node if
3213 			 * we aren't going to empty it
3214 			 */
3215 			if (src_nritems - push_items < 8) {
3216 				if (push_items <= 8)
3217 					return 1;
3218 				push_items -= 8;
3219 			}
3220 		}
3221 	} else
3222 		push_items = min(src_nritems - 8, push_items);
3223 
3224 	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3225 				   push_items);
3226 	if (ret) {
3227 		btrfs_abort_transaction(trans, ret);
3228 		return ret;
3229 	}
3230 	copy_extent_buffer(dst, src,
3231 			   btrfs_node_key_ptr_offset(dst_nritems),
3232 			   btrfs_node_key_ptr_offset(0),
3233 			   push_items * sizeof(struct btrfs_key_ptr));
3234 
3235 	if (push_items < src_nritems) {
3236 		/*
3237 		 * Don't call tree_mod_log_insert_move here, key removal was
3238 		 * already fully logged by tree_mod_log_eb_copy above.
3239 		 */
3240 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3241 				      btrfs_node_key_ptr_offset(push_items),
3242 				      (src_nritems - push_items) *
3243 				      sizeof(struct btrfs_key_ptr));
3244 	}
3245 	btrfs_set_header_nritems(src, src_nritems - push_items);
3246 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3247 	btrfs_mark_buffer_dirty(src);
3248 	btrfs_mark_buffer_dirty(dst);
3249 
3250 	return ret;
3251 }
3252 
3253 /*
3254  * try to push data from one node into the next node right in the
3255  * tree.
3256  *
3257  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3258  * error, and > 0 if there was no room in the right hand block.
3259  *
3260  * this will only push up to 1/2 the contents of the left node over
3261  */
3262 static int balance_node_right(struct btrfs_trans_handle *trans,
3263 			      struct btrfs_fs_info *fs_info,
3264 			      struct extent_buffer *dst,
3265 			      struct extent_buffer *src)
3266 {
3267 	int push_items = 0;
3268 	int max_push;
3269 	int src_nritems;
3270 	int dst_nritems;
3271 	int ret = 0;
3272 
3273 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3274 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3275 
3276 	src_nritems = btrfs_header_nritems(src);
3277 	dst_nritems = btrfs_header_nritems(dst);
3278 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3279 	if (push_items <= 0)
3280 		return 1;
3281 
3282 	if (src_nritems < 4)
3283 		return 1;
3284 
3285 	max_push = src_nritems / 2 + 1;
3286 	/* don't try to empty the node */
3287 	if (max_push >= src_nritems)
3288 		return 1;
3289 
3290 	if (max_push < push_items)
3291 		push_items = max_push;
3292 
3293 	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3294 	BUG_ON(ret < 0);
3295 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3296 				      btrfs_node_key_ptr_offset(0),
3297 				      (dst_nritems) *
3298 				      sizeof(struct btrfs_key_ptr));
3299 
3300 	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3301 				   src_nritems - push_items, push_items);
3302 	if (ret) {
3303 		btrfs_abort_transaction(trans, ret);
3304 		return ret;
3305 	}
3306 	copy_extent_buffer(dst, src,
3307 			   btrfs_node_key_ptr_offset(0),
3308 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3309 			   push_items * sizeof(struct btrfs_key_ptr));
3310 
3311 	btrfs_set_header_nritems(src, src_nritems - push_items);
3312 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3313 
3314 	btrfs_mark_buffer_dirty(src);
3315 	btrfs_mark_buffer_dirty(dst);
3316 
3317 	return ret;
3318 }
3319 
3320 /*
3321  * helper function to insert a new root level in the tree.
3322  * A new node is allocated, and a single item is inserted to
3323  * point to the existing root
3324  *
3325  * returns zero on success or < 0 on failure.
3326  */
3327 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3328 			   struct btrfs_root *root,
3329 			   struct btrfs_path *path, int level)
3330 {
3331 	struct btrfs_fs_info *fs_info = root->fs_info;
3332 	u64 lower_gen;
3333 	struct extent_buffer *lower;
3334 	struct extent_buffer *c;
3335 	struct extent_buffer *old;
3336 	struct btrfs_disk_key lower_key;
3337 	int ret;
3338 
3339 	BUG_ON(path->nodes[level]);
3340 	BUG_ON(path->nodes[level-1] != root->node);
3341 
3342 	lower = path->nodes[level-1];
3343 	if (level == 1)
3344 		btrfs_item_key(lower, &lower_key, 0);
3345 	else
3346 		btrfs_node_key(lower, &lower_key, 0);
3347 
3348 	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3349 				   &lower_key, level, root->node->start, 0);
3350 	if (IS_ERR(c))
3351 		return PTR_ERR(c);
3352 
3353 	root_add_used(root, fs_info->nodesize);
3354 
3355 	btrfs_set_header_nritems(c, 1);
3356 	btrfs_set_node_key(c, &lower_key, 0);
3357 	btrfs_set_node_blockptr(c, 0, lower->start);
3358 	lower_gen = btrfs_header_generation(lower);
3359 	WARN_ON(lower_gen != trans->transid);
3360 
3361 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3362 
3363 	btrfs_mark_buffer_dirty(c);
3364 
3365 	old = root->node;
3366 	ret = tree_mod_log_insert_root(root->node, c, 0);
3367 	BUG_ON(ret < 0);
3368 	rcu_assign_pointer(root->node, c);
3369 
3370 	/* the super has an extra ref to root->node */
3371 	free_extent_buffer(old);
3372 
3373 	add_root_to_dirty_list(root);
3374 	extent_buffer_get(c);
3375 	path->nodes[level] = c;
3376 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3377 	path->slots[level] = 0;
3378 	return 0;
3379 }
3380 
3381 /*
3382  * worker function to insert a single pointer in a node.
3383  * the node should have enough room for the pointer already
3384  *
3385  * slot and level indicate where you want the key to go, and
3386  * blocknr is the block the key points to.
3387  */
3388 static void insert_ptr(struct btrfs_trans_handle *trans,
3389 		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3390 		       struct btrfs_disk_key *key, u64 bytenr,
3391 		       int slot, int level)
3392 {
3393 	struct extent_buffer *lower;
3394 	int nritems;
3395 	int ret;
3396 
3397 	BUG_ON(!path->nodes[level]);
3398 	btrfs_assert_tree_locked(path->nodes[level]);
3399 	lower = path->nodes[level];
3400 	nritems = btrfs_header_nritems(lower);
3401 	BUG_ON(slot > nritems);
3402 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3403 	if (slot != nritems) {
3404 		if (level) {
3405 			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3406 					nritems - slot);
3407 			BUG_ON(ret < 0);
3408 		}
3409 		memmove_extent_buffer(lower,
3410 			      btrfs_node_key_ptr_offset(slot + 1),
3411 			      btrfs_node_key_ptr_offset(slot),
3412 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3413 	}
3414 	if (level) {
3415 		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3416 				GFP_NOFS);
3417 		BUG_ON(ret < 0);
3418 	}
3419 	btrfs_set_node_key(lower, key, slot);
3420 	btrfs_set_node_blockptr(lower, slot, bytenr);
3421 	WARN_ON(trans->transid == 0);
3422 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3423 	btrfs_set_header_nritems(lower, nritems + 1);
3424 	btrfs_mark_buffer_dirty(lower);
3425 }
3426 
3427 /*
3428  * split the node at the specified level in path in two.
3429  * The path is corrected to point to the appropriate node after the split
3430  *
3431  * Before splitting this tries to make some room in the node by pushing
3432  * left and right, if either one works, it returns right away.
3433  *
3434  * returns 0 on success and < 0 on failure
3435  */
3436 static noinline int split_node(struct btrfs_trans_handle *trans,
3437 			       struct btrfs_root *root,
3438 			       struct btrfs_path *path, int level)
3439 {
3440 	struct btrfs_fs_info *fs_info = root->fs_info;
3441 	struct extent_buffer *c;
3442 	struct extent_buffer *split;
3443 	struct btrfs_disk_key disk_key;
3444 	int mid;
3445 	int ret;
3446 	u32 c_nritems;
3447 
3448 	c = path->nodes[level];
3449 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3450 	if (c == root->node) {
3451 		/*
3452 		 * trying to split the root, let's make a new one
3453 		 *
3454 		 * tree mod log: We don't log_removal old root in
3455 		 * tree mod log: We don't log removal of the old root in
3456 		 * normal node. We are going to log removal of half of the
3457 		 * elements below with tree_mod_log_eb_copy. We're holding a
3458 		 * tree lock on the buffer, which is why we cannot race with
3459 		 * other tree_mod_log users.
3460 		 */
3461 		ret = insert_new_root(trans, root, path, level + 1);
3462 		if (ret)
3463 			return ret;
3464 	} else {
3465 		ret = push_nodes_for_insert(trans, root, path, level);
3466 		c = path->nodes[level];
3467 		if (!ret && btrfs_header_nritems(c) <
3468 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3469 			return 0;
3470 		if (ret < 0)
3471 			return ret;
3472 	}
3473 
3474 	c_nritems = btrfs_header_nritems(c);
3475 	mid = (c_nritems + 1) / 2;
3476 	btrfs_node_key(c, &disk_key, mid);
3477 
3478 	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3479 			&disk_key, level, c->start, 0);
3480 	if (IS_ERR(split))
3481 		return PTR_ERR(split);
3482 
3483 	root_add_used(root, fs_info->nodesize);
3484 	ASSERT(btrfs_header_level(c) == level);
3485 
3486 	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3487 	if (ret) {
3488 		btrfs_abort_transaction(trans, ret);
3489 		return ret;
3490 	}
3491 	copy_extent_buffer(split, c,
3492 			   btrfs_node_key_ptr_offset(0),
3493 			   btrfs_node_key_ptr_offset(mid),
3494 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3495 	btrfs_set_header_nritems(split, c_nritems - mid);
3496 	btrfs_set_header_nritems(c, mid);
3497 	ret = 0;
3498 
3499 	btrfs_mark_buffer_dirty(c);
3500 	btrfs_mark_buffer_dirty(split);
3501 
3502 	insert_ptr(trans, fs_info, path, &disk_key, split->start,
3503 		   path->slots[level + 1] + 1, level + 1);
3504 
3505 	if (path->slots[level] >= mid) {
3506 		path->slots[level] -= mid;
3507 		btrfs_tree_unlock(c);
3508 		free_extent_buffer(c);
3509 		path->nodes[level] = split;
3510 		path->slots[level + 1] += 1;
3511 	} else {
3512 		btrfs_tree_unlock(split);
3513 		free_extent_buffer(split);
3514 	}
3515 	return ret;
3516 }
3517 
3518 /*
3519  * how many bytes are required to store the items in a leaf.  start
3520  * and nr indicate which items in the leaf to check.  This totals up the
3521  * space used both by the item structs and the item data
3522  */
3523 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3524 {
3525 	struct btrfs_item *start_item;
3526 	struct btrfs_item *end_item;
3527 	struct btrfs_map_token token;
3528 	int data_len;
3529 	int nritems = btrfs_header_nritems(l);
3530 	int end = min(nritems, start + nr) - 1;
3531 
3532 	if (!nr)
3533 		return 0;
3534 	btrfs_init_map_token(&token);
3535 	start_item = btrfs_item_nr(start);
3536 	end_item = btrfs_item_nr(end);
3537 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3538 		btrfs_token_item_size(l, start_item, &token);
3539 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3540 	data_len += sizeof(struct btrfs_item) * nr;
3541 	WARN_ON(data_len < 0);
3542 	return data_len;
3543 }
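
/*
 * Worked example (made-up numbers): with start == 0 and nr == 2, if item
 * 0 has data offset 3975 and size 20 it ends at 3995; if item 1 has data
 * offset 3955, the data consumed is 3995 - 3955 = 40 bytes, plus
 * 2 * sizeof(struct btrfs_item) for the item headers themselves.
 */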
3544 
3545 /*
3546  * The space between the end of the leaf items and
3547  * the start of the leaf data.  IOW, how much room
3548  * the leaf has left for both items and data
3549  */
3550 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3551 				   struct extent_buffer *leaf)
3552 {
3553 	int nritems = btrfs_header_nritems(leaf);
3554 	int ret;
3555 
3556 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3557 	if (ret < 0) {
3558 		btrfs_crit(fs_info,
3559 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3560 			   ret,
3561 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3562 			   leaf_space_used(leaf, 0, nritems), nritems);
3563 	}
3564 	return ret;
3565 }
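
/*
 * Illustrative sketch (hypothetical snippet): a caller sizing an insert
 * checks free space against the item body plus its header, mirroring the
 * ins_len handling in btrfs_search_slot():
 *
 *	if (btrfs_leaf_free_space(fs_info, leaf) <
 *	    data_size + sizeof(struct btrfs_item))
 *		return 1;
 *
 * and splits or pushes the leaf before copying the new item body into
 * the leaf data area.
 */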
3566 
3567 /*
3568  * min slot controls the lowest index we're willing to push to the
3569  * right.  We'll push up to and including min_slot, but no lower
3570  */
3571 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3572 				      struct btrfs_path *path,
3573 				      int data_size, int empty,
3574 				      struct extent_buffer *right,
3575 				      int free_space, u32 left_nritems,
3576 				      u32 min_slot)
3577 {
3578 	struct extent_buffer *left = path->nodes[0];
3579 	struct extent_buffer *upper = path->nodes[1];
3580 	struct btrfs_map_token token;
3581 	struct btrfs_disk_key disk_key;
3582 	int slot;
3583 	u32 i;
3584 	int push_space = 0;
3585 	int push_items = 0;
3586 	struct btrfs_item *item;
3587 	u32 nr;
3588 	u32 right_nritems;
3589 	u32 data_end;
3590 	u32 this_item_size;
3591 
3592 	btrfs_init_map_token(&token);
3593 
3594 	if (empty)
3595 		nr = 0;
3596 	else
3597 		nr = max_t(u32, 1, min_slot);
3598 
3599 	if (path->slots[0] >= left_nritems)
3600 		push_space += data_size;
3601 
3602 	slot = path->slots[1];
3603 	i = left_nritems - 1;
3604 	while (i >= nr) {
3605 		item = btrfs_item_nr(i);
3606 
3607 		if (!empty && push_items > 0) {
3608 			if (path->slots[0] > i)
3609 				break;
3610 			if (path->slots[0] == i) {
3611 				int space = btrfs_leaf_free_space(fs_info, left);
3612 				if (space + push_space * 2 > free_space)
3613 					break;
3614 			}
3615 		}
3616 
3617 		if (path->slots[0] == i)
3618 			push_space += data_size;
3619 
3620 		this_item_size = btrfs_item_size(left, item);
3621 		if (this_item_size + sizeof(*item) + push_space > free_space)
3622 			break;
3623 
3624 		push_items++;
3625 		push_space += this_item_size + sizeof(*item);
3626 		if (i == 0)
3627 			break;
3628 		i--;
3629 	}
3630 
3631 	if (push_items == 0)
3632 		goto out_unlock;
3633 
3634 	WARN_ON(!empty && push_items == left_nritems);
3635 
3636 	/* push left to right */
3637 	right_nritems = btrfs_header_nritems(right);
3638 
3639 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3640 	push_space -= leaf_data_end(fs_info, left);
3641 
3642 	/* make room in the right data area */
3643 	data_end = leaf_data_end(fs_info, right);
3644 	memmove_extent_buffer(right,
3645 			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3646 			      BTRFS_LEAF_DATA_OFFSET + data_end,
3647 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3648 
3649 	/* copy from the left data area */
3650 	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3651 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3652 		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3653 		     push_space);
3654 
3655 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3656 			      btrfs_item_nr_offset(0),
3657 			      right_nritems * sizeof(struct btrfs_item));
3658 
3659 	/* copy the items from left to right */
3660 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3661 		   btrfs_item_nr_offset(left_nritems - push_items),
3662 		   push_items * sizeof(struct btrfs_item));
3663 
3664 	/* update the item pointers */
3665 	right_nritems += push_items;
3666 	btrfs_set_header_nritems(right, right_nritems);
3667 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3668 	for (i = 0; i < right_nritems; i++) {
3669 		item = btrfs_item_nr(i);
3670 		push_space -= btrfs_token_item_size(right, item, &token);
3671 		btrfs_set_token_item_offset(right, item, push_space, &token);
3672 	}
3673 
3674 	left_nritems -= push_items;
3675 	btrfs_set_header_nritems(left, left_nritems);
3676 
3677 	if (left_nritems)
3678 		btrfs_mark_buffer_dirty(left);
3679 	else
3680 		clean_tree_block(fs_info, left);
3681 
3682 	btrfs_mark_buffer_dirty(right);
3683 
3684 	btrfs_item_key(right, &disk_key, 0);
3685 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3686 	btrfs_mark_buffer_dirty(upper);
3687 
3688 	/* then fixup the leaf pointer in the path */
3689 	if (path->slots[0] >= left_nritems) {
3690 		path->slots[0] -= left_nritems;
3691 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3692 			clean_tree_block(fs_info, path->nodes[0]);
3693 		btrfs_tree_unlock(path->nodes[0]);
3694 		free_extent_buffer(path->nodes[0]);
3695 		path->nodes[0] = right;
3696 		path->slots[1] += 1;
3697 	} else {
3698 		btrfs_tree_unlock(right);
3699 		free_extent_buffer(right);
3700 	}
3701 	return 0;
3702 
3703 out_unlock:
3704 	btrfs_tree_unlock(right);
3705 	free_extent_buffer(right);
3706 	return 1;
3707 }
3708 
3709 /*
3710  * push some data in the path leaf to the right, trying to free up at
3711  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3712  *
3713  * returns 1 if the push failed because the other node didn't have enough
3714  * room, 0 if everything worked out and < 0 if there were major errors.
3715  *
3716  * this will push starting from min_slot to the end of the leaf.  It won't
3717  * push any slot lower than min_slot
3718  */
3719 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3720 			   *root, struct btrfs_path *path,
3721 			   int min_data_size, int data_size,
3722 			   int empty, u32 min_slot)
3723 {
3724 	struct btrfs_fs_info *fs_info = root->fs_info;
3725 	struct extent_buffer *left = path->nodes[0];
3726 	struct extent_buffer *right;
3727 	struct extent_buffer *upper;
3728 	int slot;
3729 	int free_space;
3730 	u32 left_nritems;
3731 	int ret;
3732 
3733 	if (!path->nodes[1])
3734 		return 1;
3735 
3736 	slot = path->slots[1];
3737 	upper = path->nodes[1];
3738 	if (slot >= btrfs_header_nritems(upper) - 1)
3739 		return 1;
3740 
3741 	btrfs_assert_tree_locked(path->nodes[1]);
3742 
3743 	right = read_node_slot(fs_info, upper, slot + 1);
3744 	/*
3745 	 * slot + 1 is not valid or we failed to read the right node;
3746 	 * no big deal, just return.
3747 	 */
3748 	if (IS_ERR(right))
3749 		return 1;
3750 
3751 	btrfs_tree_lock(right);
3752 	btrfs_set_lock_blocking(right);
3753 
3754 	free_space = btrfs_leaf_free_space(fs_info, right);
3755 	if (free_space < data_size)
3756 		goto out_unlock;
3757 
3758 	/* cow and double check */
3759 	ret = btrfs_cow_block(trans, root, right, upper,
3760 			      slot + 1, &right);
3761 	if (ret)
3762 		goto out_unlock;
3763 
3764 	free_space = btrfs_leaf_free_space(fs_info, right);
3765 	if (free_space < data_size)
3766 		goto out_unlock;
3767 
3768 	left_nritems = btrfs_header_nritems(left);
3769 	if (left_nritems == 0)
3770 		goto out_unlock;
3771 
3772 	if (path->slots[0] == left_nritems && !empty) {
3773 		/* Key greater than all keys in the leaf, right neighbor has
3774 		 * enough room for it and we're not emptying our leaf to delete
3775 		 * it, therefore use right neighbor to insert the new item and
3776 		 * no need to touch/dirty our left leaf. */
3777 		btrfs_tree_unlock(left);
3778 		free_extent_buffer(left);
3779 		path->nodes[0] = right;
3780 		path->slots[0] = 0;
3781 		path->slots[1]++;
3782 		return 0;
3783 	}
3784 
3785 	return __push_leaf_right(fs_info, path, min_data_size, empty,
3786 				right, free_space, left_nritems, min_slot);
3787 out_unlock:
3788 	btrfs_tree_unlock(right);
3789 	free_extent_buffer(right);
3790 	return 1;
3791 }
3792 
3793 /*
3794  * push some data in the path leaf to the left, trying to free up at
3795  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3796  *
3797  * max_slot can put a limit on how far into the leaf we'll push items.  The
3798  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3799  * items
3800  */
3801 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3802 				     struct btrfs_path *path, int data_size,
3803 				     int empty, struct extent_buffer *left,
3804 				     int free_space, u32 right_nritems,
3805 				     u32 max_slot)
3806 {
3807 	struct btrfs_disk_key disk_key;
3808 	struct extent_buffer *right = path->nodes[0];
3809 	int i;
3810 	int push_space = 0;
3811 	int push_items = 0;
3812 	struct btrfs_item *item;
3813 	u32 old_left_nritems;
3814 	u32 nr;
3815 	int ret = 0;
3816 	u32 this_item_size;
3817 	u32 old_left_item_size;
3818 	struct btrfs_map_token token;
3819 
3820 	btrfs_init_map_token(&token);
3821 
3822 	if (empty)
3823 		nr = min(right_nritems, max_slot);
3824 	else
3825 		nr = min(right_nritems - 1, max_slot);
3826 
3827 	for (i = 0; i < nr; i++) {
3828 		item = btrfs_item_nr(i);
3829 
3830 		if (!empty && push_items > 0) {
3831 			if (path->slots[0] < i)
3832 				break;
3833 			if (path->slots[0] == i) {
3834 				int space = btrfs_leaf_free_space(fs_info, right);
3835 				if (space + push_space * 2 > free_space)
3836 					break;
3837 			}
3838 		}
3839 
3840 		if (path->slots[0] == i)
3841 			push_space += data_size;
3842 
3843 		this_item_size = btrfs_item_size(right, item);
3844 		if (this_item_size + sizeof(*item) + push_space > free_space)
3845 			break;
3846 
3847 		push_items++;
3848 		push_space += this_item_size + sizeof(*item);
3849 	}
3850 
3851 	if (push_items == 0) {
3852 		ret = 1;
3853 		goto out;
3854 	}
3855 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3856 
3857 	/* push data from right to left */
3858 	copy_extent_buffer(left, right,
3859 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3860 			   btrfs_item_nr_offset(0),
3861 			   push_items * sizeof(struct btrfs_item));
3862 
3863 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3864 		     btrfs_item_offset_nr(right, push_items - 1);
3865 
3866 	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3867 		     leaf_data_end(fs_info, left) - push_space,
3868 		     BTRFS_LEAF_DATA_OFFSET +
3869 		     btrfs_item_offset_nr(right, push_items - 1),
3870 		     push_space);
3871 	old_left_nritems = btrfs_header_nritems(left);
3872 	BUG_ON(old_left_nritems <= 0);
3873 
3874 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3875 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3876 		u32 ioff;
3877 
3878 		item = btrfs_item_nr(i);
3879 
3880 		ioff = btrfs_token_item_offset(left, item, &token);
3881 		btrfs_set_token_item_offset(left, item,
3882 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3883 		      &token);
3884 	}
3885 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3886 
3887 	/* fixup right node */
3888 	if (push_items > right_nritems)
3889 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3890 		       right_nritems);
3891 
3892 	if (push_items < right_nritems) {
3893 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3894 						  leaf_data_end(fs_info, right);
3895 		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3896 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3897 				      BTRFS_LEAF_DATA_OFFSET +
3898 				      leaf_data_end(fs_info, right), push_space);
3899 
3900 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3901 			      btrfs_item_nr_offset(push_items),
3902 			     (btrfs_header_nritems(right) - push_items) *
3903 			     sizeof(struct btrfs_item));
3904 	}
3905 	right_nritems -= push_items;
3906 	btrfs_set_header_nritems(right, right_nritems);
3907 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3908 	for (i = 0; i < right_nritems; i++) {
3909 		item = btrfs_item_nr(i);
3910 
3911 		push_space = push_space - btrfs_token_item_size(right,
3912 								item, &token);
3913 		btrfs_set_token_item_offset(right, item, push_space, &token);
3914 	}
3915 
3916 	btrfs_mark_buffer_dirty(left);
3917 	if (right_nritems)
3918 		btrfs_mark_buffer_dirty(right);
3919 	else
3920 		clean_tree_block(fs_info, right);
3921 
3922 	btrfs_item_key(right, &disk_key, 0);
3923 	fixup_low_keys(path, &disk_key, 1);
3924 
3925 	/* then fixup the leaf pointer in the path */
3926 	if (path->slots[0] < push_items) {
3927 		path->slots[0] += old_left_nritems;
3928 		btrfs_tree_unlock(path->nodes[0]);
3929 		free_extent_buffer(path->nodes[0]);
3930 		path->nodes[0] = left;
3931 		path->slots[1] -= 1;
3932 	} else {
3933 		btrfs_tree_unlock(left);
3934 		free_extent_buffer(left);
3935 		path->slots[0] -= push_items;
3936 	}
3937 	BUG_ON(path->slots[0] < 0);
3938 	return ret;
3939 out:
3940 	btrfs_tree_unlock(left);
3941 	free_extent_buffer(left);
3942 	return ret;
3943 }
3944 
3945 /*
3946  * push some data in the path leaf to the left, trying to free up at
3947  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3948  *
3949  * max_slot can put a limit on how far into the leaf we'll push items.  The
3950  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3951  * items
3952  */
3953 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3954 			  *root, struct btrfs_path *path, int min_data_size,
3955 			  int data_size, int empty, u32 max_slot)
3956 {
3957 	struct btrfs_fs_info *fs_info = root->fs_info;
3958 	struct extent_buffer *right = path->nodes[0];
3959 	struct extent_buffer *left;
3960 	int slot;
3961 	int free_space;
3962 	u32 right_nritems;
3963 	int ret = 0;
3964 
3965 	slot = path->slots[1];
3966 	if (slot == 0)
3967 		return 1;
3968 	if (!path->nodes[1])
3969 		return 1;
3970 
3971 	right_nritems = btrfs_header_nritems(right);
3972 	if (right_nritems == 0)
3973 		return 1;
3974 
3975 	btrfs_assert_tree_locked(path->nodes[1]);
3976 
3977 	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
3978 	/*
3979 	 * slot - 1 is not valid or we fail to read the left node,
3980 	 * slot - 1 is not valid or we failed to read the left node;
3981 	 */
3982 	if (IS_ERR(left))
3983 		return 1;
3984 
3985 	btrfs_tree_lock(left);
3986 	btrfs_set_lock_blocking(left);
3987 
3988 	free_space = btrfs_leaf_free_space(fs_info, left);
3989 	if (free_space < data_size) {
3990 		ret = 1;
3991 		goto out;
3992 	}
3993 
3994 	/* cow and double check */
3995 	ret = btrfs_cow_block(trans, root, left,
3996 			      path->nodes[1], slot - 1, &left);
3997 	if (ret) {
3998 		/* we hit -ENOSPC, but it isn't fatal here */
3999 		if (ret == -ENOSPC)
4000 			ret = 1;
4001 		goto out;
4002 	}
4003 
4004 	free_space = btrfs_leaf_free_space(fs_info, left);
4005 	if (free_space < data_size) {
4006 		ret = 1;
4007 		goto out;
4008 	}
4009 
4010 	return __push_leaf_left(fs_info, path, min_data_size,
4011 			       empty, left, free_space, right_nritems,
4012 			       max_slot);
4013 out:
4014 	btrfs_tree_unlock(left);
4015 	free_extent_buffer(left);
4016 	return ret;
4017 }
4018 
4019 /*
4020  * helper for split_leaf(): copy the items from 'mid' onwards out of
4021  * leaf 'l' into the new leaf 'right' and link 'right' into the tree.
4022  */
4023 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4024 				    struct btrfs_fs_info *fs_info,
4025 				    struct btrfs_path *path,
4026 				    struct extent_buffer *l,
4027 				    struct extent_buffer *right,
4028 				    int slot, int mid, int nritems)
4029 {
4030 	int data_copy_size;
4031 	int rt_data_off;
4032 	int i;
4033 	struct btrfs_disk_key disk_key;
4034 	struct btrfs_map_token token;
4035 
4036 	btrfs_init_map_token(&token);
4037 
4038 	nritems = nritems - mid;
4039 	btrfs_set_header_nritems(right, nritems);
4040 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4041 
4042 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4043 			   btrfs_item_nr_offset(mid),
4044 			   nritems * sizeof(struct btrfs_item));
4045 
4046 	copy_extent_buffer(right, l,
4047 		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4048 		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4049 		     leaf_data_end(fs_info, l), data_copy_size);
4050 
4051 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4052 
4053 	for (i = 0; i < nritems; i++) {
4054 		struct btrfs_item *item = btrfs_item_nr(i);
4055 		u32 ioff;
4056 
4057 		ioff = btrfs_token_item_offset(right, item, &token);
4058 		btrfs_set_token_item_offset(right, item,
4059 					    ioff + rt_data_off, &token);
4060 	}
4061 
4062 	btrfs_set_header_nritems(l, mid);
4063 	btrfs_item_key(right, &disk_key, 0);
4064 	insert_ptr(trans, fs_info, path, &disk_key, right->start,
4065 		   path->slots[1] + 1, 1);
4066 
4067 	btrfs_mark_buffer_dirty(right);
4068 	btrfs_mark_buffer_dirty(l);
4069 	BUG_ON(path->slots[0] != slot);
4070 
4071 	if (mid <= slot) {
4072 		btrfs_tree_unlock(path->nodes[0]);
4073 		free_extent_buffer(path->nodes[0]);
4074 		path->nodes[0] = right;
4075 		path->slots[0] -= mid;
4076 		path->slots[1] += 1;
4077 	} else {
4078 		btrfs_tree_unlock(right);
4079 		free_extent_buffer(right);
4080 	}
4081 
4082 	BUG_ON(path->slots[0] < 0);
4083 }
4084 
4085 /*
4086  * double splits happen when we need to insert a big item in the middle
4087  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4088  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4089  *          A                 B                 C
4090  *
4091  * We avoid this by trying to push the items on either side of our target
4092  * into the adjacent leaves.  If all goes well we can avoid the double split
4093  * completely.
4094  */
4095 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4096 					  struct btrfs_root *root,
4097 					  struct btrfs_path *path,
4098 					  int data_size)
4099 {
4100 	struct btrfs_fs_info *fs_info = root->fs_info;
4101 	int ret;
4102 	int progress = 0;
4103 	int slot;
4104 	u32 nritems;
4105 	int space_needed = data_size;
4106 
4107 	slot = path->slots[0];
4108 	if (slot < btrfs_header_nritems(path->nodes[0]))
4109 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4110 
4111 	/*
4112 	 * try to push all the items after our slot into the
4113 	 * right leaf
4114 	 */
4115 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4116 	if (ret < 0)
4117 		return ret;
4118 
4119 	if (ret == 0)
4120 		progress++;
4121 
4122 	nritems = btrfs_header_nritems(path->nodes[0]);
4123 	/*
4124 	 * our goal is to get our slot at the start or end of a leaf.  If
4125 	 * we've done so we're done
4126 	 */
4127 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4128 		return 0;
4129 
4130 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4131 		return 0;
4132 
4133 	/* try to push all the items before our slot into the left leaf */
4134 	slot = path->slots[0];
4135 	space_needed = data_size;
4136 	if (slot > 0)
4137 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4138 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4139 	if (ret < 0)
4140 		return ret;
4141 
4142 	if (ret == 0)
4143 		progress++;
4144 
4145 	if (progress)
4146 		return 0;
4147 	return 1;
4148 }
4149 
4150 /*
4151  * split the path's leaf in two, making sure there is at least data_size
4152  * available for the resulting leaf level of the path.
4153  *
4154  * returns 0 if all went well and < 0 on failure.
4155  */
4156 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4157 			       struct btrfs_root *root,
4158 			       const struct btrfs_key *ins_key,
4159 			       struct btrfs_path *path, int data_size,
4160 			       int extend)
4161 {
4162 	struct btrfs_disk_key disk_key;
4163 	struct extent_buffer *l;
4164 	u32 nritems;
4165 	int mid;
4166 	int slot;
4167 	struct extent_buffer *right;
4168 	struct btrfs_fs_info *fs_info = root->fs_info;
4169 	int ret = 0;
4170 	int wret;
4171 	int split;
4172 	int num_doubles = 0;
4173 	int tried_avoid_double = 0;
4174 
4175 	l = path->nodes[0];
4176 	slot = path->slots[0];
4177 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4178 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4179 		return -EOVERFLOW;
4180 
4181 	/* first try to make some room by pushing left and right */
4182 	if (data_size && path->nodes[1]) {
4183 		int space_needed = data_size;
4184 
4185 		if (slot < btrfs_header_nritems(l))
4186 			space_needed -= btrfs_leaf_free_space(fs_info, l);
4187 
4188 		wret = push_leaf_right(trans, root, path, space_needed,
4189 				       space_needed, 0, 0);
4190 		if (wret < 0)
4191 			return wret;
4192 		if (wret) {
4193 			space_needed = data_size;
4194 			if (slot > 0)
4195 				space_needed -= btrfs_leaf_free_space(fs_info,
4196 								      l);
4197 			wret = push_leaf_left(trans, root, path, space_needed,
4198 					      space_needed, 0, (u32)-1);
4199 			if (wret < 0)
4200 				return wret;
4201 		}
4202 		l = path->nodes[0];
4203 
4204 		/* did the pushes work? */
4205 		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4206 			return 0;
4207 	}
4208 
4209 	if (!path->nodes[1]) {
4210 		ret = insert_new_root(trans, root, path, 1);
4211 		if (ret)
4212 			return ret;
4213 	}
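
	/*
	 * 'split' selects the strategy below: 0 links in a brand new empty
	 * leaf and points the path at it (the new item will live there on
	 * its own), 1 is a normal split at 'mid', and 2 means one split is
	 * still not enough room, so after copying we loop to 'again' and
	 * split a second time (the double split case).
	 */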
4214 again:
4215 	split = 1;
4216 	l = path->nodes[0];
4217 	slot = path->slots[0];
4218 	nritems = btrfs_header_nritems(l);
4219 	mid = (nritems + 1) / 2;
4220 
4221 	if (mid <= slot) {
4222 		if (nritems == 1 ||
4223 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4224 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4225 			if (slot >= nritems) {
4226 				split = 0;
4227 			} else {
4228 				mid = slot;
4229 				if (mid != nritems &&
4230 				    leaf_space_used(l, mid, nritems - mid) +
4231 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4232 					if (data_size && !tried_avoid_double)
4233 						goto push_for_double;
4234 					split = 2;
4235 				}
4236 			}
4237 		}
4238 	} else {
4239 		if (leaf_space_used(l, 0, mid) + data_size >
4240 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4241 			if (!extend && data_size && slot == 0) {
4242 				split = 0;
4243 			} else if ((extend || !data_size) && slot == 0) {
4244 				mid = 1;
4245 			} else {
4246 				mid = slot;
4247 				if (mid != nritems &&
4248 				    leaf_space_used(l, mid, nritems - mid) +
4249 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4250 					if (data_size && !tried_avoid_double)
4251 						goto push_for_double;
4252 					split = 2;
4253 				}
4254 			}
4255 		}
4256 	}
4257 
4258 	if (split == 0)
4259 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4260 	else
4261 		btrfs_item_key(l, &disk_key, mid);
4262 
4263 	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4264 			&disk_key, 0, l->start, 0);
4265 	if (IS_ERR(right))
4266 		return PTR_ERR(right);
4267 
4268 	root_add_used(root, fs_info->nodesize);
4269 
4270 	if (split == 0) {
4271 		if (mid <= slot) {
4272 			btrfs_set_header_nritems(right, 0);
4273 			insert_ptr(trans, fs_info, path, &disk_key,
4274 				   right->start, path->slots[1] + 1, 1);
4275 			btrfs_tree_unlock(path->nodes[0]);
4276 			free_extent_buffer(path->nodes[0]);
4277 			path->nodes[0] = right;
4278 			path->slots[0] = 0;
4279 			path->slots[1] += 1;
4280 		} else {
4281 			btrfs_set_header_nritems(right, 0);
4282 			insert_ptr(trans, fs_info, path, &disk_key,
4283 				   right->start, path->slots[1], 1);
4284 			btrfs_tree_unlock(path->nodes[0]);
4285 			free_extent_buffer(path->nodes[0]);
4286 			path->nodes[0] = right;
4287 			path->slots[0] = 0;
4288 			if (path->slots[1] == 0)
4289 				fixup_low_keys(path, &disk_key, 1);
4290 		}
4291 		/*
4292 		 * We create a new leaf 'right' for the required ins_len and
4293 		 * the caller will do btrfs_mark_buffer_dirty() on it after
4294 		 * copying the new item's content into it.
4295 		 */
4296 		return ret;
4297 	}
4298 
4299 	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4300 
4301 	if (split == 2) {
4302 		BUG_ON(num_doubles != 0);
4303 		num_doubles++;
4304 		goto again;
4305 	}
4306 
4307 	return 0;
4308 
4309 push_for_double:
4310 	push_for_double_split(trans, root, path, data_size);
4311 	tried_avoid_double = 1;
4312 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4313 		return 0;
4314 	goto again;
4315 }
4316 
4317 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4318 					 struct btrfs_root *root,
4319 					 struct btrfs_path *path, int ins_len)
4320 {
4321 	struct btrfs_fs_info *fs_info = root->fs_info;
4322 	struct btrfs_key key;
4323 	struct extent_buffer *leaf;
4324 	struct btrfs_file_extent_item *fi;
4325 	u64 extent_len = 0;
4326 	u32 item_size;
4327 	int ret;
4328 
4329 	leaf = path->nodes[0];
4330 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4331 
4332 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4333 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4334 
4335 	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4336 		return 0;
4337 
4338 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4339 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4340 		fi = btrfs_item_ptr(leaf, path->slots[0],
4341 				    struct btrfs_file_extent_item);
4342 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4343 	}
4344 	btrfs_release_path(path);
4345 
4346 	path->keep_locks = 1;
4347 	path->search_for_split = 1;
4348 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4349 	path->search_for_split = 0;
4350 	if (ret > 0)
4351 		ret = -EAGAIN;
4352 	if (ret < 0)
4353 		goto err;
4354 
4355 	ret = -EAGAIN;
4356 	leaf = path->nodes[0];
4357 	/* if our item isn't there, return now */
4358 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4359 		goto err;
4360 
4361 	/* the leaf has changed, it now has room.  return now */
4362 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4363 		goto err;
4364 
4365 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4366 		fi = btrfs_item_ptr(leaf, path->slots[0],
4367 				    struct btrfs_file_extent_item);
4368 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4369 			goto err;
4370 	}
4371 
4372 	btrfs_set_path_blocking(path);
4373 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4374 	if (ret)
4375 		goto err;
4376 
4377 	path->keep_locks = 0;
4378 	btrfs_unlock_up_safe(path, 1);
4379 	return 0;
4380 err:
4381 	path->keep_locks = 0;
4382 	return ret;
4383 }
4384 
4385 static noinline int split_item(struct btrfs_fs_info *fs_info,
4386 			       struct btrfs_path *path,
4387 			       const struct btrfs_key *new_key,
4388 			       unsigned long split_offset)
4389 {
4390 	struct extent_buffer *leaf;
4391 	struct btrfs_item *item;
4392 	struct btrfs_item *new_item;
4393 	int slot;
4394 	char *buf;
4395 	u32 nritems;
4396 	u32 item_size;
4397 	u32 orig_offset;
4398 	struct btrfs_disk_key disk_key;
4399 
4400 	leaf = path->nodes[0];
4401 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4402 
4403 	btrfs_set_path_blocking(path);
4404 
4405 	item = btrfs_item_nr(path->slots[0]);
4406 	orig_offset = btrfs_item_offset(leaf, item);
4407 	item_size = btrfs_item_size(leaf, item);
4408 
4409 	buf = kmalloc(item_size, GFP_NOFS);
4410 	if (!buf)
4411 		return -ENOMEM;
4412 
4413 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4414 			    path->slots[0]), item_size);
4415 
4416 	slot = path->slots[0] + 1;
4417 	nritems = btrfs_header_nritems(leaf);
4418 	if (slot != nritems) {
4419 		/* shift the items */
4420 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4421 				btrfs_item_nr_offset(slot),
4422 				(nritems - slot) * sizeof(struct btrfs_item));
4423 	}
4424 
4425 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4426 	btrfs_set_item_key(leaf, &disk_key, slot);
4427 
4428 	new_item = btrfs_item_nr(slot);
4429 
4430 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4431 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4432 
4433 	btrfs_set_item_offset(leaf, item,
4434 			      orig_offset + item_size - split_offset);
4435 	btrfs_set_item_size(leaf, item, split_offset);
4436 
4437 	btrfs_set_header_nritems(leaf, nritems + 1);
4438 
4439 	/* write the data for the start of the original item */
4440 	write_extent_buffer(leaf, buf,
4441 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4442 			    split_offset);
4443 
4444 	/* write the data for the new item */
4445 	write_extent_buffer(leaf, buf + split_offset,
4446 			    btrfs_item_ptr_offset(leaf, slot),
4447 			    item_size - split_offset);
4448 	btrfs_mark_buffer_dirty(leaf);
4449 
4450 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4451 	kfree(buf);
4452 	return 0;
4453 }
4454 
4455 /*
4456  * This function splits a single item into two items,
4457  * giving 'new_key' to the new item and splitting the
4458  * old one at split_offset (from the start of the item).
4459  *
4460  * The path may be released by this operation.  After
4461  * the split, the path is pointing to the old item.  The
4462  * new item is going to be in the same node as the old one.
4463  *
4464  * Note, the item being split must be small enough to live alone on
4465  * a tree block with room for one extra struct btrfs_item.
4466  *
4467  * This allows us to split the item in place, keeping a lock on the
4468  * leaf the entire time.
4469  */
4470 int btrfs_split_item(struct btrfs_trans_handle *trans,
4471 		     struct btrfs_root *root,
4472 		     struct btrfs_path *path,
4473 		     const struct btrfs_key *new_key,
4474 		     unsigned long split_offset)
4475 {
4476 	int ret;
4477 	ret = setup_leaf_for_split(trans, root, path,
4478 				   sizeof(struct btrfs_item));
4479 	if (ret)
4480 		return ret;
4481 
4482 	ret = split_item(root->fs_info, path, new_key, split_offset);
4483 	return ret;
4484 }
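
/*
 * Example usage, a sketch that is not part of the original file: split
 * the item the path points at into two at its halfway byte, handing the
 * tail half a new key.  The key derivation below is made up purely for
 * illustration.
 */
#if 0
static int example_split_item_in_half(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_key tail_key;
	u32 item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	btrfs_item_key_to_cpu(leaf, &tail_key, path->slots[0]);
	tail_key.offset += item_size / 2;	/* hypothetical tail key */

	/* front half keeps the old key, tail half becomes tail_key */
	return btrfs_split_item(trans, root, path, &tail_key, item_size / 2);
}
#endif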
4485 
4486 /*
4487  * This function duplicates an item, giving 'new_key' to the new item.
4488  * It guarantees both items live in the same tree leaf and the new item
4489  * is contiguous with the original item.
4490  *
4491  * This allows us to split a file extent in place, keeping a lock on the
4492  * leaf the entire time.
4493  */
4494 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4495 			 struct btrfs_root *root,
4496 			 struct btrfs_path *path,
4497 			 const struct btrfs_key *new_key)
4498 {
4499 	struct extent_buffer *leaf;
4500 	int ret;
4501 	u32 item_size;
4502 
4503 	leaf = path->nodes[0];
4504 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4505 	ret = setup_leaf_for_split(trans, root, path,
4506 				   item_size + sizeof(struct btrfs_item));
4507 	if (ret)
4508 		return ret;
4509 
4510 	path->slots[0]++;
4511 	setup_items_for_insert(root, path, new_key, &item_size,
4512 			       item_size, item_size +
4513 			       sizeof(struct btrfs_item), 1);
4514 	leaf = path->nodes[0];
4515 	memcpy_extent_buffer(leaf,
4516 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4517 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4518 			     item_size);
4519 	return 0;
4520 }
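
/*
 * Example usage (sketch only): make a second copy of the file extent
 * item at path->slots[0] under a key with offset 'new_offset', the first
 * step of splitting an extent in place.  'new_offset' is a hypothetical
 * parameter, not something this file defines.
 */
#if 0
static int example_duplicate_extent_item(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path,
					 u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/* on success the path points at the new (second) copy */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}
#endif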
4521 
4522 /*
4523  * make the item pointed to by the path smaller.  new_size indicates
4524  * how small to make it, and from_end tells us if we just chop bytes
4525  * off the end of the item or if we shift the item to chop bytes off
4526  * the front.
4527  */
4528 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4529 			 struct btrfs_path *path, u32 new_size, int from_end)
4530 {
4531 	int slot;
4532 	struct extent_buffer *leaf;
4533 	struct btrfs_item *item;
4534 	u32 nritems;
4535 	unsigned int data_end;
4536 	unsigned int old_data_start;
4537 	unsigned int old_size;
4538 	unsigned int size_diff;
4539 	int i;
4540 	struct btrfs_map_token token;
4541 
4542 	btrfs_init_map_token(&token);
4543 
4544 	leaf = path->nodes[0];
4545 	slot = path->slots[0];
4546 
4547 	old_size = btrfs_item_size_nr(leaf, slot);
4548 	if (old_size == new_size)
4549 		return;
4550 
4551 	nritems = btrfs_header_nritems(leaf);
4552 	data_end = leaf_data_end(fs_info, leaf);
4553 
4554 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4555 
4556 	size_diff = old_size - new_size;
4557 
4558 	BUG_ON(slot < 0);
4559 	BUG_ON(slot >= nritems);
4560 
4561 	/*
4562 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
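	 *
	 * (item headers grow forward from the start of the leaf while the
	 * item data they point at grows backward from the end, so the two
	 * meet in the middle)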
4563 	 */
4564 	/* first correct the data pointers */
4565 	for (i = slot; i < nritems; i++) {
4566 		u32 ioff;
4567 		item = btrfs_item_nr(i);
4568 
4569 		ioff = btrfs_token_item_offset(leaf, item, &token);
4570 		btrfs_set_token_item_offset(leaf, item,
4571 					    ioff + size_diff, &token);
4572 	}
4573 
4574 	/* shift the data */
4575 	if (from_end) {
4576 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4577 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4578 			      data_end, old_data_start + new_size - data_end);
4579 	} else {
4580 		struct btrfs_disk_key disk_key;
4581 		u64 offset;
4582 
4583 		btrfs_item_key(leaf, &disk_key, slot);
4584 
4585 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4586 			unsigned long ptr;
4587 			struct btrfs_file_extent_item *fi;
4588 
4589 			fi = btrfs_item_ptr(leaf, slot,
4590 					    struct btrfs_file_extent_item);
4591 			fi = (struct btrfs_file_extent_item *)(
4592 			     (unsigned long)fi - size_diff);
4593 
4594 			if (btrfs_file_extent_type(leaf, fi) ==
4595 			    BTRFS_FILE_EXTENT_INLINE) {
4596 				ptr = btrfs_item_ptr_offset(leaf, slot);
4597 				memmove_extent_buffer(leaf, ptr,
4598 				      (unsigned long)fi,
4599 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4600 			}
4601 		}
4602 
4603 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4604 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4605 			      data_end, old_data_start - data_end);
4606 
4607 		offset = btrfs_disk_key_offset(&disk_key);
4608 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4609 		btrfs_set_item_key(leaf, &disk_key, slot);
4610 		if (slot == 0)
4611 			fixup_low_keys(path, &disk_key, 1);
4612 	}
4613 
4614 	item = btrfs_item_nr(slot);
4615 	btrfs_set_item_size(leaf, item, new_size);
4616 	btrfs_mark_buffer_dirty(leaf);
4617 
4618 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4619 		btrfs_print_leaf(leaf);
4620 		BUG();
4621 	}
4622 }
4623 
4624 /*
4625  * make the item pointed to by the path bigger, data_size is the added size.
4626  */
4627 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4628 		       u32 data_size)
4629 {
4630 	int slot;
4631 	struct extent_buffer *leaf;
4632 	struct btrfs_item *item;
4633 	u32 nritems;
4634 	unsigned int data_end;
4635 	unsigned int old_data;
4636 	unsigned int old_size;
4637 	int i;
4638 	struct btrfs_map_token token;
4639 
4640 	btrfs_init_map_token(&token);
4641 
4642 	leaf = path->nodes[0];
4643 
4644 	nritems = btrfs_header_nritems(leaf);
4645 	data_end = leaf_data_end(fs_info, leaf);
4646 
4647 	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4648 		btrfs_print_leaf(leaf);
4649 		BUG();
4650 	}
4651 	slot = path->slots[0];
4652 	old_data = btrfs_item_end_nr(leaf, slot);
4653 
4654 	BUG_ON(slot < 0);
4655 	if (slot >= nritems) {
4656 		btrfs_print_leaf(leaf);
4657 		btrfs_crit(fs_info, "slot %d too large, nritems %d",
4658 			   slot, nritems);
4659 		BUG_ON(1);
4660 	}
4661 
4662 	/*
4663 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4664 	 */
4665 	/* first correct the data pointers */
4666 	for (i = slot; i < nritems; i++) {
4667 		u32 ioff;
4668 		item = btrfs_item_nr(i);
4669 
4670 		ioff = btrfs_token_item_offset(leaf, item, &token);
4671 		btrfs_set_token_item_offset(leaf, item,
4672 					    ioff - data_size, &token);
4673 	}
4674 
4675 	/* shift the data */
4676 	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4677 		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4678 		      data_end, old_data - data_end);
4679 
4680 	data_end = old_data;
4681 	old_size = btrfs_item_size_nr(leaf, slot);
4682 	item = btrfs_item_nr(slot);
4683 	btrfs_set_item_size(leaf, item, old_size + data_size);
4684 	btrfs_mark_buffer_dirty(leaf);
4685 
4686 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4687 		btrfs_print_leaf(leaf);
4688 		BUG();
4689 	}
4690 }
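
/*
 * Example usage (sketch): grow the item at path->slots[0] by 'extra'
 * bytes and write the new tail.  'data' and 'extra' are illustrative;
 * the caller must already know the leaf has the room, since
 * btrfs_extend_item() BUGs when it does not.
 */
#if 0
static void example_extend_and_fill(struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path,
				    const void *data, u32 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(fs_info, path, extra);

	/* the new bytes sit at the end of the (possibly shifted) item */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr + old_size, extra);
	btrfs_mark_buffer_dirty(leaf);
}
#endif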
4691 
4692 /*
4693  * this is a helper for btrfs_insert_empty_items, the main goal here is
4694  * to save stack depth by doing the bulk of the work in a function
4695  * that doesn't call btrfs_search_slot
4696  */
4697 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4698 			    const struct btrfs_key *cpu_key, u32 *data_size,
4699 			    u32 total_data, u32 total_size, int nr)
4700 {
4701 	struct btrfs_fs_info *fs_info = root->fs_info;
4702 	struct btrfs_item *item;
4703 	int i;
4704 	u32 nritems;
4705 	unsigned int data_end;
4706 	struct btrfs_disk_key disk_key;
4707 	struct extent_buffer *leaf;
4708 	int slot;
4709 	struct btrfs_map_token token;
4710 
4711 	if (path->slots[0] == 0) {
4712 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4713 		fixup_low_keys(path, &disk_key, 1);
4714 	}
4715 	btrfs_unlock_up_safe(path, 1);
4716 
4717 	btrfs_init_map_token(&token);
4718 
4719 	leaf = path->nodes[0];
4720 	slot = path->slots[0];
4721 
4722 	nritems = btrfs_header_nritems(leaf);
4723 	data_end = leaf_data_end(fs_info, leaf);
4724 
4725 	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4726 		btrfs_print_leaf(leaf);
4727 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4728 			   total_size, btrfs_leaf_free_space(fs_info, leaf));
4729 		BUG();
4730 	}
4731 
4732 	if (slot != nritems) {
4733 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4734 
4735 		if (old_data < data_end) {
4736 			btrfs_print_leaf(leaf);
4737 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4738 				   slot, old_data, data_end);
4739 			BUG_ON(1);
4740 		}
4741 		/*
4742 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4743 		 */
4744 		/* first correct the data pointers */
4745 		for (i = slot; i < nritems; i++) {
4746 			u32 ioff;
4747 
4748 			item = btrfs_item_nr(i);
4749 			ioff = btrfs_token_item_offset(leaf, item, &token);
4750 			btrfs_set_token_item_offset(leaf, item,
4751 						    ioff - total_data, &token);
4752 		}
4753 		/* shift the items */
4754 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4755 			      btrfs_item_nr_offset(slot),
4756 			      (nritems - slot) * sizeof(struct btrfs_item));
4757 
4758 		/* shift the data */
4759 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4760 			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4761 			      data_end, old_data - data_end);
4762 		data_end = old_data;
4763 	}
4764 
4765 	/* setup the item for the new data */
4766 	for (i = 0; i < nr; i++) {
4767 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4768 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4769 		item = btrfs_item_nr(slot + i);
4770 		btrfs_set_token_item_offset(leaf, item,
4771 					    data_end - data_size[i], &token);
4772 		data_end -= data_size[i];
4773 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4774 	}
4775 
4776 	btrfs_set_header_nritems(leaf, nritems + nr);
4777 	btrfs_mark_buffer_dirty(leaf);
4778 
4779 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4780 		btrfs_print_leaf(leaf);
4781 		BUG();
4782 	}
4783 }
4784 
4785 /*
4786  * Given a key and some data, insert items into the tree.
4787  * This does all the path init required, making room in the tree if needed.
4788  */
4789 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4790 			    struct btrfs_root *root,
4791 			    struct btrfs_path *path,
4792 			    const struct btrfs_key *cpu_key, u32 *data_size,
4793 			    int nr)
4794 {
4795 	int ret = 0;
4796 	int slot;
4797 	int i;
4798 	u32 total_size = 0;
4799 	u32 total_data = 0;
4800 
4801 	for (i = 0; i < nr; i++)
4802 		total_data += data_size[i];
4803 
4804 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4805 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4806 	if (ret == 0)
4807 		return -EEXIST;
4808 	if (ret < 0)
4809 		return ret;
4810 
4811 	slot = path->slots[0];
4812 	BUG_ON(slot < 0);
4813 
4814 	setup_items_for_insert(root, path, cpu_key, data_size,
4815 			       total_data, total_size, nr);
4816 	return 0;
4817 }
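
/*
 * Example usage (sketch): reserve two adjacent empty items in a single
 * tree operation and fill them afterwards.  Keys and sizes are made up;
 * keys[] must be sorted and both items must belong at the same spot in
 * the tree.
 */
#if 0
static int example_insert_two_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_key keys[2])
{
	u32 sizes[2] = { 16, 32 };	/* hypothetical payload sizes */
	int ret;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		return ret;

	/*
	 * path->slots[0] now points at the first new item; write both
	 * payloads via btrfs_item_ptr_offset() + write_extent_buffer().
	 */
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
#endif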
4818 
4819 /*
4820  * Given a key and some data, insert an item into the tree.
4821  * This does all the path init required, making room in the tree if needed.
4822  */
4823 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4824 		      const struct btrfs_key *cpu_key, void *data,
4825 		      u32 data_size)
4826 {
4827 	int ret = 0;
4828 	struct btrfs_path *path;
4829 	struct extent_buffer *leaf;
4830 	unsigned long ptr;
4831 
4832 	path = btrfs_alloc_path();
4833 	if (!path)
4834 		return -ENOMEM;
4835 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4836 	if (!ret) {
4837 		leaf = path->nodes[0];
4838 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4839 		write_extent_buffer(leaf, data, ptr, data_size);
4840 		btrfs_mark_buffer_dirty(leaf);
4841 	}
4842 	btrfs_free_path(path);
4843 	return ret;
4844 }
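
/*
 * Example usage (sketch): one-shot insertion of a small payload via the
 * helper above.  The key below is entirely made up for illustration.
 */
#if 0
static int example_insert_blob(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_key key;
	char payload[8] = "example";

	key.objectid = 256;			/* hypothetical object id */
	key.type = BTRFS_XATTR_ITEM_KEY;	/* hypothetical item type */
	key.offset = 0;

	return btrfs_insert_item(trans, root, &key, payload, sizeof(payload));
}
#endif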
4845 
4846 /*
4847  * delete the pointer from a given node.
4848  *
4849  * the tree should have been previously balanced so the deletion does not
4850  * empty a node.
4851  */
4852 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4853 		    int level, int slot)
4854 {
4855 	struct extent_buffer *parent = path->nodes[level];
4856 	u32 nritems;
4857 	int ret;
4858 
4859 	nritems = btrfs_header_nritems(parent);
4860 	if (slot != nritems - 1) {
4861 		if (level) {
4862 			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4863 					nritems - slot - 1);
4864 			BUG_ON(ret < 0);
4865 		}
4866 		memmove_extent_buffer(parent,
4867 			      btrfs_node_key_ptr_offset(slot),
4868 			      btrfs_node_key_ptr_offset(slot + 1),
4869 			      sizeof(struct btrfs_key_ptr) *
4870 			      (nritems - slot - 1));
4871 	} else if (level) {
4872 		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4873 				GFP_NOFS);
4874 		BUG_ON(ret < 0);
4875 	}
4876 
4877 	nritems--;
4878 	btrfs_set_header_nritems(parent, nritems);
4879 	if (nritems == 0 && parent == root->node) {
4880 		BUG_ON(btrfs_header_level(root->node) != 1);
4881 		/* just turn the root into a leaf and break */
4882 		btrfs_set_header_level(root->node, 0);
4883 	} else if (slot == 0) {
4884 		struct btrfs_disk_key disk_key;
4885 
4886 		btrfs_node_key(parent, &disk_key, 0);
4887 		fixup_low_keys(path, &disk_key, level + 1);
4888 	}
4889 	btrfs_mark_buffer_dirty(parent);
4890 }
4891 
4892 /*
4893  * a helper function to delete the leaf pointed to by path->slots[1] and
4894  * path->nodes[1].
4895  *
4896  * This deletes the pointer in path->nodes[1] and frees the leaf
4897  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4898  *
4899  * The path must have already been set up for deleting the leaf, including
4900  * all the proper balancing.  path->nodes[1] must be locked.
4901  */
4902 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4903 				    struct btrfs_root *root,
4904 				    struct btrfs_path *path,
4905 				    struct extent_buffer *leaf)
4906 {
4907 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4908 	del_ptr(root, path, 1, path->slots[1]);
4909 
4910 	/*
4911 	 * btrfs_free_extent is expensive, we want to make sure we
4912 	 * aren't holding any locks when we call it
4913 	 */
4914 	btrfs_unlock_up_safe(path, 0);
4915 
4916 	root_sub_used(root, leaf->len);
4917 
4918 	extent_buffer_get(leaf);
4919 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4920 	free_extent_buffer_stale(leaf);
4921 }
4922 /*
4923  * delete the item at the leaf level in path.  If that empties
4924  * the leaf, remove it from the tree
4925  */
4926 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4927 		    struct btrfs_path *path, int slot, int nr)
4928 {
4929 	struct btrfs_fs_info *fs_info = root->fs_info;
4930 	struct extent_buffer *leaf;
4931 	struct btrfs_item *item;
4932 	u32 last_off;
4933 	u32 dsize = 0;
4934 	int ret = 0;
4935 	int wret;
4936 	int i;
4937 	u32 nritems;
4938 	struct btrfs_map_token token;
4939 
4940 	btrfs_init_map_token(&token);
4941 
4942 	leaf = path->nodes[0];
4943 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4944 
4945 	for (i = 0; i < nr; i++)
4946 		dsize += btrfs_item_size_nr(leaf, slot + i);
4947 
4948 	nritems = btrfs_header_nritems(leaf);
4949 
4950 	if (slot + nr != nritems) {
4951 		int data_end = leaf_data_end(fs_info, leaf);
4952 
4953 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4954 			      data_end + dsize,
4955 			      BTRFS_LEAF_DATA_OFFSET + data_end,
4956 			      last_off - data_end);
4957 
4958 		for (i = slot + nr; i < nritems; i++) {
4959 			u32 ioff;
4960 
4961 			item = btrfs_item_nr(i);
4962 			ioff = btrfs_token_item_offset(leaf, item, &token);
4963 			btrfs_set_token_item_offset(leaf, item,
4964 						    ioff + dsize, &token);
4965 		}
4966 
4967 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4968 			      btrfs_item_nr_offset(slot + nr),
4969 			      sizeof(struct btrfs_item) *
4970 			      (nritems - slot - nr));
4971 	}
4972 	btrfs_set_header_nritems(leaf, nritems - nr);
4973 	nritems -= nr;
4974 
4975 	/* delete the leaf if we've emptied it */
4976 	if (nritems == 0) {
4977 		if (leaf == root->node) {
4978 			btrfs_set_header_level(leaf, 0);
4979 		} else {
4980 			btrfs_set_path_blocking(path);
4981 			clean_tree_block(fs_info, leaf);
4982 			btrfs_del_leaf(trans, root, path, leaf);
4983 		}
4984 	} else {
4985 		int used = leaf_space_used(leaf, 0, nritems);
4986 		if (slot == 0) {
4987 			struct btrfs_disk_key disk_key;
4988 
4989 			btrfs_item_key(leaf, &disk_key, 0);
4990 			fixup_low_keys(path, &disk_key, 1);
4991 		}
4992 
4993 		/* delete the leaf if it is mostly empty */
4994 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4995 			/*
4996 			 * push_leaf_left fixes the path.  Make sure the path
4997 			 * still points to our leaf for the del_ptr call below.
4998 			 */
4999 			slot = path->slots[1];
5000 			extent_buffer_get(leaf);
5001 
5002 			btrfs_set_path_blocking(path);
5003 			wret = push_leaf_left(trans, root, path, 1, 1,
5004 					      1, (u32)-1);
5005 			if (wret < 0 && wret != -ENOSPC)
5006 				ret = wret;
5007 
5008 			if (path->nodes[0] == leaf &&
5009 			    btrfs_header_nritems(leaf)) {
5010 				wret = push_leaf_right(trans, root, path, 1,
5011 						       1, 1, 0);
5012 				if (wret < 0 && wret != -ENOSPC)
5013 					ret = wret;
5014 			}
5015 
5016 			if (btrfs_header_nritems(leaf) == 0) {
5017 				path->slots[1] = slot;
5018 				btrfs_del_leaf(trans, root, path, leaf);
5019 				free_extent_buffer(leaf);
5020 				ret = 0;
5021 			} else {
5022 				/* if we're still in the path, make sure
5023 				 * we're dirty.  Otherwise, one of the
5024 				 * push_leaf functions must have already
5025 				 * dirtied this buffer
5026 				 */
5027 				if (path->nodes[0] == leaf)
5028 					btrfs_mark_buffer_dirty(leaf);
5029 				free_extent_buffer(leaf);
5030 			}
5031 		} else {
5032 			btrfs_mark_buffer_dirty(leaf);
5033 		}
5034 	}
5035 	return ret;
5036 }
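
/*
 * Example usage (sketch): find 'key' and delete that single item.  The
 * ins_len of -1 follows the convention used by deletion callers so the
 * search can merge/balance nodes on the way down.
 */
#if 0
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key not found */

	btrfs_free_path(path);
	return ret;
}
#endif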
5037 
5038 /*
5039  * search the tree again to find a leaf with lesser keys
5040  * returns 0 if it found something or 1 if there are no lesser leaves.
5041  * returns < 0 on io errors.
5042  *
5043  * This may release the path, and so you may lose any locks held at the
5044  * time you call it.
5045  */
5046 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5047 {
5048 	struct btrfs_key key;
5049 	struct btrfs_disk_key found_key;
5050 	int ret;
5051 
5052 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5053 
5054 	if (key.offset > 0) {
5055 		key.offset--;
5056 	} else if (key.type > 0) {
5057 		key.type--;
5058 		key.offset = (u64)-1;
5059 	} else if (key.objectid > 0) {
5060 		key.objectid--;
5061 		key.type = (u8)-1;
5062 		key.offset = (u64)-1;
5063 	} else {
5064 		return 1;
5065 	}
5066 
5067 	btrfs_release_path(path);
5068 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5069 	if (ret < 0)
5070 		return ret;
5071 	btrfs_item_key(path->nodes[0], &found_key, 0);
5072 	ret = comp_keys(&found_key, &key);
5073 	/*
5074 	 * We might have had an item with the previous key in the tree right
5075 	 * before we released our path. And after we released our path, that
5076 	 * item might have been pushed to the first slot (0) of the leaf we
5077 	 * were holding due to a tree balance. Alternatively, an item with the
5078 	 * previous key can exist as the only element of a leaf (big fat item).
5079 	 * Therefore account for these 2 cases, so that our callers (like
5080 	 * btrfs_previous_item) don't miss an existing item with a key matching
5081 	 * the previous key we computed above.
5082 	 */
5083 	if (ret <= 0)
5084 		return 0;
5085 	return 1;
5086 }
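
/*
 * Example usage (sketch): step from the current leaf to the last item
 * of the previous leaf, if there is one.
 */
#if 0
static int example_goto_prev_item(struct btrfs_root *root,
				  struct btrfs_path *path)
{
	int ret;

	ret = btrfs_prev_leaf(root, path);
	if (ret != 0)
		return ret;	/* 1: no lesser leaf, < 0: io error */

	path->slots[0] = btrfs_header_nritems(path->nodes[0]) - 1;
	return 0;
}
#endif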
5087 
5088 /*
5089  * A helper function to walk down the tree starting at min_key, looking
5090  * for nodes or leaves that have a minimum transaction id.
5091  * This is used by the btree defrag code, and tree logging
5092  *
5093  * This does not cow, but it does stuff the starting key it finds back
5094  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5095  * key and get a writable path.
5096  *
5097  * This honors path->lowest_level to prevent descent past a given level
5098  * of the tree.
5099  *
5100  * min_trans indicates the oldest transaction that you are interested
5101  * in walking through.  Any nodes or leaves older than min_trans are
5102  * skipped over (without reading them).
5103  *
5104  * returns zero if something useful was found, < 0 on error and 1 if there
5105  * was nothing in the tree that matched the search criteria.
5106  */
5107 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5108 			 struct btrfs_path *path,
5109 			 u64 min_trans)
5110 {
5111 	struct btrfs_fs_info *fs_info = root->fs_info;
5112 	struct extent_buffer *cur;
5113 	struct btrfs_key found_key;
5114 	int slot;
5115 	int sret;
5116 	u32 nritems;
5117 	int level;
5118 	int ret = 1;
5119 	int keep_locks = path->keep_locks;
5120 
5121 	path->keep_locks = 1;
5122 again:
5123 	cur = btrfs_read_lock_root_node(root);
5124 	level = btrfs_header_level(cur);
5125 	WARN_ON(path->nodes[level]);
5126 	path->nodes[level] = cur;
5127 	path->locks[level] = BTRFS_READ_LOCK;
5128 
5129 	if (btrfs_header_generation(cur) < min_trans) {
5130 		ret = 1;
5131 		goto out;
5132 	}
5133 	while (1) {
5134 		nritems = btrfs_header_nritems(cur);
5135 		level = btrfs_header_level(cur);
5136 		sret = btrfs_bin_search(cur, min_key, level, &slot);
5137 
5138 		/* at the lowest level, we're done, set up the path and exit */
5139 		if (level == path->lowest_level) {
5140 			if (slot >= nritems)
5141 				goto find_next_key;
5142 			ret = 0;
5143 			path->slots[level] = slot;
5144 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5145 			goto out;
5146 		}
5147 		if (sret && slot > 0)
5148 			slot--;
5149 		/*
5150 		 * check this node pointer against the min_trans parameter.
5151 		 * If it is too old, skip to the next one.
5152 		 */
5153 		while (slot < nritems) {
5154 			u64 gen;
5155 
5156 			gen = btrfs_node_ptr_generation(cur, slot);
5157 			if (gen < min_trans) {
5158 				slot++;
5159 				continue;
5160 			}
5161 			break;
5162 		}
5163 find_next_key:
5164 		/*
5165 		 * we didn't find a candidate key in this node, walk forward
5166 		 * and find another one
5167 		 */
5168 		if (slot >= nritems) {
5169 			path->slots[level] = slot;
5170 			btrfs_set_path_blocking(path);
5171 			sret = btrfs_find_next_key(root, path, min_key, level,
5172 						  min_trans);
5173 			if (sret == 0) {
5174 				btrfs_release_path(path);
5175 				goto again;
5176 			} else {
5177 				goto out;
5178 			}
5179 		}
5180 		/* save our key for returning back */
5181 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5182 		path->slots[level] = slot;
5183 		if (level == path->lowest_level) {
5184 			ret = 0;
5185 			goto out;
5186 		}
5187 		btrfs_set_path_blocking(path);
5188 		cur = read_node_slot(fs_info, cur, slot);
5189 		if (IS_ERR(cur)) {
5190 			ret = PTR_ERR(cur);
5191 			goto out;
5192 		}
5193 
5194 		btrfs_tree_read_lock(cur);
5195 
5196 		path->locks[level - 1] = BTRFS_READ_LOCK;
5197 		path->nodes[level - 1] = cur;
5198 		unlock_up(path, level, 1, 0, NULL);
5199 	}
5200 out:
5201 	path->keep_locks = keep_locks;
5202 	if (ret == 0) {
5203 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5204 		btrfs_set_path_blocking(path);
5205 		memcpy(min_key, &found_key, sizeof(found_key));
5206 	}
5207 	return ret;
5208 }
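
/*
 * Example usage (sketch): walk every item stored in blocks newer than
 * 'min_trans', restarting the search after each hit.  The key advance at
 * the bottom is simplified and ignores u64 overflow at the very end of
 * the key space.
 */
#if 0
static int example_scan_newer_items(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 };	/* start before the first key */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			if (ret == 1)
				ret = 0;	/* clean end of the tree */
			break;
		}

		/* min_key now holds the key of the item that was found */
		/* ... process path->nodes[0], path->slots[0] here ... */

		btrfs_release_path(path);
		min_key.offset++;	/* simplified advance to the next key */
	}

	btrfs_free_path(path);
	return ret;
}
#endif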
5209 
5210 static int tree_move_down(struct btrfs_fs_info *fs_info,
5211 			   struct btrfs_path *path,
5212 			   int *level)
5213 {
5214 	struct extent_buffer *eb;
5215 
5216 	BUG_ON(*level == 0);
5217 	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5218 	if (IS_ERR(eb))
5219 		return PTR_ERR(eb);
5220 
5221 	path->nodes[*level - 1] = eb;
5222 	path->slots[*level - 1] = 0;
5223 	(*level)--;
5224 	return 0;
5225 }
5226 
5227 static int tree_move_next_or_upnext(struct btrfs_path *path,
5228 				    int *level, int root_level)
5229 {
5230 	int ret = 0;
5231 	int nritems;
5232 	nritems = btrfs_header_nritems(path->nodes[*level]);
5233 
5234 	path->slots[*level]++;
5235 
5236 	while (path->slots[*level] >= nritems) {
5237 		if (*level == root_level)
5238 			return -1;
5239 
5240 		/* move upnext */
5241 		path->slots[*level] = 0;
5242 		free_extent_buffer(path->nodes[*level]);
5243 		path->nodes[*level] = NULL;
5244 		(*level)++;
5245 		path->slots[*level]++;
5246 
5247 		nritems = btrfs_header_nritems(path->nodes[*level]);
5248 		ret = 1;
5249 	}
5250 	return ret;
5251 }
5252 
5253 /*
5254  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5255  * or down.
5256  */
5257 static int tree_advance(struct btrfs_fs_info *fs_info,
5258 			struct btrfs_path *path,
5259 			int *level, int root_level,
5260 			int allow_down,
5261 			struct btrfs_key *key)
5262 {
5263 	int ret;
5264 
5265 	if (*level == 0 || !allow_down) {
5266 		ret = tree_move_next_or_upnext(path, level, root_level);
5267 	} else {
5268 		ret = tree_move_down(fs_info, path, level);
5269 	}
5270 	if (ret >= 0) {
5271 		if (*level == 0)
5272 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5273 					path->slots[*level]);
5274 		else
5275 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5276 					path->slots[*level]);
5277 	}
5278 	return ret;
5279 }
5280 
5281 static int tree_compare_item(struct btrfs_path *left_path,
5282 			     struct btrfs_path *right_path,
5283 			     char *tmp_buf)
5284 {
5285 	int cmp;
5286 	int len1, len2;
5287 	unsigned long off1, off2;
5288 
5289 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5290 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5291 	if (len1 != len2)
5292 		return 1;
5293 
5294 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5295 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5296 				right_path->slots[0]);
5297 
5298 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5299 
5300 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5301 	if (cmp)
5302 		return 1;
5303 	return 0;
5304 }
5305 
5306 #define ADVANCE 1
5307 #define ADVANCE_ONLY_NEXT -1
5308 
5309 /*
5310  * This function compares two trees and calls the provided callback for
5311  * every changed/new/deleted item it finds.
5312  * If shared tree blocks are encountered, whole subtrees are skipped, making
5313  * the compare pretty fast on snapshotted subvolumes.
5314  *
5315  * This currently works on commit roots only. As commit roots are read only,
5316  * we don't do any locking. The commit roots are protected with transactions.
5317  * Transactions are ended and rejoined when a commit is tried in between.
5318  *
5319  * This function checks for modifications done to the trees while comparing.
5320  * If it detects a change, it aborts immediately.
5321  */
5322 int btrfs_compare_trees(struct btrfs_root *left_root,
5323 			struct btrfs_root *right_root,
5324 			btrfs_changed_cb_t changed_cb, void *ctx)
5325 {
5326 	struct btrfs_fs_info *fs_info = left_root->fs_info;
5327 	int ret;
5328 	int cmp;
5329 	struct btrfs_path *left_path = NULL;
5330 	struct btrfs_path *right_path = NULL;
5331 	struct btrfs_key left_key;
5332 	struct btrfs_key right_key;
5333 	char *tmp_buf = NULL;
5334 	int left_root_level;
5335 	int right_root_level;
5336 	int left_level;
5337 	int right_level;
5338 	int left_end_reached;
5339 	int right_end_reached;
5340 	int advance_left;
5341 	int advance_right;
5342 	u64 left_blockptr;
5343 	u64 right_blockptr;
5344 	u64 left_gen;
5345 	u64 right_gen;
5346 
5347 	left_path = btrfs_alloc_path();
5348 	if (!left_path) {
5349 		ret = -ENOMEM;
5350 		goto out;
5351 	}
5352 	right_path = btrfs_alloc_path();
5353 	if (!right_path) {
5354 		ret = -ENOMEM;
5355 		goto out;
5356 	}
5357 
5358 	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5359 	if (!tmp_buf) {
5360 		ret = -ENOMEM;
5361 		goto out;
5362 	}
5363 
5364 	left_path->search_commit_root = 1;
5365 	left_path->skip_locking = 1;
5366 	right_path->search_commit_root = 1;
5367 	right_path->skip_locking = 1;
5368 
5369 	/*
5370 	 * Strategy: Go to the first items of both trees. Then do
5371 	 *
5372 	 * If both trees are at level 0
5373 	 *   Compare keys of current items
5374 	 *     If left < right treat left item as new, advance left tree
5375 	 *       and repeat
5376 	 *     If left > right treat right item as deleted, advance right tree
5377 	 *       and repeat
5378 	 *     If left == right do deep compare of items, treat as changed if
5379 	 *       needed, advance both trees and repeat
5380 	 * If both trees are at the same level but not at level 0
5381 	 *   Compare keys of current nodes/leaves
5382 	 *     If left < right advance left tree and repeat
5383 	 *     If left > right advance right tree and repeat
5384 	 *     If left == right compare blockptrs of the next nodes/leaves
5385 	 *       If they match advance both trees but stay at the same level
5386 	 *         and repeat
5387 	 *       If they don't match advance both trees while allowing to go
5388 	 *         deeper and repeat
5389 	 * If tree levels are different
5390 	 *   Advance the tree that needs it and repeat
5391 	 *
5392 	 * Advancing a tree means:
5393 	 *   If we are at level 0, try to go to the next slot. If that's not
5394 	 *   possible, go one level up and repeat. Stop when we found a level
5395 	 *   where we could go to the next slot. We may at this point be on a
5396 	 *   node or a leaf.
5397 	 *
5398 	 *   If we are not at level 0 and not on shared tree blocks, go one
5399 	 *   level deeper.
5400 	 *
5401 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5402 	 *   the right if possible or go up and right.
5403 	 */
5404 
5405 	down_read(&fs_info->commit_root_sem);
5406 	left_level = btrfs_header_level(left_root->commit_root);
5407 	left_root_level = left_level;
5408 	left_path->nodes[left_level] =
5409 			btrfs_clone_extent_buffer(left_root->commit_root);
5410 	if (!left_path->nodes[left_level]) {
5411 		up_read(&fs_info->commit_root_sem);
5412 		ret = -ENOMEM;
5413 		goto out;
5414 	}
5415 
5416 	right_level = btrfs_header_level(right_root->commit_root);
5417 	right_root_level = right_level;
5418 	right_path->nodes[right_level] =
5419 			btrfs_clone_extent_buffer(right_root->commit_root);
5420 	if (!right_path->nodes[right_level]) {
5421 		up_read(&fs_info->commit_root_sem);
5422 		ret = -ENOMEM;
5423 		goto out;
5424 	}
5425 	up_read(&fs_info->commit_root_sem);
5426 
5427 	if (left_level == 0)
5428 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5429 				&left_key, left_path->slots[left_level]);
5430 	else
5431 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5432 				&left_key, left_path->slots[left_level]);
5433 	if (right_level == 0)
5434 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5435 				&right_key, right_path->slots[right_level]);
5436 	else
5437 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5438 				&right_key, right_path->slots[right_level]);
5439 
5440 	left_end_reached = right_end_reached = 0;
5441 	advance_left = advance_right = 0;
5442 
5443 	while (1) {
5444 		if (advance_left && !left_end_reached) {
5445 			ret = tree_advance(fs_info, left_path, &left_level,
5446 					left_root_level,
5447 					advance_left != ADVANCE_ONLY_NEXT,
5448 					&left_key);
5449 			if (ret == -1)
5450 				left_end_reached = ADVANCE;
5451 			else if (ret < 0)
5452 				goto out;
5453 			advance_left = 0;
5454 		}
5455 		if (advance_right && !right_end_reached) {
5456 			ret = tree_advance(fs_info, right_path, &right_level,
5457 					right_root_level,
5458 					advance_right != ADVANCE_ONLY_NEXT,
5459 					&right_key);
5460 			if (ret == -1)
5461 				right_end_reached = ADVANCE;
5462 			else if (ret < 0)
5463 				goto out;
5464 			advance_right = 0;
5465 		}
5466 
5467 		if (left_end_reached && right_end_reached) {
5468 			ret = 0;
5469 			goto out;
5470 		} else if (left_end_reached) {
5471 			if (right_level == 0) {
5472 				ret = changed_cb(left_path, right_path,
5473 						&right_key,
5474 						BTRFS_COMPARE_TREE_DELETED,
5475 						ctx);
5476 				if (ret < 0)
5477 					goto out;
5478 			}
5479 			advance_right = ADVANCE;
5480 			continue;
5481 		} else if (right_end_reached) {
5482 			if (left_level == 0) {
5483 				ret = changed_cb(left_path, right_path,
5484 						&left_key,
5485 						BTRFS_COMPARE_TREE_NEW,
5486 						ctx);
5487 				if (ret < 0)
5488 					goto out;
5489 			}
5490 			advance_left = ADVANCE;
5491 			continue;
5492 		}
5493 
5494 		if (left_level == 0 && right_level == 0) {
5495 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5496 			if (cmp < 0) {
5497 				ret = changed_cb(left_path, right_path,
5498 						&left_key,
5499 						BTRFS_COMPARE_TREE_NEW,
5500 						ctx);
5501 				if (ret < 0)
5502 					goto out;
5503 				advance_left = ADVANCE;
5504 			} else if (cmp > 0) {
5505 				ret = changed_cb(left_path, right_path,
5506 						&right_key,
5507 						BTRFS_COMPARE_TREE_DELETED,
5508 						ctx);
5509 				if (ret < 0)
5510 					goto out;
5511 				advance_right = ADVANCE;
5512 			} else {
5513 				enum btrfs_compare_tree_result result;
5514 
5515 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5516 				ret = tree_compare_item(left_path, right_path,
5517 							tmp_buf);
5518 				if (ret)
5519 					result = BTRFS_COMPARE_TREE_CHANGED;
5520 				else
5521 					result = BTRFS_COMPARE_TREE_SAME;
5522 				ret = changed_cb(left_path, right_path,
5523 						 &left_key, result, ctx);
5524 				if (ret < 0)
5525 					goto out;
5526 				advance_left = ADVANCE;
5527 				advance_right = ADVANCE;
5528 			}
5529 		} else if (left_level == right_level) {
5530 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5531 			if (cmp < 0) {
5532 				advance_left = ADVANCE;
5533 			} else if (cmp > 0) {
5534 				advance_right = ADVANCE;
5535 			} else {
5536 				left_blockptr = btrfs_node_blockptr(
5537 						left_path->nodes[left_level],
5538 						left_path->slots[left_level]);
5539 				right_blockptr = btrfs_node_blockptr(
5540 						right_path->nodes[right_level],
5541 						right_path->slots[right_level]);
5542 				left_gen = btrfs_node_ptr_generation(
5543 						left_path->nodes[left_level],
5544 						left_path->slots[left_level]);
5545 				right_gen = btrfs_node_ptr_generation(
5546 						right_path->nodes[right_level],
5547 						right_path->slots[right_level]);
5548 				if (left_blockptr == right_blockptr &&
5549 				    left_gen == right_gen) {
5550 					/*
5551 					 * As we're on a shared block, don't
5552 					 * allow going deeper.
5553 					 */
5554 					advance_left = ADVANCE_ONLY_NEXT;
5555 					advance_right = ADVANCE_ONLY_NEXT;
5556 				} else {
5557 					advance_left = ADVANCE;
5558 					advance_right = ADVANCE;
5559 				}
5560 			}
5561 		} else if (left_level < right_level) {
5562 			advance_right = ADVANCE;
5563 		} else {
5564 			advance_left = ADVANCE;
5565 		}
5566 	}
5567 
5568 out:
5569 	btrfs_free_path(left_path);
5570 	btrfs_free_path(right_path);
5571 	kvfree(tmp_buf);
5572 	return ret;
5573 }
5574 
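/*
 * A minimal sketch of a changed_cb callback for the comparison loop above
 * (btrfs_compare_trees() in this file).  Only the callback signature and
 * the BTRFS_COMPARE_TREE_* results come from the code above; the function
 * name and the empty case bodies are hypothetical placeholders.
 */
static int example_changed_cb(struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		/* key exists only in the left (newer) tree */
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		/* key exists only in the right (older) tree */
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		/* same key in both trees, item contents differ */
		break;
	case BTRFS_COMPARE_TREE_SAME:
		/* identical item in both trees */
		break;
	}
	/* a negative return aborts the walk and is propagated to the caller */
	return 0;
}
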
5575 /*
5576  * this is similar to btrfs_next_leaf, but does not try to preserve
5577  * and fix up the path.  It looks for and returns the next key in the
5578  * tree based on the current path and the min_trans parameter.
5579  *
5580  * 0 is returned if another key is found, < 0 is returned on errors,
5581  * and 1 is returned if there are no higher keys in the tree
5582  *
5583  * path->keep_locks should be set to 1 on the search made before
5584  * calling this function.
5585  */
5586 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5587 			struct btrfs_key *key, int level, u64 min_trans)
5588 {
5589 	int slot;
5590 	struct extent_buffer *c;
5591 
5592 	WARN_ON(!path->keep_locks);
5593 	while (level < BTRFS_MAX_LEVEL) {
5594 		if (!path->nodes[level])
5595 			return 1;
5596 
5597 		slot = path->slots[level] + 1;
5598 		c = path->nodes[level];
5599 next:
5600 		if (slot >= btrfs_header_nritems(c)) {
5601 			int ret;
5602 			int orig_lowest;
5603 			struct btrfs_key cur_key;
5604 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5605 			    !path->nodes[level + 1])
5606 				return 1;
5607 
5608 			if (path->locks[level + 1]) {
5609 				level++;
5610 				continue;
5611 			}
5612 
5613 			slot = btrfs_header_nritems(c) - 1;
5614 			if (level == 0)
5615 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5616 			else
5617 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5618 
5619 			orig_lowest = path->lowest_level;
5620 			btrfs_release_path(path);
5621 			path->lowest_level = level;
5622 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5623 						0, 0);
5624 			path->lowest_level = orig_lowest;
5625 			if (ret < 0)
5626 				return ret;
5627 
5628 			c = path->nodes[level];
5629 			slot = path->slots[level];
5630 			if (ret == 0)
5631 				slot++;
5632 			goto next;
5633 		}
5634 
5635 		if (level == 0)
5636 			btrfs_item_key_to_cpu(c, key, slot);
5637 		else {
5638 			u64 gen = btrfs_node_ptr_generation(c, slot);
5639 
5640 			if (gen < min_trans) {
5641 				slot++;
5642 				goto next;
5643 			}
5644 			btrfs_node_key_to_cpu(c, key, slot);
5645 		}
5646 		return 0;
5647 	}
5648 	return 1;
5649 }
5650 
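/*
 * A minimal usage sketch for btrfs_find_next_key(), loosely following the
 * pattern in the tree defrag code: do a search with keep_locks set, then
 * ask for the next key at level 1 or above.  Everything except the
 * btrfs_* helpers is hypothetical.
 */
static int example_next_key_above(struct btrfs_root *root,
				  struct btrfs_key *key, u64 min_trans)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* keep_locks must be set on the search made before the call */
	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* on success (0), *key is overwritten with the next key found */
	ret = btrfs_find_next_key(root, path, key, 1, min_trans);
out:
	btrfs_free_path(path);
	return ret;
}
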
5651 /*
5652  * search the tree again to find a leaf with greater keys.
5653  * returns 0 if it found something or 1 if there are no greater leaves.
5654  * returns < 0 on I/O errors.
5655  */
5656 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5657 {
5658 	return btrfs_next_old_leaf(root, path, 0);
5659 }
5660 
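/*
 * A sketch of the idiomatic btrfs_next_leaf() loop: iterate every item of
 * a tree starting from the smallest key.  The function name and starting
 * key are made up for illustration; the btrfs_* helpers are the real ones
 * used in this file.
 */
static int example_walk_all_items(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		/* ... process the item behind 'key' here ... */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
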
5661 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5662 			u64 time_seq)
5663 {
5664 	int slot;
5665 	int level;
5666 	struct extent_buffer *c;
5667 	struct extent_buffer *next;
5668 	struct btrfs_key key;
5669 	u32 nritems;
5670 	int ret;
5671 	int old_spinning = path->leave_spinning;
5672 	int next_rw_lock = 0;
5673 
5674 	nritems = btrfs_header_nritems(path->nodes[0]);
5675 	if (nritems == 0)
5676 		return 1;
5677 
5678 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5679 again:
5680 	level = 1;
5681 	next = NULL;
5682 	next_rw_lock = 0;
5683 	btrfs_release_path(path);
5684 
5685 	path->keep_locks = 1;
5686 	path->leave_spinning = 1;
5687 
5688 	if (time_seq)
5689 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5690 	else
5691 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5692 	path->keep_locks = 0;
5693 
5694 	if (ret < 0)
5695 		return ret;
5696 
5697 	nritems = btrfs_header_nritems(path->nodes[0]);
5698 	/*
5699 	 * by releasing the path above we dropped all our locks.  A balance
5700 	 * could have added more items next to the key that used to be
5701 	 * at the very end of the block.  So, check again here and
5702 	 * advance the path if there are now more items available.
5703 	 */
5704 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5705 		if (ret == 0)
5706 			path->slots[0]++;
5707 		ret = 0;
5708 		goto done;
5709 	}
5710 	/*
5711 	 * So the above check misses one case:
5712 	 * - after releasing the path above, someone has removed the item that
5713 	 *   used to be at the very end of the block, and balance between leaves
5714 	 *   gets another one with a bigger key.offset to replace it.
5715 	 *
5716 	 * This one should be returned as well, or we can get leaf corruption
5717 	 * later (especially in __btrfs_drop_extents()).
5718 	 *
5719 	 * A bit more explanation about this check:
5720 	 * with ret > 0, the key isn't found, the path points to the slot
5721 	 * where it should be inserted, so the path->slots[0] item must be the
5722 	 * bigger one.
5723 	 */
5724 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5725 		ret = 0;
5726 		goto done;
5727 	}
5728 
5729 	while (level < BTRFS_MAX_LEVEL) {
5730 		if (!path->nodes[level]) {
5731 			ret = 1;
5732 			goto done;
5733 		}
5734 
5735 		slot = path->slots[level] + 1;
5736 		c = path->nodes[level];
5737 		if (slot >= btrfs_header_nritems(c)) {
5738 			level++;
5739 			if (level == BTRFS_MAX_LEVEL) {
5740 				ret = 1;
5741 				goto done;
5742 			}
5743 			continue;
5744 		}
5745 
5746 		if (next) {
5747 			btrfs_tree_unlock_rw(next, next_rw_lock);
5748 			free_extent_buffer(next);
5749 		}
5750 
5751 		next = c;
5752 		next_rw_lock = path->locks[level];
5753 		ret = read_block_for_search(root, path, &next, level,
5754 					    slot, &key);
5755 		if (ret == -EAGAIN)
5756 			goto again;
5757 
5758 		if (ret < 0) {
5759 			btrfs_release_path(path);
5760 			goto done;
5761 		}
5762 
5763 		if (!path->skip_locking) {
5764 			ret = btrfs_try_tree_read_lock(next);
5765 			if (!ret && time_seq) {
5766 				/*
5767 				 * If we don't get the lock, we may be racing
5768 				 * with push_leaf_left, which holds that lock
5769 				 * while itself waiting for the leaf we've
5770 				 * currently locked. To solve this situation,
5771 				 * we give up on our lock and cycle.
5772 				 */
5773 				free_extent_buffer(next);
5774 				btrfs_release_path(path);
5775 				cond_resched();
5776 				goto again;
5777 			}
5778 			if (!ret) {
5779 				btrfs_set_path_blocking(path);
5780 				btrfs_tree_read_lock(next);
5781 			}
5782 			next_rw_lock = BTRFS_READ_LOCK;
5783 		}
5784 		break;
5785 	}
5786 	path->slots[level] = slot;
5787 	while (1) {
5788 		level--;
5789 		c = path->nodes[level];
5790 		if (path->locks[level])
5791 			btrfs_tree_unlock_rw(c, path->locks[level]);
5792 
5793 		free_extent_buffer(c);
5794 		path->nodes[level] = next;
5795 		path->slots[level] = 0;
5796 		if (!path->skip_locking)
5797 			path->locks[level] = next_rw_lock;
5798 		if (!level)
5799 			break;
5800 
5801 		ret = read_block_for_search(root, path, &next, level,
5802 					    0, &key);
5803 		if (ret == -EAGAIN)
5804 			goto again;
5805 
5806 		if (ret < 0) {
5807 			btrfs_release_path(path);
5808 			goto done;
5809 		}
5810 
5811 		if (!path->skip_locking) {
5812 			ret = btrfs_try_tree_read_lock(next);
5813 			if (!ret) {
5814 				btrfs_set_path_blocking(path);
5815 				btrfs_tree_read_lock(next);
5816 			}
5817 			next_rw_lock = BTRFS_READ_LOCK;
5818 		}
5819 	}
5820 	ret = 0;
5821 done:
5822 	unlock_up(path, 0, 1, 0, NULL);
5823 	path->leave_spinning = old_spinning;
5824 	if (!old_spinning)
5825 		btrfs_set_path_blocking(path);
5826 
5827 	return ret;
5828 }
5829 
5830 /*
5831  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5832  * searching until it gets past min_objectid or finds an item of 'type'
5833  *
5834  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5835  */
5836 int btrfs_previous_item(struct btrfs_root *root,
5837 			struct btrfs_path *path, u64 min_objectid,
5838 			int type)
5839 {
5840 	struct btrfs_key found_key;
5841 	struct extent_buffer *leaf;
5842 	u32 nritems;
5843 	int ret;
5844 
5845 	while (1) {
5846 		if (path->slots[0] == 0) {
5847 			btrfs_set_path_blocking(path);
5848 			ret = btrfs_prev_leaf(root, path);
5849 			if (ret != 0)
5850 				return ret;
5851 		} else {
5852 			path->slots[0]--;
5853 		}
5854 		leaf = path->nodes[0];
5855 		nritems = btrfs_header_nritems(leaf);
5856 		if (nritems == 0)
5857 			return 1;
5858 		if (path->slots[0] == nritems)
5859 			path->slots[0]--;
5860 
5861 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5862 		if (found_key.objectid < min_objectid)
5863 			break;
5864 		if (found_key.type == type)
5865 			return 0;
5866 		if (found_key.objectid == min_objectid &&
5867 		    found_key.type < type)
5868 			break;
5869 	}
5870 	return 1;
5871 }
5872 
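/*
 * A minimal sketch of the usual btrfs_previous_item() pattern: search past
 * the area of interest, then step back to the previous item of the wanted
 * type.  The function name and key values are hypothetical.
 */
static int example_find_last_item(struct btrfs_root *root, u64 objectid,
				  int type, struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* aim one past anything we could be interested in */
	found_key->objectid = objectid;
	found_key->type = type;
	found_key->offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, found_key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* step back to the previous item of 'type', if one exists */
	ret = btrfs_previous_item(root, path, objectid, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;
}
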
5873 /*
5874  * search in the extent tree to find a previous Metadata/Data extent item
5875  * with the given minimum objectid.
5876  *
5877  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5878  */
5879 int btrfs_previous_extent_item(struct btrfs_root *root,
5880 			struct btrfs_path *path, u64 min_objectid)
5881 {
5882 	struct btrfs_key found_key;
5883 	struct extent_buffer *leaf;
5884 	u32 nritems;
5885 	int ret;
5886 
5887 	while (1) {
5888 		if (path->slots[0] == 0) {
5889 			btrfs_set_path_blocking(path);
5890 			ret = btrfs_prev_leaf(root, path);
5891 			if (ret != 0)
5892 				return ret;
5893 		} else {
5894 			path->slots[0]--;
5895 		}
5896 		leaf = path->nodes[0];
5897 		nritems = btrfs_header_nritems(leaf);
5898 		if (nritems == 0)
5899 			return 1;
5900 		if (path->slots[0] == nritems)
5901 			path->slots[0]--;
5902 
5903 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5904 		if (found_key.objectid < min_objectid)
5905 			break;
5906 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5907 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5908 			return 0;
5909 		if (found_key.objectid == min_objectid &&
5910 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5911 			break;
5912 	}
5913 	return 1;
5914 }
5915
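
/*
 * A sketch of how a caller such as scrub can use
 * btrfs_previous_extent_item(): position the path at a logical address in
 * the extent tree and step back to the extent item that may precede or
 * cover it.  The surrounding function is hypothetical; only the btrfs_*
 * helpers and key types are real.
 */
static int example_prev_extent(struct btrfs_root *extent_root, u64 logical,
			       struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	found_key->objectid = logical;
	found_key->type = BTRFS_EXTENT_ITEM_KEY;
	found_key->offset = 0;

	ret = btrfs_search_slot(NULL, extent_root, found_key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* nothing at 'logical' itself, look at the previous extent */
		ret = btrfs_previous_extent_item(extent_root, path, 0);
		if (ret < 0)
			goto out;
	}
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;
}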