// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/*
 * Set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling.
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

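/*
 * Illustrative sketch, not part of the kernel sources: the inc_not_zero
 * dance above only takes a reference when the count has not already
 * dropped to zero.  A minimal userspace equivalent of that primitive,
 * assuming C11 <stdatomic.h> and <stdbool.h>:
 *
 *	static bool ref_inc_not_zero(atomic_int *refs)
 *	{
 *		int old = atomic_load(refs);
 *
 *		do {
 *			if (old == 0)
 *				return false;	// object already being freed
 *		} while (!atomic_compare_exchange_weak(refs, &old, old + 1));
 *		return true;
 *	}
 *
 * When it fails, the caller must not touch the object; btrfs_root_node()
 * reacts by waiting in synchronize_rcu() and re-reading root->node.
 */
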
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root.  A locked
 * buffer is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cow-only roots (everything that is not a reference counted cow subvolume)
 * just get put onto a simple dirty list.  transaction.c walks this list to
 * make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* these are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure that elem->seq is zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

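/*
 * Illustrative sketch, not part of the kernel sources: the rb-tree above is
 * keyed lexicographically on the (logical, seq) pair, and an exactly equal
 * pair is rejected with -EEXIST.  A plain comparator expressing that order
 * (tree_mod_cmp is a hypothetical helper name):
 *
 *	static int tree_mod_cmp(u64 a_logical, u64 a_seq,
 *				u64 b_logical, u64 b_seq)
 *	{
 *		if (a_logical != b_logical)
 *			return a_logical < b_logical ? -1 : 1;
 *		if (a_seq != b_seq)
 *			return a_seq < b_seq ? -1 : 1;
 *		return 0;	// duplicate key, insertion must fail
 *	}
 *
 * Because seq numbers come from btrfs_inc_tree_mod_seq(), all log entries
 * for one block stay ordered by the time of the modification.
 */
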
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}

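/*
 * Illustrative sketch, not part of the kernel sources: tree_mod_dont_log()
 * uses the classic check / lock / re-check idiom.  The unlocked test is only
 * an optimistic fast path; the result that matters is the one obtained under
 * the lock.  The shape of the idiom, with hypothetical names:
 *
 *	if (nothing_to_do())		// cheap, unlocked, may race
 *		return 1;
 *	write_lock(&lock);
 *	if (nothing_to_do()) {		// authoritative re-check
 *		write_unlock(&lock);
 *		return 1;
 *	}
 *	return 0;			// caller does the work, then unlocks
 *
 * Returning with the lock held is what lets callers insert several log
 * entries atomically with respect to the emptiness check.
 */
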
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}

static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop
	 * tree) are only allowed on blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	/*
	 * If we are COWing a node/leaf from the extent, chunk or device trees,
	 * make sure that we do not finish block group creation of pending block
	 * groups. We do this to avoid a deadlock.
	 * COWing can result in allocation of a new chunk, and flushing pending
	 * block groups (btrfs_create_pending_block_groups()) can be triggered
	 * when finishing allocation of a new chunk. Creation of a pending block
	 * group modifies the extent, chunk and device trees, therefore we could
	 * deadlock with ourselves since we are holding a lock on an extent
	 * buffer that btrfs_create_pending_block_groups() may try to COW later.
	 */
	if (root == fs_info->extent_root ||
	    root == fs_info->chunk_root ||
	    root == fs_info->dev_root)
		trans->can_flush_pending_bgs = false;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	trans->can_flush_pending_bgs = true;
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the tree mod log entry for the oldest predecessor of the given
 * root.  entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
		struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0, level, NULL);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE)
		level = tm->old_root.level;
	else
		level = btrfs_header_level(eb_root);
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during the transaction commit,
	 *    after we've finished copying the src root, we must COW the
	 *    shared block to ensure metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			"COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

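/*
 * Illustrative note, not part of the kernel sources: the search_start
 * computation above rounds buf->start down to a 1GiB boundary with a
 * power-of-two mask.  In general, for any power-of-two size S:
 *
 *	aligned = x & ~((u64)S - 1);
 *
 * e.g. with S = SZ_1G (0x40000000), x = 0x6789abcd gives 0x40000000, so
 * the allocator is hinted to place the COW copy near the original block,
 * within the same 1GiB-aligned region.
 */
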
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

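/*
 * Illustrative example, not part of the kernel sources: two blocks count as
 * "close" when the gap between them is under 32KiB.  With a 16384-byte
 * blocksize, blocknr = 1000000 and other = 1020000 leaves a gap of
 * 1020000 - (1000000 + 16384) = 3616 bytes, so close_blocks() returns 1;
 * with other = 1200000 the gap is 183616 bytes and it returns 0, which is
 * what makes btrfs_realloc_node() below relocate the child for locality.
 */
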
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

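/*
 * Illustrative example, not part of the kernel sources: keys compare by
 * (objectid, type, offset), in that order, so all items of one inode sort
 * together and by file offset within it.  With two sample keys:
 *
 *	struct btrfs_key a = { .objectid = 257,
 *			       .type = BTRFS_EXTENT_DATA_KEY, .offset = 0 };
 *	struct btrfs_key b = { .objectid = 257,
 *			       .type = BTRFS_EXTENT_DATA_KEY, .offset = 4096 };
 *
 *	btrfs_comp_cpu_keys(&a, &b);	// -1: same objectid and type,
 *					//     a.offset < b.offset
 *
 * This total order is what generic_bin_search() below relies on.
 */
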
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		struct btrfs_key first_key;
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		btrfs_node_key_to_cpu(parent, &first_key, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen,
						      parent_level - 1,
						      &first_key);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen,
						parent_level - 1, &first_key);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

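/*
 * Illustrative sketch, not part of the kernel sources: stripped of the
 * extent-buffer page mapping, the search above is a plain binary search
 * that returns 0 with *slot set on an exact match, or 1 with *slot set
 * to the insertion point (possibly equal to max).  Over a u64 array
 * (bin_search_slot is a hypothetical helper):
 *
 *	static int bin_search_slot(const u64 *keys, int max, u64 key,
 *				   int *slot)
 *	{
 *		int low = 0, high = max;
 *
 *		while (low < high) {
 *			int mid = low + (high - low) / 2;
 *
 *			if (keys[mid] < key)
 *				low = mid + 1;
 *			else if (keys[mid] > key)
 *				high = mid;
 *			else {
 *				*slot = mid;
 *				return 0;	// exact match
 *			}
 *		}
 *		*slot = low;	// where the key would be inserted
 *		return 1;
 *	}
 *
 * btrfs_bin_search() below just picks the right base offset and stride for
 * leaves (struct btrfs_item) versus nodes (struct btrfs_key_ptr).
 */
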
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
static noinline struct extent_buffer *
read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
	       int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(fs_info, mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		ret = tree_mod_log_insert_root(root->node, child, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(fs_info, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = read_node_slot(fs_info, parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = read_node_slot(fs_info, parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, fs_info, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, fs_info, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(fs_info, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = tree_mod_log_insert_key(parent, pslot + 1,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, fs_info, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, fs_info, left, mid, 1);
			if (wret < 0)
				ret = wret;
1954 		}
1955 		BUG_ON(wret == 1);
1956 	}
1957 	if (btrfs_header_nritems(mid) == 0) {
1958 		clean_tree_block(fs_info, mid);
1959 		btrfs_tree_unlock(mid);
1960 		del_ptr(root, path, level + 1, pslot);
1961 		root_sub_used(root, mid->len);
1962 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1963 		free_extent_buffer_stale(mid);
1964 		mid = NULL;
1965 	} else {
1966 		/* update the parent key to reflect our changes */
1967 		struct btrfs_disk_key mid_key;
1968 		btrfs_node_key(mid, &mid_key, 0);
1969 		ret = tree_mod_log_insert_key(parent, pslot,
1970 				MOD_LOG_KEY_REPLACE, GFP_NOFS);
1971 		BUG_ON(ret < 0);
1972 		btrfs_set_node_key(parent, &mid_key, pslot);
1973 		btrfs_mark_buffer_dirty(parent);
1974 	}
1975 
1976 	/* update the path */
1977 	if (left) {
1978 		if (btrfs_header_nritems(left) > orig_slot) {
1979 			extent_buffer_get(left);
1980 			/* left was locked after cow */
1981 			path->nodes[level] = left;
1982 			path->slots[level + 1] -= 1;
1983 			path->slots[level] = orig_slot;
1984 			if (mid) {
1985 				btrfs_tree_unlock(mid);
1986 				free_extent_buffer(mid);
1987 			}
1988 		} else {
1989 			orig_slot -= btrfs_header_nritems(left);
1990 			path->slots[level] = orig_slot;
1991 		}
1992 	}
1993 	/* double check we haven't messed things up */
1994 	if (orig_ptr !=
1995 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1996 		BUG();
1997 enospc:
1998 	if (right) {
1999 		btrfs_tree_unlock(right);
2000 		free_extent_buffer(right);
2001 	}
2002 	if (left) {
2003 		if (path->nodes[level] != left)
2004 			btrfs_tree_unlock(left);
2005 		free_extent_buffer(left);
2006 	}
2007 	return ret;
2008 }
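
/*
 * A worked example of the flow above (illustrative, values are made up):
 * suppose BTRFS_NODEPTRS_PER_BLOCK is 16 and 'mid' has dropped to 3 items
 * (< 16 / 4).  We first push mid into 'left', then drain 'right' into the
 * now-empty 'mid':
 *
 *	parent: [ L | M | R ]            parent: [ L' | M' ]
 *	left:   [ 10 items ]      ==>    left:   [ 13 items ]
 *	mid:    [  3 items ]             mid:    [  5 items ]
 *	right:  [  5 items ]             right:  freed, ptr removed
 *
 * Emptied nodes are freed and their parent pointers removed with
 * del_ptr(); otherwise the parent keys are refreshed to match the new
 * first keys of 'mid' and 'right'.
 */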
2009 
2010 /* Node balancing for insertion.  Here we only split or push nodes around
2011  * when they are completely full.  This is also done top down, so we
2012  * have to be pessimistic.
2013  */
2014 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2015 					  struct btrfs_root *root,
2016 					  struct btrfs_path *path, int level)
2017 {
2018 	struct btrfs_fs_info *fs_info = root->fs_info;
2019 	struct extent_buffer *right = NULL;
2020 	struct extent_buffer *mid;
2021 	struct extent_buffer *left = NULL;
2022 	struct extent_buffer *parent = NULL;
2023 	int ret = 0;
2024 	int wret;
2025 	int pslot;
2026 	int orig_slot = path->slots[level];
2027 
2028 	if (level == 0)
2029 		return 1;
2030 
2031 	mid = path->nodes[level];
2032 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2033 
2034 	if (level < BTRFS_MAX_LEVEL - 1) {
2035 		parent = path->nodes[level + 1];
2036 		pslot = path->slots[level + 1];
2037 	}
2038 
2039 	if (!parent)
2040 		return 1;
2041 
2042 	left = read_node_slot(fs_info, parent, pslot - 1);
2043 	if (IS_ERR(left))
2044 		left = NULL;
2045 
2046 	/* first, try to make some room in the middle buffer */
2047 	if (left) {
2048 		u32 left_nr;
2049 
2050 		btrfs_tree_lock(left);
2051 		btrfs_set_lock_blocking(left);
2052 
2053 		left_nr = btrfs_header_nritems(left);
2054 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2055 			wret = 1;
2056 		} else {
2057 			ret = btrfs_cow_block(trans, root, left, parent,
2058 					      pslot - 1, &left);
2059 			if (ret)
2060 				wret = 1;
2061 			else {
2062 				wret = push_node_left(trans, fs_info,
2063 						      left, mid, 0);
2064 			}
2065 		}
2066 		if (wret < 0)
2067 			ret = wret;
2068 		if (wret == 0) {
2069 			struct btrfs_disk_key disk_key;
2070 			orig_slot += left_nr;
2071 			btrfs_node_key(mid, &disk_key, 0);
2072 			ret = tree_mod_log_insert_key(parent, pslot,
2073 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2074 			BUG_ON(ret < 0);
2075 			btrfs_set_node_key(parent, &disk_key, pslot);
2076 			btrfs_mark_buffer_dirty(parent);
2077 			if (btrfs_header_nritems(left) > orig_slot) {
2078 				path->nodes[level] = left;
2079 				path->slots[level + 1] -= 1;
2080 				path->slots[level] = orig_slot;
2081 				btrfs_tree_unlock(mid);
2082 				free_extent_buffer(mid);
2083 			} else {
2084 				orig_slot -=
2085 					btrfs_header_nritems(left);
2086 				path->slots[level] = orig_slot;
2087 				btrfs_tree_unlock(left);
2088 				free_extent_buffer(left);
2089 			}
2090 			return 0;
2091 		}
2092 		btrfs_tree_unlock(left);
2093 		free_extent_buffer(left);
2094 	}
2095 	right = read_node_slot(fs_info, parent, pslot + 1);
2096 	if (IS_ERR(right))
2097 		right = NULL;
2098 
2099 	/*
2100 	 * then try to empty the right most buffer into the middle
2101 	 */
2102 	if (right) {
2103 		u32 right_nr;
2104 
2105 		btrfs_tree_lock(right);
2106 		btrfs_set_lock_blocking(right);
2107 
2108 		right_nr = btrfs_header_nritems(right);
2109 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2110 			wret = 1;
2111 		} else {
2112 			ret = btrfs_cow_block(trans, root, right,
2113 					      parent, pslot + 1,
2114 					      &right);
2115 			if (ret)
2116 				wret = 1;
2117 			else {
2118 				wret = balance_node_right(trans, fs_info,
2119 							  right, mid);
2120 			}
2121 		}
2122 		if (wret < 0)
2123 			ret = wret;
2124 		if (wret == 0) {
2125 			struct btrfs_disk_key disk_key;
2126 
2127 			btrfs_node_key(right, &disk_key, 0);
2128 			ret = tree_mod_log_insert_key(parent, pslot + 1,
2129 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2130 			BUG_ON(ret < 0);
2131 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2132 			btrfs_mark_buffer_dirty(parent);
2133 
2134 			if (btrfs_header_nritems(mid) <= orig_slot) {
2135 				path->nodes[level] = right;
2136 				path->slots[level + 1] += 1;
2137 				path->slots[level] = orig_slot -
2138 					btrfs_header_nritems(mid);
2139 				btrfs_tree_unlock(mid);
2140 				free_extent_buffer(mid);
2141 			} else {
2142 				btrfs_tree_unlock(right);
2143 				free_extent_buffer(right);
2144 			}
2145 			return 0;
2146 		}
2147 		btrfs_tree_unlock(right);
2148 		free_extent_buffer(right);
2149 	}
2150 	return 1;
2151 }
2152 
2153 /*
2154  * readahead one full node of leaves, finding things that are close
2155  * to the block in 'slot', and triggering readahead on them.
2156  */
2157 static void reada_for_search(struct btrfs_fs_info *fs_info,
2158 			     struct btrfs_path *path,
2159 			     int level, int slot, u64 objectid)
2160 {
2161 	struct extent_buffer *node;
2162 	struct btrfs_disk_key disk_key;
2163 	u32 nritems;
2164 	u64 search;
2165 	u64 target;
2166 	u64 nread = 0;
2167 	struct extent_buffer *eb;
2168 	u32 nr;
2169 	u32 blocksize;
2170 	u32 nscan = 0;
2171 
2172 	if (level != 1)
2173 		return;
2174 
2175 	if (!path->nodes[level])
2176 		return;
2177 
2178 	node = path->nodes[level];
2179 
2180 	search = btrfs_node_blockptr(node, slot);
2181 	blocksize = fs_info->nodesize;
2182 	eb = find_extent_buffer(fs_info, search);
2183 	if (eb) {
2184 		free_extent_buffer(eb);
2185 		return;
2186 	}
2187 
2188 	target = search;
2189 
2190 	nritems = btrfs_header_nritems(node);
2191 	nr = slot;
2192 
2193 	while (1) {
2194 		if (path->reada == READA_BACK) {
2195 			if (nr == 0)
2196 				break;
2197 			nr--;
2198 		} else if (path->reada == READA_FORWARD) {
2199 			nr++;
2200 			if (nr >= nritems)
2201 				break;
2202 		}
2203 		if (path->reada == READA_BACK && objectid) {
2204 			btrfs_node_key(node, &disk_key, nr);
2205 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2206 				break;
2207 		}
2208 		search = btrfs_node_blockptr(node, nr);
2209 		if ((search <= target && target - search <= 65536) ||
2210 		    (search > target && search - target <= 65536)) {
2211 			readahead_tree_block(fs_info, search);
2212 			nread += blocksize;
2213 		}
2214 		nscan++;
2215 		if ((nread > 65536 || nscan > 32))
2216 			break;
2217 	}
2218 }
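
/*
 * Sketch of the window logic above (numbers taken from the code, the
 * scenario is illustrative): a pass starting at target == search walks
 * slots outward and only issues readahead for blocks whose bytenr lies
 * within 64K of the target:
 *
 *	|search - target| <= 65536  ->  readahead_tree_block()
 *
 * and stops once more than 64K worth of blocks were queued (nread) or
 * 32 slots were scanned (nscan).  This keeps the readahead clustered
 * around the slot being searched instead of walking the whole node.
 */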
2219 
2220 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2221 				       struct btrfs_path *path, int level)
2222 {
2223 	int slot;
2224 	int nritems;
2225 	struct extent_buffer *parent;
2226 	struct extent_buffer *eb;
2227 	u64 gen;
2228 	u64 block1 = 0;
2229 	u64 block2 = 0;
2230 
2231 	parent = path->nodes[level + 1];
2232 	if (!parent)
2233 		return;
2234 
2235 	nritems = btrfs_header_nritems(parent);
2236 	slot = path->slots[level + 1];
2237 
2238 	if (slot > 0) {
2239 		block1 = btrfs_node_blockptr(parent, slot - 1);
2240 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2241 		eb = find_extent_buffer(fs_info, block1);
2242 		/*
2243 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2244 		 * don't want to return -EAGAIN here.  That will loop
2245 		 * forever.
2246 		 */
2247 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2248 			block1 = 0;
2249 		free_extent_buffer(eb);
2250 	}
2251 	if (slot + 1 < nritems) {
2252 		block2 = btrfs_node_blockptr(parent, slot + 1);
2253 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2254 		eb = find_extent_buffer(fs_info, block2);
2255 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2256 			block2 = 0;
2257 		free_extent_buffer(eb);
2258 	}
2259 
2260 	if (block1)
2261 		readahead_tree_block(fs_info, block1);
2262 	if (block2)
2263 		readahead_tree_block(fs_info, block2);
2264 }
2265 
2266 
2267 /*
2268  * when we walk down the tree, it is usually safe to unlock the higher layers
2269  * in the tree.  The exceptions are when our path goes through slot 0, because
2270  * operations on the tree might require changing key pointers higher up in the
2271  * tree.
2272  *
2273  * callers might also have set path->keep_locks, which tells this code to keep
2274  * the lock if the path points to the last slot in the block.  This is part of
2275  * walking through the tree, and selecting the next slot in the higher block.
2276  *
2277  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2278  * if lowest_unlock is 1, level 0 won't be unlocked
2279  */
2280 static noinline void unlock_up(struct btrfs_path *path, int level,
2281 			       int lowest_unlock, int min_write_lock_level,
2282 			       int *write_lock_level)
2283 {
2284 	int i;
2285 	int skip_level = level;
2286 	int no_skips = 0;
2287 	struct extent_buffer *t;
2288 
2289 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2290 		if (!path->nodes[i])
2291 			break;
2292 		if (!path->locks[i])
2293 			break;
2294 		if (!no_skips && path->slots[i] == 0) {
2295 			skip_level = i + 1;
2296 			continue;
2297 		}
2298 		if (!no_skips && path->keep_locks) {
2299 			u32 nritems;
2300 			t = path->nodes[i];
2301 			nritems = btrfs_header_nritems(t);
2302 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2303 				skip_level = i + 1;
2304 				continue;
2305 			}
2306 		}
2307 		if (skip_level < i && i >= lowest_unlock)
2308 			no_skips = 1;
2309 
2310 		t = path->nodes[i];
2311 		if (i >= lowest_unlock && i > skip_level) {
2312 			btrfs_tree_unlock_rw(t, path->locks[i]);
2313 			path->locks[i] = 0;
2314 			if (write_lock_level &&
2315 			    i > min_write_lock_level &&
2316 			    i <= *write_lock_level) {
2317 				*write_lock_level = i - 1;
2318 			}
2319 		}
2320 	}
2321 }
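
/*
 * Example of the skip rules above (hypothetical path state): with
 * level == 1, lowest_unlock == 2 and path->slots[] = {5, 0, 3, ...},
 * slot 0 at level 1 sets skip_level to 2, so levels 1 and 2 both stay
 * locked (a key change below could ripple up through that slot-0
 * entry).  Levels 3 and up are unlocked as usual, and *write_lock_level
 * is lowered as write locks above min_write_lock_level are released.
 */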
2322 
2323 /*
2324  * This releases any locks held in the path starting at level and
2325  * going all the way up to the root.
2326  *
2327  * btrfs_search_slot will keep the lock held on higher nodes in a few
2328  * corner cases, such as COW of the block at slot zero in the node.  This
2329  * ignores those rules, and it should only be called when there are no
2330  * more updates to be done higher up in the tree.
2331  */
2332 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2333 {
2334 	int i;
2335 
2336 	if (path->keep_locks)
2337 		return;
2338 
2339 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2340 		if (!path->nodes[i])
2341 			continue;
2342 		if (!path->locks[i])
2343 			continue;
2344 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2345 		path->locks[i] = 0;
2346 	}
2347 }
2348 
2349 /*
2350  * helper function for btrfs_search_slot.  The goal is to find a block
2351  * in cache without setting the path to blocking.  If we find the block
2352  * we return zero and the path is unchanged.
2353  *
2354  * If we can't find the block, we set the path blocking and do some
2355  * reada.  -EAGAIN is returned and the search must be repeated.
2356  */
2357 static int
2358 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2359 		      struct extent_buffer **eb_ret, int level, int slot,
2360 		      const struct btrfs_key *key)
2361 {
2362 	struct btrfs_fs_info *fs_info = root->fs_info;
2363 	u64 blocknr;
2364 	u64 gen;
2365 	struct extent_buffer *b = *eb_ret;
2366 	struct extent_buffer *tmp;
2367 	struct btrfs_key first_key;
2368 	int ret;
2369 	int parent_level;
2370 
2371 	blocknr = btrfs_node_blockptr(b, slot);
2372 	gen = btrfs_node_ptr_generation(b, slot);
2373 	parent_level = btrfs_header_level(b);
2374 	btrfs_node_key_to_cpu(b, &first_key, slot);
2375 
2376 	tmp = find_extent_buffer(fs_info, blocknr);
2377 	if (tmp) {
2378 		/* first we do an atomic uptodate check */
2379 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2380 			*eb_ret = tmp;
2381 			return 0;
2382 		}
2383 
2384 		/* the pages were up to date, but we failed
2385 		 * the generation number check.  Do a full
2386 		 * read for the generation number that is correct.
2387 		 * We must do this without dropping locks so
2388 		 * we can trust our generation number
2389 		 */
2390 		btrfs_set_path_blocking(p);
2391 
2392 		/* now we're allowed to do a blocking uptodate check */
2393 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2394 		if (!ret) {
2395 			*eb_ret = tmp;
2396 			return 0;
2397 		}
2398 		free_extent_buffer(tmp);
2399 		btrfs_release_path(p);
2400 		return -EIO;
2401 	}
2402 
2403 	/*
2404 	 * reduce lock contention at high levels
2405 	 * of the btree by dropping locks before
2406 	 * we read.  Don't release the lock on the current
2407 	 * level because we need to walk this node to figure
2408 	 * out which blocks to read.
2409 	 */
2410 	btrfs_unlock_up_safe(p, level + 1);
2411 	btrfs_set_path_blocking(p);
2412 
2413 	if (p->reada != READA_NONE)
2414 		reada_for_search(fs_info, p, level, slot, key->objectid);
2415 
2416 	ret = -EAGAIN;
2417 	tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2418 			      &first_key);
2419 	if (!IS_ERR(tmp)) {
2420 		/*
2421 		 * If the read above didn't mark this buffer up to date,
2422 		 * it will never end up being up to date.  Set ret to -EIO now
2423 		 * and give up so that our caller doesn't loop forever
2424 		 * on our EAGAINs.
2425 		 */
2426 		if (!extent_buffer_uptodate(tmp))
2427 			ret = -EIO;
2428 		free_extent_buffer(tmp);
2429 	} else {
2430 		ret = PTR_ERR(tmp);
2431 	}
2432 
2433 	btrfs_release_path(p);
2434 	return ret;
2435 }
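
/*
 * Callers are expected to treat -EAGAIN as "path was dropped, restart
 * from the root".  A minimal sketch of the retry loop (this mirrors how
 * btrfs_search_slot uses it below):
 *
 *	err = read_block_for_search(root, p, &b, level, slot, key);
 *	if (err == -EAGAIN)
 *		goto again;	/* path released, take the root again */
 *	if (err)
 *		goto done;	/* hard error such as -EIO */
 */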
2436 
2437 /*
2438  * helper function for btrfs_search_slot.  This does all of the checks
2439  * for node-level blocks and does any balancing required based on
2440  * the ins_len.
2441  *
2442  * If no extra work was required, zero is returned.  If we had to
2443  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2444  * start over
2445  */
2446 static int
2447 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2448 		       struct btrfs_root *root, struct btrfs_path *p,
2449 		       struct extent_buffer *b, int level, int ins_len,
2450 		       int *write_lock_level)
2451 {
2452 	struct btrfs_fs_info *fs_info = root->fs_info;
2453 	int ret;
2454 
2455 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2456 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2457 		int sret;
2458 
2459 		if (*write_lock_level < level + 1) {
2460 			*write_lock_level = level + 1;
2461 			btrfs_release_path(p);
2462 			goto again;
2463 		}
2464 
2465 		btrfs_set_path_blocking(p);
2466 		reada_for_balance(fs_info, p, level);
2467 		sret = split_node(trans, root, p, level);
2468 
2469 		BUG_ON(sret > 0);
2470 		if (sret) {
2471 			ret = sret;
2472 			goto done;
2473 		}
2474 		b = p->nodes[level];
2475 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2476 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2477 		int sret;
2478 
2479 		if (*write_lock_level < level + 1) {
2480 			*write_lock_level = level + 1;
2481 			btrfs_release_path(p);
2482 			goto again;
2483 		}
2484 
2485 		btrfs_set_path_blocking(p);
2486 		reada_for_balance(fs_info, p, level);
2487 		sret = balance_level(trans, root, p, level);
2488 
2489 		if (sret) {
2490 			ret = sret;
2491 			goto done;
2492 		}
2493 		b = p->nodes[level];
2494 		if (!b) {
2495 			btrfs_release_path(p);
2496 			goto again;
2497 		}
2498 		BUG_ON(btrfs_header_nritems(b) == 1);
2499 	}
2500 	return 0;
2501 
2502 again:
2503 	ret = -EAGAIN;
2504 done:
2505 	return ret;
2506 }
2507 
2508 static void key_search_validate(struct extent_buffer *b,
2509 				const struct btrfs_key *key,
2510 				int level)
2511 {
2512 #ifdef CONFIG_BTRFS_ASSERT
2513 	struct btrfs_disk_key disk_key;
2514 
2515 	btrfs_cpu_key_to_disk(&disk_key, key);
2516 
2517 	if (level == 0)
2518 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2519 		    offsetof(struct btrfs_leaf, items[0].key),
2520 		    sizeof(disk_key)));
2521 	else
2522 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2523 		    offsetof(struct btrfs_node, ptrs[0].key),
2524 		    sizeof(disk_key)));
2525 #endif
2526 }
2527 
2528 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2529 		      int level, int *prev_cmp, int *slot)
2530 {
2531 	if (*prev_cmp != 0) {
2532 		*prev_cmp = btrfs_bin_search(b, key, level, slot);
2533 		return *prev_cmp;
2534 	}
2535 
2536 	key_search_validate(b, key, level);
2537 	*slot = 0;
2538 
2539 	return 0;
2540 }
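
/*
 * Worked example of the *prev_cmp shortcut above (hypothetical keys):
 * once a level compared equal (*prev_cmp == 0), say (256, INODE_ITEM, 0)
 * matched exactly at level 2, the child node we descend into necessarily
 * begins with that same key, so the answer at every lower level is
 * slot 0 and the binary search can be skipped.  key_search_validate()
 * re-checks this, but only under CONFIG_BTRFS_ASSERT.
 */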
2541 
2542 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2543 		u64 iobjectid, u64 ioff, u8 key_type,
2544 		struct btrfs_key *found_key)
2545 {
2546 	int ret;
2547 	struct btrfs_key key;
2548 	struct extent_buffer *eb;
2549 
2550 	ASSERT(path);
2551 	ASSERT(found_key);
2552 
2553 	key.type = key_type;
2554 	key.objectid = iobjectid;
2555 	key.offset = ioff;
2556 
2557 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2558 	if (ret < 0)
2559 		return ret;
2560 
2561 	eb = path->nodes[0];
2562 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2563 		ret = btrfs_next_leaf(fs_root, path);
2564 		if (ret)
2565 			return ret;
2566 		eb = path->nodes[0];
2567 	}
2568 
2569 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2570 	if (found_key->type != key.type ||
2571 			found_key->objectid != key.objectid)
2572 		return 1;
2573 
2574 	return 0;
2575 }
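
/*
 * A minimal usage sketch (hypothetical caller and values): finding an
 * item by objectid and type when the exact offset is unknown, accepting
 * the next item that matches both:
 *
 *	struct btrfs_key found;
 *	int ret;
 *
 *	ret = btrfs_find_item(fs_root, path, objectid, 0,
 *			      BTRFS_ROOT_ITEM_KEY, &found);
 *	if (ret < 0)
 *		return ret;	/* search error */
 *	if (ret > 0)
 *		return -ENOENT;	/* no item with this objectid/type */
 *	/* found.offset now holds the offset of the matching item */
 */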
2576 
2577 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2578 							struct btrfs_path *p,
2579 							int write_lock_level)
2580 {
2581 	struct btrfs_fs_info *fs_info = root->fs_info;
2582 	struct extent_buffer *b;
2583 	int root_lock;
2584 	int level = 0;
2585 
2586 	/* We try very hard to do read locks on the root */
2587 	root_lock = BTRFS_READ_LOCK;
2588 
2589 	if (p->search_commit_root) {
2590 		/*
2591 		 * The commit roots are read only so we always do read locks,
2592 		 * and we always must hold the commit_root_sem when doing
2593 		 * searches on them, the only exception is send where we don't
2594 		 * want to block transaction commits for a long time, so
2595 		 * we need to clone the commit root in order to avoid races
2596 		 * with transaction commits that create a snapshot of one of
2597 		 * the roots used by a send operation.
2598 		 */
2599 		if (p->need_commit_sem) {
2600 			down_read(&fs_info->commit_root_sem);
2601 			b = btrfs_clone_extent_buffer(root->commit_root);
2602 			up_read(&fs_info->commit_root_sem);
2603 			if (!b)
2604 				return ERR_PTR(-ENOMEM);
2605 
2606 		} else {
2607 			b = root->commit_root;
2608 			extent_buffer_get(b);
2609 		}
2610 		level = btrfs_header_level(b);
2611 		/*
2612 		 * Ensure that all callers have set skip_locking when
2613 		 * p->search_commit_root = 1.
2614 		 */
2615 		ASSERT(p->skip_locking == 1);
2616 
2617 		goto out;
2618 	}
2619 
2620 	if (p->skip_locking) {
2621 		b = btrfs_root_node(root);
2622 		level = btrfs_header_level(b);
2623 		goto out;
2624 	}
2625 
2626 	/*
2627 	 * If the level is set to maximum, we can skip trying to get the read
2628 	 * lock.
2629 	 */
2630 	if (write_lock_level < BTRFS_MAX_LEVEL) {
2631 		/*
2632 		 * We don't know the level of the root node until we actually
2633 		 * have it read locked
2634 		 */
2635 		b = btrfs_read_lock_root_node(root);
2636 		level = btrfs_header_level(b);
2637 		if (level > write_lock_level)
2638 			goto out;
2639 
2640 		/* Whoops, must trade for write lock */
2641 		btrfs_tree_read_unlock(b);
2642 		free_extent_buffer(b);
2643 	}
2644 
2645 	b = btrfs_lock_root_node(root);
2646 	root_lock = BTRFS_WRITE_LOCK;
2647 
2648 	/* The level might have changed, check again */
2649 	level = btrfs_header_level(b);
2650 
2651 out:
2652 	p->nodes[level] = b;
2653 	if (!p->skip_locking)
2654 		p->locks[level] = root_lock;
2655 	/*
2656 	 * Callers are responsible for dropping b's references.
2657 	 */
2658 	return b;
2659 }
2660 
2661 
2662 /*
2663  * btrfs_search_slot - look for a key in a tree and perform necessary
2664  * modifications to preserve tree invariants.
2665  *
2666  * @trans:	Handle of transaction, used when modifying the tree
2667  * @root:	The root node of the tree
2668  * @key:	The key we are looking for
2669  * @p:		Holds all btree nodes along the search path
2670  * @ins_len:	Indicates purpose of search: for inserts it is 1, for
2671  *		deletions it's -1, 0 for plain searches
2672  * @cow:	boolean indicating whether CoW operations should be
2673  *		performed.  Must always be 1 when modifying the tree.
2674  *
2675  * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2676  * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2677  *
2678  * If @key is found, 0 is returned and you can find the item in the leaf level
2679  * of the path (level 0)
2680  *
2681  * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2682  * points to the slot where it should be inserted
2683  *
2684  * If an error is encountered while searching the tree a negative error number
2685  * is returned
2686  */
2687 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2688 		      const struct btrfs_key *key, struct btrfs_path *p,
2689 		      int ins_len, int cow)
2690 {
2691 	struct btrfs_fs_info *fs_info = root->fs_info;
2692 	struct extent_buffer *b;
2693 	int slot;
2694 	int ret;
2695 	int err;
2696 	int level;
2697 	int lowest_unlock = 1;
2698 	/* everything at write_lock_level or lower must be write locked */
2699 	int write_lock_level = 0;
2700 	u8 lowest_level = 0;
2701 	int min_write_lock_level;
2702 	int prev_cmp;
2703 
2704 	lowest_level = p->lowest_level;
2705 	WARN_ON(lowest_level && ins_len > 0);
2706 	WARN_ON(p->nodes[0] != NULL);
2707 	BUG_ON(!cow && ins_len);
2708 
2709 	if (ins_len < 0) {
2710 		lowest_unlock = 2;
2711 
2712 		/* when we are removing items, we might have to go up to level
2713 		 * two as we update tree pointers.  Make sure we keep write
2714 		 * locks for those levels as well
2715 		 */
2716 		write_lock_level = 2;
2717 	} else if (ins_len > 0) {
2718 		/*
2719 		 * for inserting items, make sure we have a write lock on
2720 		 * level 1 so we can update keys
2721 		 */
2722 		write_lock_level = 1;
2723 	}
2724 
2725 	if (!cow)
2726 		write_lock_level = -1;
2727 
2728 	if (cow && (p->keep_locks || p->lowest_level))
2729 		write_lock_level = BTRFS_MAX_LEVEL;
2730 
2731 	min_write_lock_level = write_lock_level;
2732 
2733 again:
2734 	prev_cmp = -1;
2735 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
2736 	if (IS_ERR(b)) {
2737 		ret = PTR_ERR(b);
2738 		goto done;
2739 	}
2740 
2741 	while (b) {
2742 		level = btrfs_header_level(b);
2743 
2744 		/*
2745 		 * setup the path here so we can release it under lock
2746 		 * contention with the cow code
2747 		 */
2748 		if (cow) {
2749 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2750 
2751 			/*
2752 			 * if we don't really need to cow this block
2753 			 * then we don't want to set the path blocking,
2754 			 * so we test it here
2755 			 */
2756 			if (!should_cow_block(trans, root, b)) {
2757 				trans->dirty = true;
2758 				goto cow_done;
2759 			}
2760 
2761 			/*
2762 			 * must have write locks on this node and the
2763 			 * parent
2764 			 */
2765 			if (level > write_lock_level ||
2766 			    (level + 1 > write_lock_level &&
2767 			    level + 1 < BTRFS_MAX_LEVEL &&
2768 			    p->nodes[level + 1])) {
2769 				write_lock_level = level + 1;
2770 				btrfs_release_path(p);
2771 				goto again;
2772 			}
2773 
2774 			btrfs_set_path_blocking(p);
2775 			if (last_level)
2776 				err = btrfs_cow_block(trans, root, b, NULL, 0,
2777 						      &b);
2778 			else
2779 				err = btrfs_cow_block(trans, root, b,
2780 						      p->nodes[level + 1],
2781 						      p->slots[level + 1], &b);
2782 			if (err) {
2783 				ret = err;
2784 				goto done;
2785 			}
2786 		}
2787 cow_done:
2788 		p->nodes[level] = b;
2789 		/*
2790 		 * Leave path with blocking locks to avoid massive
2791 		 * lock context switching; this is done on purpose.
2792 		 */
2793 
2794 		/*
2795 		 * we have a lock on b and as long as we aren't changing
2796 		 * the tree, there is no way for the items in b to change.
2797 		 * It is safe to drop the lock on our parent before we
2798 		 * go through the expensive btree search on b.
2799 		 *
2800 		 * If we're inserting or deleting (ins_len != 0), then we might
2801 		 * be changing slot zero, which may require changing the parent.
2802 		 * So, we can't drop the lock until after we know which slot
2803 		 * we're operating on.
2804 		 */
2805 		if (!ins_len && !p->keep_locks) {
2806 			int u = level + 1;
2807 
2808 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2809 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2810 				p->locks[u] = 0;
2811 			}
2812 		}
2813 
2814 		ret = key_search(b, key, level, &prev_cmp, &slot);
2815 		if (ret < 0)
2816 			goto done;
2817 
2818 		if (level != 0) {
2819 			int dec = 0;
2820 			if (ret && slot > 0) {
2821 				dec = 1;
2822 				slot -= 1;
2823 			}
2824 			p->slots[level] = slot;
2825 			err = setup_nodes_for_search(trans, root, p, b, level,
2826 					     ins_len, &write_lock_level);
2827 			if (err == -EAGAIN)
2828 				goto again;
2829 			if (err) {
2830 				ret = err;
2831 				goto done;
2832 			}
2833 			b = p->nodes[level];
2834 			slot = p->slots[level];
2835 
2836 			/*
2837 			 * slot 0 is special, if we change the key
2838 			 * we have to update the parent pointer
2839 			 * which means we must have a write lock
2840 			 * on the parent
2841 			 */
2842 			if (slot == 0 && ins_len &&
2843 			    write_lock_level < level + 1) {
2844 				write_lock_level = level + 1;
2845 				btrfs_release_path(p);
2846 				goto again;
2847 			}
2848 
2849 			unlock_up(p, level, lowest_unlock,
2850 				  min_write_lock_level, &write_lock_level);
2851 
2852 			if (level == lowest_level) {
2853 				if (dec)
2854 					p->slots[level]++;
2855 				goto done;
2856 			}
2857 
2858 			err = read_block_for_search(root, p, &b, level,
2859 						    slot, key);
2860 			if (err == -EAGAIN)
2861 				goto again;
2862 			if (err) {
2863 				ret = err;
2864 				goto done;
2865 			}
2866 
2867 			if (!p->skip_locking) {
2868 				level = btrfs_header_level(b);
2869 				if (level <= write_lock_level) {
2870 					err = btrfs_try_tree_write_lock(b);
2871 					if (!err) {
2872 						btrfs_set_path_blocking(p);
2873 						btrfs_tree_lock(b);
2874 					}
2875 					p->locks[level] = BTRFS_WRITE_LOCK;
2876 				} else {
2877 					err = btrfs_tree_read_lock_atomic(b);
2878 					if (!err) {
2879 						btrfs_set_path_blocking(p);
2880 						btrfs_tree_read_lock(b);
2881 					}
2882 					p->locks[level] = BTRFS_READ_LOCK;
2883 				}
2884 				p->nodes[level] = b;
2885 			}
2886 		} else {
2887 			p->slots[level] = slot;
2888 			if (ins_len > 0 &&
2889 			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
2890 				if (write_lock_level < 1) {
2891 					write_lock_level = 1;
2892 					btrfs_release_path(p);
2893 					goto again;
2894 				}
2895 
2896 				btrfs_set_path_blocking(p);
2897 				err = split_leaf(trans, root, key,
2898 						 p, ins_len, ret == 0);
2899 
2900 				BUG_ON(err > 0);
2901 				if (err) {
2902 					ret = err;
2903 					goto done;
2904 				}
2905 			}
2906 			if (!p->search_for_split)
2907 				unlock_up(p, level, lowest_unlock,
2908 					  min_write_lock_level, NULL);
2909 			goto done;
2910 		}
2911 	}
2912 	ret = 1;
2913 done:
2914 	/*
2915 	 * we don't really know what they plan on doing with the path
2916 	 * from here on, so for now just mark it as blocking
2917 	 */
2918 	if (!p->leave_spinning)
2919 		btrfs_set_path_blocking(p);
2920 	if (ret < 0 && !p->skip_release_on_error)
2921 		btrfs_release_path(p);
2922 	return ret;
2923 }
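
/*
 * A minimal read-only usage sketch (hypothetical caller and key values):
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		/* exact match at path->nodes[0] / path->slots[0] */
 *	} else if (ret > 0) {
 *		/* not found, slot points at the insert position */
 *	}
 *	btrfs_free_path(path);
 *
 * For modifications, pass a transaction handle and cow == 1, with a
 * positive or negative ins_len so the walk splits or merges as needed.
 */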
2924 
2925 /*
2926  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2927  * current state of the tree together with the operations recorded in the tree
2928  * modification log to search for the key in a previous version of this tree, as
2929  * denoted by the time_seq parameter.
2930  *
2931  * Naturally, there is no support for insert, delete or cow operations.
2932  *
2933  * The resulting path and return value will be set up as if we called
2934  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2935  */
2936 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2937 			  struct btrfs_path *p, u64 time_seq)
2938 {
2939 	struct btrfs_fs_info *fs_info = root->fs_info;
2940 	struct extent_buffer *b;
2941 	int slot;
2942 	int ret;
2943 	int err;
2944 	int level;
2945 	int lowest_unlock = 1;
2946 	u8 lowest_level = 0;
2947 	int prev_cmp = -1;
2948 
2949 	lowest_level = p->lowest_level;
2950 	WARN_ON(p->nodes[0] != NULL);
2951 
2952 	if (p->search_commit_root) {
2953 		BUG_ON(time_seq);
2954 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2955 	}
2956 
2957 again:
2958 	b = get_old_root(root, time_seq);
2959 	if (!b) {
2960 		ret = -EIO;
2961 		goto done;
2962 	}
2963 	level = btrfs_header_level(b);
2964 	p->locks[level] = BTRFS_READ_LOCK;
2965 
2966 	while (b) {
2967 		level = btrfs_header_level(b);
2968 		p->nodes[level] = b;
2969 
2970 		/*
2971 		 * we have a lock on b and as long as we aren't changing
2972 		 * the tree, there is no way for the items in b to change.
2973 		 * It is safe to drop the lock on our parent before we
2974 		 * go through the expensive btree search on b.
2975 		 */
2976 		btrfs_unlock_up_safe(p, level + 1);
2977 
2978 		/*
2979 		 * Since we can unwind extent buffers we want to do a real
2980 		 * search every time.
2981 		 */
2982 		prev_cmp = -1;
2983 		ret = key_search(b, key, level, &prev_cmp, &slot);
2984 
2985 		if (level != 0) {
2986 			int dec = 0;
2987 			if (ret && slot > 0) {
2988 				dec = 1;
2989 				slot -= 1;
2990 			}
2991 			p->slots[level] = slot;
2992 			unlock_up(p, level, lowest_unlock, 0, NULL);
2993 
2994 			if (level == lowest_level) {
2995 				if (dec)
2996 					p->slots[level]++;
2997 				goto done;
2998 			}
2999 
3000 			err = read_block_for_search(root, p, &b, level,
3001 						    slot, key);
3002 			if (err == -EAGAIN)
3003 				goto again;
3004 			if (err) {
3005 				ret = err;
3006 				goto done;
3007 			}
3008 
3009 			level = btrfs_header_level(b);
3010 			err = btrfs_tree_read_lock_atomic(b);
3011 			if (!err) {
3012 				btrfs_set_path_blocking(p);
3013 				btrfs_tree_read_lock(b);
3014 			}
3015 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3016 			if (!b) {
3017 				ret = -ENOMEM;
3018 				goto done;
3019 			}
3020 			p->locks[level] = BTRFS_READ_LOCK;
3021 			p->nodes[level] = b;
3022 		} else {
3023 			p->slots[level] = slot;
3024 			unlock_up(p, level, lowest_unlock, 0, NULL);
3025 			goto done;
3026 		}
3027 	}
3028 	ret = 1;
3029 done:
3030 	if (!p->leave_spinning)
3031 		btrfs_set_path_blocking(p);
3032 	if (ret < 0)
3033 		btrfs_release_path(p);
3034 
3035 	return ret;
3036 }
3037 
3038 /*
3039  * helper to use instead of search slot if no exact match is needed but
3040  * instead the next or previous item should be returned.
3041  * When find_higher is true, the next higher item is returned, the next lower
3042  * otherwise.
3043  * When return_any and find_higher are both true, and no higher item is found,
3044  * return the next lower instead.
3045  * When return_any is true and find_higher is false, and no lower item is found,
3046  * return the next higher instead.
3047  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3048  * < 0 on error
3049  */
3050 int btrfs_search_slot_for_read(struct btrfs_root *root,
3051 			       const struct btrfs_key *key,
3052 			       struct btrfs_path *p, int find_higher,
3053 			       int return_any)
3054 {
3055 	int ret;
3056 	struct extent_buffer *leaf;
3057 
3058 again:
3059 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3060 	if (ret <= 0)
3061 		return ret;
3062 	/*
3063 	 * a return value of 1 means the path is at the position where the
3064 	 * item should be inserted. Normally this is the next bigger item,
3065 	 * but in case the previous item is the last in a leaf, path points
3066 	 * to the first free slot in the previous leaf, i.e. at an invalid
3067 	 * item.
3068 	 */
3069 	leaf = p->nodes[0];
3070 
3071 	if (find_higher) {
3072 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3073 			ret = btrfs_next_leaf(root, p);
3074 			if (ret <= 0)
3075 				return ret;
3076 			if (!return_any)
3077 				return 1;
3078 			/*
3079 			 * no higher item found, return the next
3080 			 * lower instead
3081 			 */
3082 			return_any = 0;
3083 			find_higher = 0;
3084 			btrfs_release_path(p);
3085 			goto again;
3086 		}
3087 	} else {
3088 		if (p->slots[0] == 0) {
3089 			ret = btrfs_prev_leaf(root, p);
3090 			if (ret < 0)
3091 				return ret;
3092 			if (!ret) {
3093 				leaf = p->nodes[0];
3094 				if (p->slots[0] == btrfs_header_nritems(leaf))
3095 					p->slots[0]--;
3096 				return 0;
3097 			}
3098 			if (!return_any)
3099 				return 1;
3100 			/*
3101 			 * no lower item found, return the next
3102 			 * higher instead
3103 			 */
3104 			return_any = 0;
3105 			find_higher = 1;
3106 			btrfs_release_path(p);
3107 			goto again;
3108 		} else {
3109 			--p->slots[0];
3110 		}
3111 	}
3112 	return 0;
3113 }
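
/*
 * Sketch of the fallback behaviour above (hypothetical tree content):
 * searching for key (100, X, 0) with find_higher == 1 in a tree whose
 * largest key is (90, X, 0) walks off the last leaf; with return_any
 * set, the search restarts with find_higher == 0 and lands on
 * (90, X, 0) instead of returning 1.
 */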
3114 
3115 /*
3116  * adjust the pointers going up the tree, starting at level
3117  * making sure the right key of each node points to 'key'.
3118  * This is used after shifting pointers to the left, so it stops
3119  * fixing up pointers when a given leaf/node is not in slot 0 of the
3120  * higher levels
3121  *
3122  */
3123 static void fixup_low_keys(struct btrfs_path *path,
3124 			   struct btrfs_disk_key *key, int level)
3125 {
3126 	int i;
3127 	struct extent_buffer *t;
3128 	int ret;
3129 
3130 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3131 		int tslot = path->slots[i];
3132 
3133 		if (!path->nodes[i])
3134 			break;
3135 		t = path->nodes[i];
3136 		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3137 				GFP_ATOMIC);
3138 		BUG_ON(ret < 0);
3139 		btrfs_set_node_key(t, key, tslot);
3140 		btrfs_mark_buffer_dirty(path->nodes[i]);
3141 		if (tslot != 0)
3142 			break;
3143 	}
3144 }
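
/*
 * Worked example (hypothetical slots): if the first key of a leaf
 * changed and path->slots[] = {0, 0, 2, ...}, the loop rewrites slot 0
 * of the level-1 node (still that node's first key, so keep going),
 * then slot 2 of the level-2 node and stops: since slot 2 is not the
 * first entry, the level-2 node's own first key is unchanged and
 * nothing higher up needs fixing.
 */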
3145 
3146 /*
3147  * update item key.
3148  *
3149  * This function isn't completely safe. It's the caller's responsibility
3150  * that the new key won't break the order
3151  */
3152 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3153 			     struct btrfs_path *path,
3154 			     const struct btrfs_key *new_key)
3155 {
3156 	struct btrfs_disk_key disk_key;
3157 	struct extent_buffer *eb;
3158 	int slot;
3159 
3160 	eb = path->nodes[0];
3161 	slot = path->slots[0];
3162 	if (slot > 0) {
3163 		btrfs_item_key(eb, &disk_key, slot - 1);
3164 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3165 	}
3166 	if (slot < btrfs_header_nritems(eb) - 1) {
3167 		btrfs_item_key(eb, &disk_key, slot + 1);
3168 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3169 	}
3170 
3171 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3172 	btrfs_set_item_key(eb, &disk_key, slot);
3173 	btrfs_mark_buffer_dirty(eb);
3174 	if (slot == 0)
3175 		fixup_low_keys(path, &disk_key, 1);
3176 }
3177 
3178 /*
3179  * try to push data from one node into the next node left in the
3180  * tree.
3181  *
3182  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3183  * error, and > 0 if there was no room in the left hand block.
3184  */
3185 static int push_node_left(struct btrfs_trans_handle *trans,
3186 			  struct btrfs_fs_info *fs_info,
3187 			  struct extent_buffer *dst,
3188 			  struct extent_buffer *src, int empty)
3189 {
3190 	int push_items = 0;
3191 	int src_nritems;
3192 	int dst_nritems;
3193 	int ret = 0;
3194 
3195 	src_nritems = btrfs_header_nritems(src);
3196 	dst_nritems = btrfs_header_nritems(dst);
3197 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3198 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3199 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3200 
3201 	if (!empty && src_nritems <= 8)
3202 		return 1;
3203 
3204 	if (push_items <= 0)
3205 		return 1;
3206 
3207 	if (empty) {
3208 		push_items = min(src_nritems, push_items);
3209 		if (push_items < src_nritems) {
3210 			/* leave at least 8 pointers in the node if
3211 			 * we aren't going to empty it
3212 			 */
3213 			if (src_nritems - push_items < 8) {
3214 				if (push_items <= 8)
3215 					return 1;
3216 				push_items -= 8;
3217 			}
3218 		}
3219 	} else
3220 		push_items = min(src_nritems - 8, push_items);
3221 
3222 	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3223 				   push_items);
3224 	if (ret) {
3225 		btrfs_abort_transaction(trans, ret);
3226 		return ret;
3227 	}
3228 	copy_extent_buffer(dst, src,
3229 			   btrfs_node_key_ptr_offset(dst_nritems),
3230 			   btrfs_node_key_ptr_offset(0),
3231 			   push_items * sizeof(struct btrfs_key_ptr));
3232 
3233 	if (push_items < src_nritems) {
3234 		/*
3235 		 * Don't call tree_mod_log_insert_move here, key removal was
3236 		 * already fully logged by tree_mod_log_eb_copy above.
3237 		 */
3238 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3239 				      btrfs_node_key_ptr_offset(push_items),
3240 				      (src_nritems - push_items) *
3241 				      sizeof(struct btrfs_key_ptr));
3242 	}
3243 	btrfs_set_header_nritems(src, src_nritems - push_items);
3244 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3245 	btrfs_mark_buffer_dirty(src);
3246 	btrfs_mark_buffer_dirty(dst);
3247 
3248 	return ret;
3249 }
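
/*
 * The magic 8s above are a heuristic: when not emptying 'src', always
 * leave at least 8 pointers behind so the node stays useful, and don't
 * bother pushing out of nodes that small to begin with.  E.g. (made-up
 * numbers) with src_nritems == 20 and 15 free slots in dst, push_items
 * is clamped to min(20 - 8, 15) == 12, leaving 8 pointers in src.
 */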
3250 
3251 /*
3252  * try to push data from one node into the next node right in the
3253  * tree.
3254  *
3255  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3256  * error, and > 0 if there was no room in the right hand block.
3257  *
3258  * this will only push up to 1/2 the contents of the left node over
3259  */
3260 static int balance_node_right(struct btrfs_trans_handle *trans,
3261 			      struct btrfs_fs_info *fs_info,
3262 			      struct extent_buffer *dst,
3263 			      struct extent_buffer *src)
3264 {
3265 	int push_items = 0;
3266 	int max_push;
3267 	int src_nritems;
3268 	int dst_nritems;
3269 	int ret = 0;
3270 
3271 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3272 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3273 
3274 	src_nritems = btrfs_header_nritems(src);
3275 	dst_nritems = btrfs_header_nritems(dst);
3276 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3277 	if (push_items <= 0)
3278 		return 1;
3279 
3280 	if (src_nritems < 4)
3281 		return 1;
3282 
3283 	max_push = src_nritems / 2 + 1;
3284 	/* don't try to empty the node */
3285 	if (max_push >= src_nritems)
3286 		return 1;
3287 
3288 	if (max_push < push_items)
3289 		push_items = max_push;
3290 
3291 	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3292 	BUG_ON(ret < 0);
3293 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3294 				      btrfs_node_key_ptr_offset(0),
3295 				      (dst_nritems) *
3296 				      sizeof(struct btrfs_key_ptr));
3297 
3298 	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3299 				   src_nritems - push_items, push_items);
3300 	if (ret) {
3301 		btrfs_abort_transaction(trans, ret);
3302 		return ret;
3303 	}
3304 	copy_extent_buffer(dst, src,
3305 			   btrfs_node_key_ptr_offset(0),
3306 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3307 			   push_items * sizeof(struct btrfs_key_ptr));
3308 
3309 	btrfs_set_header_nritems(src, src_nritems - push_items);
3310 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3311 
3312 	btrfs_mark_buffer_dirty(src);
3313 	btrfs_mark_buffer_dirty(dst);
3314 
3315 	return ret;
3316 }
3317 
3318 /*
3319  * helper function to insert a new root level in the tree.
3320  * A new node is allocated, and a single item is inserted to
3321  * point to the existing root
3322  *
3323  * returns zero on success or < 0 on failure.
3324  */
3325 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3326 			   struct btrfs_root *root,
3327 			   struct btrfs_path *path, int level)
3328 {
3329 	struct btrfs_fs_info *fs_info = root->fs_info;
3330 	u64 lower_gen;
3331 	struct extent_buffer *lower;
3332 	struct extent_buffer *c;
3333 	struct extent_buffer *old;
3334 	struct btrfs_disk_key lower_key;
3335 	int ret;
3336 
3337 	BUG_ON(path->nodes[level]);
3338 	BUG_ON(path->nodes[level-1] != root->node);
3339 
3340 	lower = path->nodes[level-1];
3341 	if (level == 1)
3342 		btrfs_item_key(lower, &lower_key, 0);
3343 	else
3344 		btrfs_node_key(lower, &lower_key, 0);
3345 
3346 	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3347 				   &lower_key, level, root->node->start, 0);
3348 	if (IS_ERR(c))
3349 		return PTR_ERR(c);
3350 
3351 	root_add_used(root, fs_info->nodesize);
3352 
3353 	btrfs_set_header_nritems(c, 1);
3354 	btrfs_set_node_key(c, &lower_key, 0);
3355 	btrfs_set_node_blockptr(c, 0, lower->start);
3356 	lower_gen = btrfs_header_generation(lower);
3357 	WARN_ON(lower_gen != trans->transid);
3358 
3359 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3360 
3361 	btrfs_mark_buffer_dirty(c);
3362 
3363 	old = root->node;
3364 	ret = tree_mod_log_insert_root(root->node, c, 0);
3365 	BUG_ON(ret < 0);
3366 	rcu_assign_pointer(root->node, c);
3367 
3368 	/* the super has an extra ref to root->node */
3369 	free_extent_buffer(old);
3370 
3371 	add_root_to_dirty_list(root);
3372 	extent_buffer_get(c);
3373 	path->nodes[level] = c;
3374 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3375 	path->slots[level] = 0;
3376 	return 0;
3377 }
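
/*
 * Shape change performed above (illustrative):
 *
 *	before:  root = lower            after:  root = c
 *	         [ k0 | k1 | ... ]               [ k0 ]
 *	                                           |
 *	                                         lower
 *	                                         [ k0 | k1 | ... ]
 *
 * The new root 'c' holds a single pointer whose key duplicates the
 * first key of 'lower', so searches keep working unchanged.
 */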
3378 
3379 /*
3380  * worker function to insert a single pointer in a node.
3381  * the node should have enough room for the pointer already
3382  *
3383  * slot and level indicate where you want the key to go, and
3384  * blocknr is the block the key points to.
3385  */
3386 static void insert_ptr(struct btrfs_trans_handle *trans,
3387 		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3388 		       struct btrfs_disk_key *key, u64 bytenr,
3389 		       int slot, int level)
3390 {
3391 	struct extent_buffer *lower;
3392 	int nritems;
3393 	int ret;
3394 
3395 	BUG_ON(!path->nodes[level]);
3396 	btrfs_assert_tree_locked(path->nodes[level]);
3397 	lower = path->nodes[level];
3398 	nritems = btrfs_header_nritems(lower);
3399 	BUG_ON(slot > nritems);
3400 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3401 	if (slot != nritems) {
3402 		if (level) {
3403 			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3404 					nritems - slot);
3405 			BUG_ON(ret < 0);
3406 		}
3407 		memmove_extent_buffer(lower,
3408 			      btrfs_node_key_ptr_offset(slot + 1),
3409 			      btrfs_node_key_ptr_offset(slot),
3410 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3411 	}
3412 	if (level) {
3413 		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3414 				GFP_NOFS);
3415 		BUG_ON(ret < 0);
3416 	}
3417 	btrfs_set_node_key(lower, key, slot);
3418 	btrfs_set_node_blockptr(lower, slot, bytenr);
3419 	WARN_ON(trans->transid == 0);
3420 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3421 	btrfs_set_header_nritems(lower, nritems + 1);
3422 	btrfs_mark_buffer_dirty(lower);
3423 }
3424 
3425 /*
3426  * split the node at the specified level in path in two.
3427  * The path is corrected to point to the appropriate node after the split
3428  *
3429  * Before splitting this tries to make some room in the node by pushing
3430  * left and right, if either one works, it returns right away.
3431  *
3432  * returns 0 on success and < 0 on failure
3433  */
3434 static noinline int split_node(struct btrfs_trans_handle *trans,
3435 			       struct btrfs_root *root,
3436 			       struct btrfs_path *path, int level)
3437 {
3438 	struct btrfs_fs_info *fs_info = root->fs_info;
3439 	struct extent_buffer *c;
3440 	struct extent_buffer *split;
3441 	struct btrfs_disk_key disk_key;
3442 	int mid;
3443 	int ret;
3444 	u32 c_nritems;
3445 
3446 	c = path->nodes[level];
3447 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3448 	if (c == root->node) {
3449 		/*
3450 		 * trying to split the root, let's make a new one
3451 		 *
3452 		 * tree mod log: We don't log removal of the old root in
3453 		 * insert_new_root, because that root buffer will be kept as a
3454 		 * normal node. We are going to log removal of half of the
3455 		 * elements below with tree_mod_log_eb_copy. We're holding a
3456 		 * tree lock on the buffer, which is why we cannot race with
3457 		 * other tree_mod_log users.
3458 		 */
3459 		ret = insert_new_root(trans, root, path, level + 1);
3460 		if (ret)
3461 			return ret;
3462 	} else {
3463 		ret = push_nodes_for_insert(trans, root, path, level);
3464 		c = path->nodes[level];
3465 		if (!ret && btrfs_header_nritems(c) <
3466 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3467 			return 0;
3468 		if (ret < 0)
3469 			return ret;
3470 	}
3471 
3472 	c_nritems = btrfs_header_nritems(c);
3473 	mid = (c_nritems + 1) / 2;
3474 	btrfs_node_key(c, &disk_key, mid);
3475 
3476 	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3477 			&disk_key, level, c->start, 0);
3478 	if (IS_ERR(split))
3479 		return PTR_ERR(split);
3480 
3481 	root_add_used(root, fs_info->nodesize);
3482 	ASSERT(btrfs_header_level(c) == level);
3483 
3484 	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3485 	if (ret) {
3486 		btrfs_abort_transaction(trans, ret);
3487 		return ret;
3488 	}
3489 	copy_extent_buffer(split, c,
3490 			   btrfs_node_key_ptr_offset(0),
3491 			   btrfs_node_key_ptr_offset(mid),
3492 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3493 	btrfs_set_header_nritems(split, c_nritems - mid);
3494 	btrfs_set_header_nritems(c, mid);
3495 	ret = 0;
3496 
3497 	btrfs_mark_buffer_dirty(c);
3498 	btrfs_mark_buffer_dirty(split);
3499 
3500 	insert_ptr(trans, fs_info, path, &disk_key, split->start,
3501 		   path->slots[level + 1] + 1, level + 1);
3502 
3503 	if (path->slots[level] >= mid) {
3504 		path->slots[level] -= mid;
3505 		btrfs_tree_unlock(c);
3506 		free_extent_buffer(c);
3507 		path->nodes[level] = split;
3508 		path->slots[level + 1] += 1;
3509 	} else {
3510 		btrfs_tree_unlock(split);
3511 		free_extent_buffer(split);
3512 	}
3513 	return ret;
3514 }
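
/*
 * Split shape (illustrative, c_nritems == 7 so mid == 4):
 *
 *	c: [ p0 p1 p2 p3 p4 p5 p6 ]
 *	           ==>
 *	c: [ p0 p1 p2 p3 ]    split: [ p4 p5 p6 ]
 *
 * The parent gains a pointer to 'split' keyed by p4's key, right after
 * the pointer to 'c', and the path is re-aimed at whichever of the two
 * halves contains the original slot.
 */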
3515 
3516 /*
3517  * how many bytes are required to store the items in a leaf.  start
3518  * and nr indicate which items in the leaf to check.  This totals up the
3519  * space used both by the item structs and the item data
3520  */
3521 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3522 {
3523 	struct btrfs_item *start_item;
3524 	struct btrfs_item *end_item;
3525 	struct btrfs_map_token token;
3526 	int data_len;
3527 	int nritems = btrfs_header_nritems(l);
3528 	int end = min(nritems, start + nr) - 1;
3529 
3530 	if (!nr)
3531 		return 0;
3532 	btrfs_init_map_token(&token);
3533 	start_item = btrfs_item_nr(start);
3534 	end_item = btrfs_item_nr(end);
3535 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3536 		btrfs_token_item_size(l, start_item, &token);
3537 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3538 	data_len += sizeof(struct btrfs_item) * nr;
3539 	WARN_ON(data_len < 0);
3540 	return data_len;
3541 }
3542 
3543 /*
3544  * The space between the end of the leaf items and
3545  * the start of the leaf data.  IOW, how much room
3546  * the leaf has left for both items and data
3547  */
3548 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3549 				   struct extent_buffer *leaf)
3550 {
3551 	int nritems = btrfs_header_nritems(leaf);
3552 	int ret;
3553 
3554 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3555 	if (ret < 0) {
3556 		btrfs_crit(fs_info,
3557 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3558 			   ret,
3559 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3560 			   leaf_space_used(leaf, 0, nritems), nritems);
3561 	}
3562 	return ret;
3563 }
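
/*
 * Worked example of the space math (illustrative numbers): the leaf
 * data area is nodesize minus the header.  With 3 items whose payloads
 * total 300 bytes, leaf_space_used() returns
 * 300 + 3 * sizeof(struct btrfs_item), and btrfs_leaf_free_space() is
 * the data area size minus that: item structs grow from the front of
 * the data area while their payloads grow back from the end, so this
 * is exactly the gap left in the middle.
 */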
3564 
3565 /*
3566  * min slot controls the lowest index we're willing to push to the
3567  * right.  We'll push up to and including min_slot, but no lower
3568  */
3569 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3570 				      struct btrfs_path *path,
3571 				      int data_size, int empty,
3572 				      struct extent_buffer *right,
3573 				      int free_space, u32 left_nritems,
3574 				      u32 min_slot)
3575 {
3576 	struct extent_buffer *left = path->nodes[0];
3577 	struct extent_buffer *upper = path->nodes[1];
3578 	struct btrfs_map_token token;
3579 	struct btrfs_disk_key disk_key;
3580 	int slot;
3581 	u32 i;
3582 	int push_space = 0;
3583 	int push_items = 0;
3584 	struct btrfs_item *item;
3585 	u32 nr;
3586 	u32 right_nritems;
3587 	u32 data_end;
3588 	u32 this_item_size;
3589 
3590 	btrfs_init_map_token(&token);
3591 
3592 	if (empty)
3593 		nr = 0;
3594 	else
3595 		nr = max_t(u32, 1, min_slot);
3596 
3597 	if (path->slots[0] >= left_nritems)
3598 		push_space += data_size;
3599 
3600 	slot = path->slots[1];
3601 	i = left_nritems - 1;
3602 	while (i >= nr) {
3603 		item = btrfs_item_nr(i);
3604 
3605 		if (!empty && push_items > 0) {
3606 			if (path->slots[0] > i)
3607 				break;
3608 			if (path->slots[0] == i) {
3609 				int space = btrfs_leaf_free_space(fs_info, left);
3610 				if (space + push_space * 2 > free_space)
3611 					break;
3612 			}
3613 		}
3614 
3615 		if (path->slots[0] == i)
3616 			push_space += data_size;
3617 
3618 		this_item_size = btrfs_item_size(left, item);
3619 		if (this_item_size + sizeof(*item) + push_space > free_space)
3620 			break;
3621 
3622 		push_items++;
3623 		push_space += this_item_size + sizeof(*item);
3624 		if (i == 0)
3625 			break;
3626 		i--;
3627 	}
3628 
3629 	if (push_items == 0)
3630 		goto out_unlock;
3631 
3632 	WARN_ON(!empty && push_items == left_nritems);
3633 
3634 	/* push left to right */
3635 	right_nritems = btrfs_header_nritems(right);
3636 
3637 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3638 	push_space -= leaf_data_end(fs_info, left);
3639 
3640 	/* make room in the right data area */
3641 	data_end = leaf_data_end(fs_info, right);
3642 	memmove_extent_buffer(right,
3643 			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3644 			      BTRFS_LEAF_DATA_OFFSET + data_end,
3645 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3646 
3647 	/* copy from the left data area */
3648 	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3649 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3650 		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3651 		     push_space);
3652 
3653 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3654 			      btrfs_item_nr_offset(0),
3655 			      right_nritems * sizeof(struct btrfs_item));
3656 
3657 	/* copy the items from left to right */
3658 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3659 		   btrfs_item_nr_offset(left_nritems - push_items),
3660 		   push_items * sizeof(struct btrfs_item));
3661 
3662 	/* update the item pointers */
3663 	right_nritems += push_items;
3664 	btrfs_set_header_nritems(right, right_nritems);
3665 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3666 	for (i = 0; i < right_nritems; i++) {
3667 		item = btrfs_item_nr(i);
3668 		push_space -= btrfs_token_item_size(right, item, &token);
3669 		btrfs_set_token_item_offset(right, item, push_space, &token);
3670 	}
3671 
3672 	left_nritems -= push_items;
3673 	btrfs_set_header_nritems(left, left_nritems);
3674 
3675 	if (left_nritems)
3676 		btrfs_mark_buffer_dirty(left);
3677 	else
3678 		clean_tree_block(fs_info, left);
3679 
3680 	btrfs_mark_buffer_dirty(right);
3681 
3682 	btrfs_item_key(right, &disk_key, 0);
3683 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3684 	btrfs_mark_buffer_dirty(upper);
3685 
3686 	/* then fixup the leaf pointer in the path */
3687 	if (path->slots[0] >= left_nritems) {
3688 		path->slots[0] -= left_nritems;
3689 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3690 			clean_tree_block(fs_info, path->nodes[0]);
3691 		btrfs_tree_unlock(path->nodes[0]);
3692 		free_extent_buffer(path->nodes[0]);
3693 		path->nodes[0] = right;
3694 		path->slots[1] += 1;
3695 	} else {
3696 		btrfs_tree_unlock(right);
3697 		free_extent_buffer(right);
3698 	}
3699 	return 0;
3700 
3701 out_unlock:
3702 	btrfs_tree_unlock(right);
3703 	free_extent_buffer(right);
3704 	return 1;
3705 }
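
/*
 * A note on the arithmetic above: inside a leaf the array of struct
 * btrfs_item grows forward from the header while the item data grows
 * backwards from the end of the block, with the free space in between.
 * A rough sketch (proportions illustrative only):
 *
 *   [header][item 0][item 1] ... [item N] ... free ... [data N] ... [data 0]
 *
 * Pushing an item right therefore moves both its metadata and its data
 * payload, which is why the scan loop charges this_item_size +
 * sizeof(*item) per candidate against the right leaf's free space.
 */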
3706 
3707 /*
3708  * push some data in the path leaf to the right, trying to free up at
3709  * least data_size bytes.
3710  *
3711  * returns 1 if the push failed because the other node didn't have enough
3712  * room, 0 if everything worked out and < 0 if there were major errors.
3713  *
3714  * this will push starting from min_slot to the end of the leaf.  It won't
3715  * push any slot lower than min_slot
3716  */
3717 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3718 			   *root, struct btrfs_path *path,
3719 			   int min_data_size, int data_size,
3720 			   int empty, u32 min_slot)
3721 {
3722 	struct btrfs_fs_info *fs_info = root->fs_info;
3723 	struct extent_buffer *left = path->nodes[0];
3724 	struct extent_buffer *right;
3725 	struct extent_buffer *upper;
3726 	int slot;
3727 	int free_space;
3728 	u32 left_nritems;
3729 	int ret;
3730 
3731 	if (!path->nodes[1])
3732 		return 1;
3733 
3734 	slot = path->slots[1];
3735 	upper = path->nodes[1];
3736 	if (slot >= btrfs_header_nritems(upper) - 1)
3737 		return 1;
3738 
3739 	btrfs_assert_tree_locked(path->nodes[1]);
3740 
3741 	right = read_node_slot(fs_info, upper, slot + 1);
3742 	/*
3743 	 * If slot + 1 is not valid or we failed to read the right node,
3744 	 * it's no big deal, just return.
3745 	 */
3746 	if (IS_ERR(right))
3747 		return 1;
3748 
3749 	btrfs_tree_lock(right);
3750 	btrfs_set_lock_blocking(right);
3751 
3752 	free_space = btrfs_leaf_free_space(fs_info, right);
3753 	if (free_space < data_size)
3754 		goto out_unlock;
3755 
3756 	/* cow and double check */
3757 	ret = btrfs_cow_block(trans, root, right, upper,
3758 			      slot + 1, &right);
3759 	if (ret)
3760 		goto out_unlock;
3761 
3762 	free_space = btrfs_leaf_free_space(fs_info, right);
3763 	if (free_space < data_size)
3764 		goto out_unlock;
3765 
3766 	left_nritems = btrfs_header_nritems(left);
3767 	if (left_nritems == 0)
3768 		goto out_unlock;
3769 
3770 	if (path->slots[0] == left_nritems && !empty) {
3771 		/* Key greater than all keys in the leaf, right neighbor has
3772 		 * enough room for it and we're not emptying our leaf to delete
3773 		 * it, therefore use right neighbor to insert the new item and
3774 		 * no need to touch/dirty our left leaf. */
3775 		btrfs_tree_unlock(left);
3776 		free_extent_buffer(left);
3777 		path->nodes[0] = right;
3778 		path->slots[0] = 0;
3779 		path->slots[1]++;
3780 		return 0;
3781 	}
3782 
3783 	return __push_leaf_right(fs_info, path, min_data_size, empty,
3784 				right, free_space, left_nritems, min_slot);
3785 out_unlock:
3786 	btrfs_tree_unlock(right);
3787 	free_extent_buffer(right);
3788 	return 1;
3789 }
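
/*
 * A minimal sketch of how a caller consumes the push return convention
 * (illustrative only; real callers such as split_leaf do more
 * bookkeeping around it):
 *
 *	ret = push_leaf_right(trans, root, path, 1, data_size, 0, 0);
 *	if (ret < 0)
 *		return ret;	// hard error, e.g. a failed COW
 *	if (ret == 0)
 *		return 0;	// enough room was made in place
 *
 * and ret == 1 means the right sibling was full, so the caller falls
 * back to pushing left.
 */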
3790 
3791 /*
3792  * push some data in the path leaf to the left, trying to free up at
3793  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3794  *
3795  * max_slot can put a limit on how far into the leaf we'll push items.  The
3796  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3797  * items
3798  */
3799 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3800 				     struct btrfs_path *path, int data_size,
3801 				     int empty, struct extent_buffer *left,
3802 				     int free_space, u32 right_nritems,
3803 				     u32 max_slot)
3804 {
3805 	struct btrfs_disk_key disk_key;
3806 	struct extent_buffer *right = path->nodes[0];
3807 	int i;
3808 	int push_space = 0;
3809 	int push_items = 0;
3810 	struct btrfs_item *item;
3811 	u32 old_left_nritems;
3812 	u32 nr;
3813 	int ret = 0;
3814 	u32 this_item_size;
3815 	u32 old_left_item_size;
3816 	struct btrfs_map_token token;
3817 
3818 	btrfs_init_map_token(&token);
3819 
3820 	if (empty)
3821 		nr = min(right_nritems, max_slot);
3822 	else
3823 		nr = min(right_nritems - 1, max_slot);
3824 
3825 	for (i = 0; i < nr; i++) {
3826 		item = btrfs_item_nr(i);
3827 
3828 		if (!empty && push_items > 0) {
3829 			if (path->slots[0] < i)
3830 				break;
3831 			if (path->slots[0] == i) {
3832 				int space = btrfs_leaf_free_space(fs_info, right);
3833 				if (space + push_space * 2 > free_space)
3834 					break;
3835 			}
3836 		}
3837 
3838 		if (path->slots[0] == i)
3839 			push_space += data_size;
3840 
3841 		this_item_size = btrfs_item_size(right, item);
3842 		if (this_item_size + sizeof(*item) + push_space > free_space)
3843 			break;
3844 
3845 		push_items++;
3846 		push_space += this_item_size + sizeof(*item);
3847 	}
3848 
3849 	if (push_items == 0) {
3850 		ret = 1;
3851 		goto out;
3852 	}
3853 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3854 
3855 	/* push data from right to left */
3856 	copy_extent_buffer(left, right,
3857 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3858 			   btrfs_item_nr_offset(0),
3859 			   push_items * sizeof(struct btrfs_item));
3860 
3861 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3862 		     btrfs_item_offset_nr(right, push_items - 1);
3863 
3864 	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3865 		     leaf_data_end(fs_info, left) - push_space,
3866 		     BTRFS_LEAF_DATA_OFFSET +
3867 		     btrfs_item_offset_nr(right, push_items - 1),
3868 		     push_space);
3869 	old_left_nritems = btrfs_header_nritems(left);
3870 	BUG_ON(old_left_nritems <= 0);
3871 
3872 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3873 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3874 		u32 ioff;
3875 
3876 		item = btrfs_item_nr(i);
3877 
3878 		ioff = btrfs_token_item_offset(left, item, &token);
3879 		btrfs_set_token_item_offset(left, item,
3880 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3881 		      &token);
3882 	}
3883 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3884 
3885 	/* fixup right node */
3886 	if (push_items > right_nritems)
3887 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3888 		       right_nritems);
3889 
3890 	if (push_items < right_nritems) {
3891 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3892 						  leaf_data_end(fs_info, right);
3893 		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3894 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3895 				      BTRFS_LEAF_DATA_OFFSET +
3896 				      leaf_data_end(fs_info, right), push_space);
3897 
3898 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3899 			      btrfs_item_nr_offset(push_items),
3900 			     (btrfs_header_nritems(right) - push_items) *
3901 			     sizeof(struct btrfs_item));
3902 	}
3903 	right_nritems -= push_items;
3904 	btrfs_set_header_nritems(right, right_nritems);
3905 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3906 	for (i = 0; i < right_nritems; i++) {
3907 		item = btrfs_item_nr(i);
3908 
3909 		push_space = push_space - btrfs_token_item_size(right,
3910 								item, &token);
3911 		btrfs_set_token_item_offset(right, item, push_space, &token);
3912 	}
3913 
3914 	btrfs_mark_buffer_dirty(left);
3915 	if (right_nritems)
3916 		btrfs_mark_buffer_dirty(right);
3917 	else
3918 		clean_tree_block(fs_info, right);
3919 
3920 	btrfs_item_key(right, &disk_key, 0);
3921 	fixup_low_keys(path, &disk_key, 1);
3922 
3923 	/* then fixup the leaf pointer in the path */
3924 	if (path->slots[0] < push_items) {
3925 		path->slots[0] += old_left_nritems;
3926 		btrfs_tree_unlock(path->nodes[0]);
3927 		free_extent_buffer(path->nodes[0]);
3928 		path->nodes[0] = left;
3929 		path->slots[1] -= 1;
3930 	} else {
3931 		btrfs_tree_unlock(left);
3932 		free_extent_buffer(left);
3933 		path->slots[0] -= push_items;
3934 	}
3935 	BUG_ON(path->slots[0] < 0);
3936 	return ret;
3937 out:
3938 	btrfs_tree_unlock(left);
3939 	free_extent_buffer(left);
3940 	return ret;
3941 }
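
/*
 * Worked example for the offset rewrite loop above (numbers purely
 * illustrative): with a 16000 byte data area, if the left leaf's
 * existing data started at offset 15900 (old_left_item_size), every
 * pushed-in item's offset drops by 16000 - 15900 = 100.  A 50 byte item
 * that sat at offset 15950 in the right leaf thus lands at 15850,
 * ending exactly where the left leaf's old data begins.
 */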
3942 
3943 /*
3944  * push some data in the path leaf to the left, trying to free up at
3945  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3946  *
3947  * max_slot can put a limit on how far into the leaf we'll push items.  The
3948  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3949  * items
3950  */
3951 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3952 			  *root, struct btrfs_path *path, int min_data_size,
3953 			  int data_size, int empty, u32 max_slot)
3954 {
3955 	struct btrfs_fs_info *fs_info = root->fs_info;
3956 	struct extent_buffer *right = path->nodes[0];
3957 	struct extent_buffer *left;
3958 	int slot;
3959 	int free_space;
3960 	u32 right_nritems;
3961 	int ret = 0;
3962 
3963 	slot = path->slots[1];
3964 	if (slot == 0)
3965 		return 1;
3966 	if (!path->nodes[1])
3967 		return 1;
3968 
3969 	right_nritems = btrfs_header_nritems(right);
3970 	if (right_nritems == 0)
3971 		return 1;
3972 
3973 	btrfs_assert_tree_locked(path->nodes[1]);
3974 
3975 	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
3976 	/*
3977 	 * If slot - 1 is not valid or we failed to read the left node,
3978 	 * it's no big deal, just return.
3979 	 */
3980 	if (IS_ERR(left))
3981 		return 1;
3982 
3983 	btrfs_tree_lock(left);
3984 	btrfs_set_lock_blocking(left);
3985 
3986 	free_space = btrfs_leaf_free_space(fs_info, left);
3987 	if (free_space < data_size) {
3988 		ret = 1;
3989 		goto out;
3990 	}
3991 
3992 	/* cow and double check */
3993 	ret = btrfs_cow_block(trans, root, left,
3994 			      path->nodes[1], slot - 1, &left);
3995 	if (ret) {
3996 		/* we hit -ENOSPC, but it isn't fatal here */
3997 		if (ret == -ENOSPC)
3998 			ret = 1;
3999 		goto out;
4000 	}
4001 
4002 	free_space = btrfs_leaf_free_space(fs_info, left);
4003 	if (free_space < data_size) {
4004 		ret = 1;
4005 		goto out;
4006 	}
4007 
4008 	return __push_leaf_left(fs_info, path, min_data_size,
4009 			       empty, left, free_space, right_nritems,
4010 			       max_slot);
4011 out:
4012 	btrfs_tree_unlock(left);
4013 	free_extent_buffer(left);
4014 	return ret;
4015 }
4016 
4017 /*
4018  * helper for split_leaf: move the items from 'mid' onwards out of leaf
4019  * 'l' into the new leaf 'right', and fix up the path to follow the split.
4020  */
4021 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4022 				    struct btrfs_fs_info *fs_info,
4023 				    struct btrfs_path *path,
4024 				    struct extent_buffer *l,
4025 				    struct extent_buffer *right,
4026 				    int slot, int mid, int nritems)
4027 {
4028 	int data_copy_size;
4029 	int rt_data_off;
4030 	int i;
4031 	struct btrfs_disk_key disk_key;
4032 	struct btrfs_map_token token;
4033 
4034 	btrfs_init_map_token(&token);
4035 
4036 	nritems = nritems - mid;
4037 	btrfs_set_header_nritems(right, nritems);
4038 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4039 
4040 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4041 			   btrfs_item_nr_offset(mid),
4042 			   nritems * sizeof(struct btrfs_item));
4043 
4044 	copy_extent_buffer(right, l,
4045 		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4046 		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4047 		     leaf_data_end(fs_info, l), data_copy_size);
4048 
4049 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4050 
4051 	for (i = 0; i < nritems; i++) {
4052 		struct btrfs_item *item = btrfs_item_nr(i);
4053 		u32 ioff;
4054 
4055 		ioff = btrfs_token_item_offset(right, item, &token);
4056 		btrfs_set_token_item_offset(right, item,
4057 					    ioff + rt_data_off, &token);
4058 	}
4059 
4060 	btrfs_set_header_nritems(l, mid);
4061 	btrfs_item_key(right, &disk_key, 0);
4062 	insert_ptr(trans, fs_info, path, &disk_key, right->start,
4063 		   path->slots[1] + 1, 1);
4064 
4065 	btrfs_mark_buffer_dirty(right);
4066 	btrfs_mark_buffer_dirty(l);
4067 	BUG_ON(path->slots[0] != slot);
4068 
4069 	if (mid <= slot) {
4070 		btrfs_tree_unlock(path->nodes[0]);
4071 		free_extent_buffer(path->nodes[0]);
4072 		path->nodes[0] = right;
4073 		path->slots[0] -= mid;
4074 		path->slots[1] += 1;
4075 	} else {
4076 		btrfs_tree_unlock(right);
4077 		free_extent_buffer(right);
4078 	}
4079 
4080 	BUG_ON(path->slots[0] < 0);
4081 }
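
/*
 * Worked example for the rt_data_off adjustment above (numbers
 * illustrative): with a 16000 byte data area, if the items moved to
 * 'right' occupied offsets [12000, 14000) in 'l', then rt_data_off is
 * 16000 - 14000 = 2000, and adding it to each copied offset slides that
 * block of data so it ends flush against the end of the new leaf.
 */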
4082 
4083 /*
4084  * double splits happen when we need to insert a big item in the middle
4085  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4086  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4087  *          A                 B                 C
4088  *
4089  * We avoid this by trying to push the items on either side of our target
4090  * into the adjacent leaves.  If all goes well we can avoid the double split
4091  * completely.
4092  */
4093 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4094 					  struct btrfs_root *root,
4095 					  struct btrfs_path *path,
4096 					  int data_size)
4097 {
4098 	struct btrfs_fs_info *fs_info = root->fs_info;
4099 	int ret;
4100 	int progress = 0;
4101 	int slot;
4102 	u32 nritems;
4103 	int space_needed = data_size;
4104 
4105 	slot = path->slots[0];
4106 	if (slot < btrfs_header_nritems(path->nodes[0]))
4107 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4108 
4109 	/*
4110 	 * try to push all the items after our slot into the
4111 	 * right leaf
4112 	 */
4113 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4114 	if (ret < 0)
4115 		return ret;
4116 
4117 	if (ret == 0)
4118 		progress++;
4119 
4120 	nritems = btrfs_header_nritems(path->nodes[0]);
4121 	/*
4122 	 * our goal is to get our slot at the start or end of a leaf.  If
4123 	 * we've done so we're done
4124 	 */
4125 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4126 		return 0;
4127 
4128 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4129 		return 0;
4130 
4131 	/* try to push all the items before our slot into the previous leaf */
4132 	slot = path->slots[0];
4133 	space_needed = data_size;
4134 	if (slot > 0)
4135 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4136 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4137 	if (ret < 0)
4138 		return ret;
4139 
4140 	if (ret == 0)
4141 		progress++;
4142 
4143 	if (progress)
4144 		return 0;
4145 	return 1;
4146 }
4147 
4148 /*
4149  * split the path's leaf in two, making sure there is at least data_size
4150  * available for the resulting leaf level of the path.
4151  *
4152  * returns 0 if all went well and < 0 on failure.
4153  */
4154 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4155 			       struct btrfs_root *root,
4156 			       const struct btrfs_key *ins_key,
4157 			       struct btrfs_path *path, int data_size,
4158 			       int extend)
4159 {
4160 	struct btrfs_disk_key disk_key;
4161 	struct extent_buffer *l;
4162 	u32 nritems;
4163 	int mid;
4164 	int slot;
4165 	struct extent_buffer *right;
4166 	struct btrfs_fs_info *fs_info = root->fs_info;
4167 	int ret = 0;
4168 	int wret;
4169 	int split;
4170 	int num_doubles = 0;
4171 	int tried_avoid_double = 0;
4172 
4173 	l = path->nodes[0];
4174 	slot = path->slots[0];
4175 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4176 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4177 		return -EOVERFLOW;
4178 
4179 	/* first try to make some room by pushing left and right */
4180 	if (data_size && path->nodes[1]) {
4181 		int space_needed = data_size;
4182 
4183 		if (slot < btrfs_header_nritems(l))
4184 			space_needed -= btrfs_leaf_free_space(fs_info, l);
4185 
4186 		wret = push_leaf_right(trans, root, path, space_needed,
4187 				       space_needed, 0, 0);
4188 		if (wret < 0)
4189 			return wret;
4190 		if (wret) {
4191 			space_needed = data_size;
4192 			if (slot > 0)
4193 				space_needed -= btrfs_leaf_free_space(fs_info,
4194 								      l);
4195 			wret = push_leaf_left(trans, root, path, space_needed,
4196 					      space_needed, 0, (u32)-1);
4197 			if (wret < 0)
4198 				return wret;
4199 		}
4200 		l = path->nodes[0];
4201 
4202 		/* did the pushes work? */
4203 		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4204 			return 0;
4205 	}
4206 
4207 	if (!path->nodes[1]) {
4208 		ret = insert_new_root(trans, root, path, 1);
4209 		if (ret)
4210 			return ret;
4211 	}
4212 again:
4213 	split = 1;
4214 	l = path->nodes[0];
4215 	slot = path->slots[0];
4216 	nritems = btrfs_header_nritems(l);
4217 	mid = (nritems + 1) / 2;
4218 
4219 	if (mid <= slot) {
4220 		if (nritems == 1 ||
4221 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4222 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4223 			if (slot >= nritems) {
4224 				split = 0;
4225 			} else {
4226 				mid = slot;
4227 				if (mid != nritems &&
4228 				    leaf_space_used(l, mid, nritems - mid) +
4229 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4230 					if (data_size && !tried_avoid_double)
4231 						goto push_for_double;
4232 					split = 2;
4233 				}
4234 			}
4235 		}
4236 	} else {
4237 		if (leaf_space_used(l, 0, mid) + data_size >
4238 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4239 			if (!extend && data_size && slot == 0) {
4240 				split = 0;
4241 			} else if ((extend || !data_size) && slot == 0) {
4242 				mid = 1;
4243 			} else {
4244 				mid = slot;
4245 				if (mid != nritems &&
4246 				    leaf_space_used(l, mid, nritems - mid) +
4247 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4248 					if (data_size && !tried_avoid_double)
4249 						goto push_for_double;
4250 					split = 2;
4251 				}
4252 			}
4253 		}
4254 	}
4255 
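	/*
	 * At this point 'split' encodes the decision made above: 1 is a
	 * regular split at 'mid'; 0 means the new leaf starts out empty
	 * and the path is simply pointed at it, so the pending insert
	 * lands there; 2 means a single split is not enough and we will
	 * loop back to 'again' for a second pass.
	 */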
4256 	if (split == 0)
4257 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4258 	else
4259 		btrfs_item_key(l, &disk_key, mid);
4260 
4261 	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4262 			&disk_key, 0, l->start, 0);
4263 	if (IS_ERR(right))
4264 		return PTR_ERR(right);
4265 
4266 	root_add_used(root, fs_info->nodesize);
4267 
4268 	if (split == 0) {
4269 		if (mid <= slot) {
4270 			btrfs_set_header_nritems(right, 0);
4271 			insert_ptr(trans, fs_info, path, &disk_key,
4272 				   right->start, path->slots[1] + 1, 1);
4273 			btrfs_tree_unlock(path->nodes[0]);
4274 			free_extent_buffer(path->nodes[0]);
4275 			path->nodes[0] = right;
4276 			path->slots[0] = 0;
4277 			path->slots[1] += 1;
4278 		} else {
4279 			btrfs_set_header_nritems(right, 0);
4280 			insert_ptr(trans, fs_info, path, &disk_key,
4281 				   right->start, path->slots[1], 1);
4282 			btrfs_tree_unlock(path->nodes[0]);
4283 			free_extent_buffer(path->nodes[0]);
4284 			path->nodes[0] = right;
4285 			path->slots[0] = 0;
4286 			if (path->slots[1] == 0)
4287 				fixup_low_keys(path, &disk_key, 1);
4288 		}
4289 		/*
4290 		 * The new leaf 'right' was created empty to receive the
4291 		 * required ins_len; btrfs_mark_buffer_dirty() happens on it
4292 		 * later, once the caller has copied the new item in.
4293 		 */
4294 		return ret;
4295 	}
4296 
4297 	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4298 
4299 	if (split == 2) {
4300 		BUG_ON(num_doubles != 0);
4301 		num_doubles++;
4302 		goto again;
4303 	}
4304 
4305 	return 0;
4306 
4307 push_for_double:
4308 	push_for_double_split(trans, root, path, data_size);
4309 	tried_avoid_double = 1;
4310 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4311 		return 0;
4312 	goto again;
4313 }
4314 
4315 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4316 					 struct btrfs_root *root,
4317 					 struct btrfs_path *path, int ins_len)
4318 {
4319 	struct btrfs_fs_info *fs_info = root->fs_info;
4320 	struct btrfs_key key;
4321 	struct extent_buffer *leaf;
4322 	struct btrfs_file_extent_item *fi;
4323 	u64 extent_len = 0;
4324 	u32 item_size;
4325 	int ret;
4326 
4327 	leaf = path->nodes[0];
4328 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4329 
4330 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4331 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4332 
4333 	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4334 		return 0;
4335 
4336 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4337 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4338 		fi = btrfs_item_ptr(leaf, path->slots[0],
4339 				    struct btrfs_file_extent_item);
4340 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4341 	}
4342 	btrfs_release_path(path);
4343 
4344 	path->keep_locks = 1;
4345 	path->search_for_split = 1;
4346 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4347 	path->search_for_split = 0;
4348 	if (ret > 0)
4349 		ret = -EAGAIN;
4350 	if (ret < 0)
4351 		goto err;
4352 
4353 	ret = -EAGAIN;
4354 	leaf = path->nodes[0];
4355 	/* if our item isn't there, return now */
4356 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4357 		goto err;
4358 
4359 	/* the leaf has changed, it now has room.  return now */
4360 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4361 		goto err;
4362 
4363 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4364 		fi = btrfs_item_ptr(leaf, path->slots[0],
4365 				    struct btrfs_file_extent_item);
4366 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4367 			goto err;
4368 	}
4369 
4370 	btrfs_set_path_blocking(path);
4371 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4372 	if (ret)
4373 		goto err;
4374 
4375 	path->keep_locks = 0;
4376 	btrfs_unlock_up_safe(path, 1);
4377 	return 0;
4378 err:
4379 	path->keep_locks = 0;
4380 	return ret;
4381 }
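
/*
 * Note on the retry contract above (sketch only): -EAGAIN means the item
 * moved or changed while the path was dropped and re-searched, so the
 * caller has to redo its lookup, e.g.:
 *
 *	ret = setup_leaf_for_split(trans, root, path, ins_len);
 *	if (ret == -EAGAIN)
 *		goto search_again;	// hypothetical caller label
 *
 * btrfs_split_item() and btrfs_duplicate_item() below simply propagate
 * the error to their callers.
 */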
4382 
4383 static noinline int split_item(struct btrfs_fs_info *fs_info,
4384 			       struct btrfs_path *path,
4385 			       const struct btrfs_key *new_key,
4386 			       unsigned long split_offset)
4387 {
4388 	struct extent_buffer *leaf;
4389 	struct btrfs_item *item;
4390 	struct btrfs_item *new_item;
4391 	int slot;
4392 	char *buf;
4393 	u32 nritems;
4394 	u32 item_size;
4395 	u32 orig_offset;
4396 	struct btrfs_disk_key disk_key;
4397 
4398 	leaf = path->nodes[0];
4399 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4400 
4401 	btrfs_set_path_blocking(path);
4402 
4403 	item = btrfs_item_nr(path->slots[0]);
4404 	orig_offset = btrfs_item_offset(leaf, item);
4405 	item_size = btrfs_item_size(leaf, item);
4406 
4407 	buf = kmalloc(item_size, GFP_NOFS);
4408 	if (!buf)
4409 		return -ENOMEM;
4410 
4411 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4412 			    path->slots[0]), item_size);
4413 
4414 	slot = path->slots[0] + 1;
4415 	nritems = btrfs_header_nritems(leaf);
4416 	if (slot != nritems) {
4417 		/* shift the items */
4418 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4419 				btrfs_item_nr_offset(slot),
4420 				(nritems - slot) * sizeof(struct btrfs_item));
4421 	}
4422 
4423 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4424 	btrfs_set_item_key(leaf, &disk_key, slot);
4425 
4426 	new_item = btrfs_item_nr(slot);
4427 
4428 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4429 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4430 
4431 	btrfs_set_item_offset(leaf, item,
4432 			      orig_offset + item_size - split_offset);
4433 	btrfs_set_item_size(leaf, item, split_offset);
4434 
4435 	btrfs_set_header_nritems(leaf, nritems + 1);
4436 
4437 	/* write the data for the start of the original item */
4438 	write_extent_buffer(leaf, buf,
4439 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4440 			    split_offset);
4441 
4442 	/* write the data for the new item */
4443 	write_extent_buffer(leaf, buf + split_offset,
4444 			    btrfs_item_ptr_offset(leaf, slot),
4445 			    item_size - split_offset);
4446 	btrfs_mark_buffer_dirty(leaf);
4447 
4448 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4449 	kfree(buf);
4450 	return 0;
4451 }
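
/*
 * Worked example for the offset/size bookkeeping above (numbers
 * illustrative): splitting a 100 byte item that starts at data offset
 * 400 with split_offset 40 leaves the original item with size 40 at
 * offset 400 + 100 - 40 = 460 and gives the new item size 60 at the old
 * offset 400.  The two halves still occupy the original 100 bytes, so
 * no other item's data has to move; only 'buf' is written back in two
 * pieces.
 */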
4452 
4453 /*
4454  * This function splits a single item into two items,
4455  * giving 'new_key' to the new item and splitting the
4456  * old one at split_offset (from the start of the item).
4457  *
4458  * The path may be released by this operation.  After
4459  * the split, the path is pointing to the old item.  The
4460  * new item is going to be in the same node as the old one.
4461  *
4462  * Note, the item being split must be small enough to live alone on
4463  * a tree block with room for one extra struct btrfs_item
4464  *
4465  * This allows us to split the item in place, keeping a lock on the
4466  * leaf the entire time.
4467  */
4468 int btrfs_split_item(struct btrfs_trans_handle *trans,
4469 		     struct btrfs_root *root,
4470 		     struct btrfs_path *path,
4471 		     const struct btrfs_key *new_key,
4472 		     unsigned long split_offset)
4473 {
4474 	int ret;
4475 	ret = setup_leaf_for_split(trans, root, path,
4476 				   sizeof(struct btrfs_item));
4477 	if (ret)
4478 		return ret;
4479 
4480 	ret = split_item(root->fs_info, path, new_key, split_offset);
4481 	return ret;
4482 }
4483 
4484 /*
4485  * This function duplicates an item, giving 'new_key' to the new item.
4486  * It guarantees both items live in the same tree leaf and the new item
4487  * is contiguous with the original item.
4488  *
4489  * This allows us to split a file extent in place, keeping a lock on the
4490  * leaf the entire time.
4491  */
4492 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4493 			 struct btrfs_root *root,
4494 			 struct btrfs_path *path,
4495 			 const struct btrfs_key *new_key)
4496 {
4497 	struct extent_buffer *leaf;
4498 	int ret;
4499 	u32 item_size;
4500 
4501 	leaf = path->nodes[0];
4502 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4503 	ret = setup_leaf_for_split(trans, root, path,
4504 				   item_size + sizeof(struct btrfs_item));
4505 	if (ret)
4506 		return ret;
4507 
4508 	path->slots[0]++;
4509 	setup_items_for_insert(root, path, new_key, &item_size,
4510 			       item_size, item_size +
4511 			       sizeof(struct btrfs_item), 1);
4512 	leaf = path->nodes[0];
4513 	memcpy_extent_buffer(leaf,
4514 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4515 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4516 			     item_size);
4517 	return 0;
4518 }
4519 
4520 /*
4521  * make the item pointed to by the path smaller.  new_size indicates
4522  * how small to make it, and from_end tells us if we just chop bytes
4523  * off the end of the item or if we shift the item to chop bytes off
4524  * the front.
4525  */
4526 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4527 			 struct btrfs_path *path, u32 new_size, int from_end)
4528 {
4529 	int slot;
4530 	struct extent_buffer *leaf;
4531 	struct btrfs_item *item;
4532 	u32 nritems;
4533 	unsigned int data_end;
4534 	unsigned int old_data_start;
4535 	unsigned int old_size;
4536 	unsigned int size_diff;
4537 	int i;
4538 	struct btrfs_map_token token;
4539 
4540 	btrfs_init_map_token(&token);
4541 
4542 	leaf = path->nodes[0];
4543 	slot = path->slots[0];
4544 
4545 	old_size = btrfs_item_size_nr(leaf, slot);
4546 	if (old_size == new_size)
4547 		return;
4548 
4549 	nritems = btrfs_header_nritems(leaf);
4550 	data_end = leaf_data_end(fs_info, leaf);
4551 
4552 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4553 
4554 	size_diff = old_size - new_size;
4555 
4556 	BUG_ON(slot < 0);
4557 	BUG_ON(slot >= nritems);
4558 
4559 	/*
4560 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4561 	 */
4562 	/* first correct the data pointers */
4563 	for (i = slot; i < nritems; i++) {
4564 		u32 ioff;
4565 		item = btrfs_item_nr(i);
4566 
4567 		ioff = btrfs_token_item_offset(leaf, item, &token);
4568 		btrfs_set_token_item_offset(leaf, item,
4569 					    ioff + size_diff, &token);
4570 	}
4571 
4572 	/* shift the data */
4573 	if (from_end) {
4574 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4575 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4576 			      data_end, old_data_start + new_size - data_end);
4577 	} else {
4578 		struct btrfs_disk_key disk_key;
4579 		u64 offset;
4580 
4581 		btrfs_item_key(leaf, &disk_key, slot);
4582 
4583 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4584 			unsigned long ptr;
4585 			struct btrfs_file_extent_item *fi;
4586 
4587 			fi = btrfs_item_ptr(leaf, slot,
4588 					    struct btrfs_file_extent_item);
4589 			fi = (struct btrfs_file_extent_item *)(
4590 			     (unsigned long)fi - size_diff);
4591 
4592 			if (btrfs_file_extent_type(leaf, fi) ==
4593 			    BTRFS_FILE_EXTENT_INLINE) {
4594 				ptr = btrfs_item_ptr_offset(leaf, slot);
4595 				memmove_extent_buffer(leaf, ptr,
4596 				      (unsigned long)fi,
4597 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4598 			}
4599 		}
4600 
4601 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4602 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4603 			      data_end, old_data_start - data_end);
4604 
4605 		offset = btrfs_disk_key_offset(&disk_key);
4606 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4607 		btrfs_set_item_key(leaf, &disk_key, slot);
4608 		if (slot == 0)
4609 			fixup_low_keys(path, &disk_key, 1);
4610 	}
4611 
4612 	item = btrfs_item_nr(slot);
4613 	btrfs_set_item_size(leaf, item, new_size);
4614 	btrfs_mark_buffer_dirty(leaf);
4615 
4616 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4617 		btrfs_print_leaf(leaf);
4618 		BUG();
4619 	}
4620 }
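
/*
 * The two truncation directions above, with illustrative numbers for a
 * 100 byte item shrunk to 60 (size_diff 40): with from_end set, the
 * item's first 60 bytes are kept and slide up by 40 together with all
 * the data below them; with from_end clear, the leading 40 bytes are
 * dropped, the kept tail stays in place while only the data below the
 * item slides up, and the key offset is bumped by 40 so the surviving
 * bytes keep their logical position (inline file extents additionally
 * get their header moved to the new start).
 */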
4621 
4622 /*
4623  * make the item pointed to by the path bigger, data_size is the added size.
4624  */
4625 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4626 		       u32 data_size)
4627 {
4628 	int slot;
4629 	struct extent_buffer *leaf;
4630 	struct btrfs_item *item;
4631 	u32 nritems;
4632 	unsigned int data_end;
4633 	unsigned int old_data;
4634 	unsigned int old_size;
4635 	int i;
4636 	struct btrfs_map_token token;
4637 
4638 	btrfs_init_map_token(&token);
4639 
4640 	leaf = path->nodes[0];
4641 
4642 	nritems = btrfs_header_nritems(leaf);
4643 	data_end = leaf_data_end(fs_info, leaf);
4644 
4645 	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4646 		btrfs_print_leaf(leaf);
4647 		BUG();
4648 	}
4649 	slot = path->slots[0];
4650 	old_data = btrfs_item_end_nr(leaf, slot);
4651 
4652 	BUG_ON(slot < 0);
4653 	if (slot >= nritems) {
4654 		btrfs_print_leaf(leaf);
4655 		btrfs_crit(fs_info, "slot %d too large, nritems %d",
4656 			   slot, nritems);
4657 		BUG_ON(1);
4658 	}
4659 
4660 	/*
4661 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4662 	 */
4663 	/* first correct the data pointers */
4664 	for (i = slot; i < nritems; i++) {
4665 		u32 ioff;
4666 		item = btrfs_item_nr(i);
4667 
4668 		ioff = btrfs_token_item_offset(leaf, item, &token);
4669 		btrfs_set_token_item_offset(leaf, item,
4670 					    ioff - data_size, &token);
4671 	}
4672 
4673 	/* shift the data */
4674 	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4675 		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4676 		      data_end, old_data - data_end);
4677 
4678 	data_end = old_data;
4679 	old_size = btrfs_item_size_nr(leaf, slot);
4680 	item = btrfs_item_nr(slot);
4681 	btrfs_set_item_size(leaf, item, old_size + data_size);
4682 	btrfs_mark_buffer_dirty(leaf);
4683 
4684 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4685 		btrfs_print_leaf(leaf);
4686 		BUG();
4687 	}
4688 }
4689 
4690 /*
4691  * this is a helper for btrfs_insert_empty_items, the main goal here is
4692  * to save stack depth by doing the bulk of the work in a function
4693  * that doesn't call btrfs_search_slot
4694  */
4695 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4696 			    const struct btrfs_key *cpu_key, u32 *data_size,
4697 			    u32 total_data, u32 total_size, int nr)
4698 {
4699 	struct btrfs_fs_info *fs_info = root->fs_info;
4700 	struct btrfs_item *item;
4701 	int i;
4702 	u32 nritems;
4703 	unsigned int data_end;
4704 	struct btrfs_disk_key disk_key;
4705 	struct extent_buffer *leaf;
4706 	int slot;
4707 	struct btrfs_map_token token;
4708 
4709 	if (path->slots[0] == 0) {
4710 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4711 		fixup_low_keys(path, &disk_key, 1);
4712 	}
4713 	btrfs_unlock_up_safe(path, 1);
4714 
4715 	btrfs_init_map_token(&token);
4716 
4717 	leaf = path->nodes[0];
4718 	slot = path->slots[0];
4719 
4720 	nritems = btrfs_header_nritems(leaf);
4721 	data_end = leaf_data_end(fs_info, leaf);
4722 
4723 	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4724 		btrfs_print_leaf(leaf);
4725 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4726 			   total_size, btrfs_leaf_free_space(fs_info, leaf));
4727 		BUG();
4728 	}
4729 
4730 	if (slot != nritems) {
4731 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4732 
4733 		if (old_data < data_end) {
4734 			btrfs_print_leaf(leaf);
4735 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4736 				   slot, old_data, data_end);
4737 			BUG_ON(1);
4738 		}
4739 		/*
4740 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4741 		 */
4742 		/* first correct the data pointers */
4743 		for (i = slot; i < nritems; i++) {
4744 			u32 ioff;
4745 
4746 			item = btrfs_item_nr(i);
4747 			ioff = btrfs_token_item_offset(leaf, item, &token);
4748 			btrfs_set_token_item_offset(leaf, item,
4749 						    ioff - total_data, &token);
4750 		}
4751 		/* shift the items */
4752 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4753 			      btrfs_item_nr_offset(slot),
4754 			      (nritems - slot) * sizeof(struct btrfs_item));
4755 
4756 		/* shift the data */
4757 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4758 			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4759 			      data_end, old_data - data_end);
4760 		data_end = old_data;
4761 	}
4762 
4763 	/* setup the item for the new data */
4764 	for (i = 0; i < nr; i++) {
4765 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4766 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4767 		item = btrfs_item_nr(slot + i);
4768 		btrfs_set_token_item_offset(leaf, item,
4769 					    data_end - data_size[i], &token);
4770 		data_end -= data_size[i];
4771 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4772 	}
4773 
4774 	btrfs_set_header_nritems(leaf, nritems + nr);
4775 	btrfs_mark_buffer_dirty(leaf);
4776 
4777 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4778 		btrfs_print_leaf(leaf);
4779 		BUG();
4780 	}
4781 }
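
/*
 * Worked example for the setup loop above (numbers illustrative):
 * inserting nr = 2 items with data_size = {10, 20} into a leaf whose
 * data currently ends at offset 500 gives total_data = 30; item 0 gets
 * offset 500 - 10 = 490 and item 1 gets 490 - 20 = 470, so the new data
 * is packed back to back directly below the existing data.
 */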
4782 
4783 /*
4784  * Given a key and some data, insert items into the tree.
4785  * This does all the path init required, making room in the tree if needed.
4786  */
4787 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4788 			    struct btrfs_root *root,
4789 			    struct btrfs_path *path,
4790 			    const struct btrfs_key *cpu_key, u32 *data_size,
4791 			    int nr)
4792 {
4793 	int ret = 0;
4794 	int slot;
4795 	int i;
4796 	u32 total_size = 0;
4797 	u32 total_data = 0;
4798 
4799 	for (i = 0; i < nr; i++)
4800 		total_data += data_size[i];
4801 
4802 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4803 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4804 	if (ret == 0)
4805 		return -EEXIST;
4806 	if (ret < 0)
4807 		return ret;
4808 
4809 	slot = path->slots[0];
4810 	BUG_ON(slot < 0);
4811 
4812 	setup_items_for_insert(root, path, cpu_key, data_size,
4813 			       total_data, total_size, nr);
4814 	return 0;
4815 }
4816 
4817 /*
4818  * Given a key and some data, insert an item into the tree.
4819  * This does all the path init required, making room in the tree if needed.
4820  */
4821 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4822 		      const struct btrfs_key *cpu_key, void *data,
4823 		      u32 data_size)
4824 {
4825 	int ret = 0;
4826 	struct btrfs_path *path;
4827 	struct extent_buffer *leaf;
4828 	unsigned long ptr;
4829 
4830 	path = btrfs_alloc_path();
4831 	if (!path)
4832 		return -ENOMEM;
4833 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4834 	if (!ret) {
4835 		leaf = path->nodes[0];
4836 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4837 		write_extent_buffer(leaf, data, ptr, data_size);
4838 		btrfs_mark_buffer_dirty(leaf);
4839 	}
4840 	btrfs_free_path(path);
4841 	return ret;
4842 }
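
/*
 * A minimal usage sketch for btrfs_insert_item() (the key values are
 * made up for illustration):
 *
 *	struct btrfs_key key = {
 *		.objectid = 256,
 *		.type = BTRFS_EXTENT_DATA_KEY,
 *		.offset = 0,
 *	};
 *	char data[16] = { 0 };
 *	int ret;
 *
 *	ret = btrfs_insert_item(trans, root, &key, data, sizeof(data));
 *	if (ret == -EEXIST)
 *		return ret;	// an item with this key already exists
 *
 * The -EEXIST comes from btrfs_insert_empty_items(), which treats an
 * exact key match from btrfs_search_slot() as a failure.
 */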
4843 
4844 /*
4845  * delete the pointer from a given node.
4846  *
4847  * the tree should have been previously balanced so the deletion does not
4848  * empty a node.
4849  */
4850 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4851 		    int level, int slot)
4852 {
4853 	struct extent_buffer *parent = path->nodes[level];
4854 	u32 nritems;
4855 	int ret;
4856 
4857 	nritems = btrfs_header_nritems(parent);
4858 	if (slot != nritems - 1) {
4859 		if (level) {
4860 			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4861 					nritems - slot - 1);
4862 			BUG_ON(ret < 0);
4863 		}
4864 		memmove_extent_buffer(parent,
4865 			      btrfs_node_key_ptr_offset(slot),
4866 			      btrfs_node_key_ptr_offset(slot + 1),
4867 			      sizeof(struct btrfs_key_ptr) *
4868 			      (nritems - slot - 1));
4869 	} else if (level) {
4870 		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4871 				GFP_NOFS);
4872 		BUG_ON(ret < 0);
4873 	}
4874 
4875 	nritems--;
4876 	btrfs_set_header_nritems(parent, nritems);
4877 	if (nritems == 0 && parent == root->node) {
4878 		BUG_ON(btrfs_header_level(root->node) != 1);
4879 		/* just turn the root into a leaf and break */
4880 		btrfs_set_header_level(root->node, 0);
4881 	} else if (slot == 0) {
4882 		struct btrfs_disk_key disk_key;
4883 
4884 		btrfs_node_key(parent, &disk_key, 0);
4885 		fixup_low_keys(path, &disk_key, level + 1);
4886 	}
4887 	btrfs_mark_buffer_dirty(parent);
4888 }
4889 
4890 /*
4891  * a helper function to delete the leaf pointed to by path->slots[1] and
4892  * path->nodes[1].
4893  *
4894  * This deletes the pointer in path->nodes[1] and frees the leaf
4895  * block extent.
4896  *
4897  * The path must have already been setup for deleting the leaf, including
4898  * all the proper balancing.  path->nodes[1] must be locked.
4899  */
4900 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4901 				    struct btrfs_root *root,
4902 				    struct btrfs_path *path,
4903 				    struct extent_buffer *leaf)
4904 {
4905 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4906 	del_ptr(root, path, 1, path->slots[1]);
4907 
4908 	/*
4909 	 * btrfs_free_extent is expensive, we want to make sure we
4910 	 * aren't holding any locks when we call it
4911 	 */
4912 	btrfs_unlock_up_safe(path, 0);
4913 
4914 	root_sub_used(root, leaf->len);
4915 
4916 	extent_buffer_get(leaf);
4917 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4918 	free_extent_buffer_stale(leaf);
4919 }
4920 /*
4921  * delete the item at the leaf level in path.  If that empties
4922  * the leaf, remove it from the tree
4923  */
4924 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4925 		    struct btrfs_path *path, int slot, int nr)
4926 {
4927 	struct btrfs_fs_info *fs_info = root->fs_info;
4928 	struct extent_buffer *leaf;
4929 	struct btrfs_item *item;
4930 	u32 last_off;
4931 	u32 dsize = 0;
4932 	int ret = 0;
4933 	int wret;
4934 	int i;
4935 	u32 nritems;
4936 	struct btrfs_map_token token;
4937 
4938 	btrfs_init_map_token(&token);
4939 
4940 	leaf = path->nodes[0];
4941 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4942 
4943 	for (i = 0; i < nr; i++)
4944 		dsize += btrfs_item_size_nr(leaf, slot + i);
4945 
4946 	nritems = btrfs_header_nritems(leaf);
4947 
4948 	if (slot + nr != nritems) {
4949 		int data_end = leaf_data_end(fs_info, leaf);
4950 
4951 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4952 			      data_end + dsize,
4953 			      BTRFS_LEAF_DATA_OFFSET + data_end,
4954 			      last_off - data_end);
4955 
4956 		for (i = slot + nr; i < nritems; i++) {
4957 			u32 ioff;
4958 
4959 			item = btrfs_item_nr(i);
4960 			ioff = btrfs_token_item_offset(leaf, item, &token);
4961 			btrfs_set_token_item_offset(leaf, item,
4962 						    ioff + dsize, &token);
4963 		}
4964 
4965 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4966 			      btrfs_item_nr_offset(slot + nr),
4967 			      sizeof(struct btrfs_item) *
4968 			      (nritems - slot - nr));
4969 	}
4970 	btrfs_set_header_nritems(leaf, nritems - nr);
4971 	nritems -= nr;
4972 
4973 	/* delete the leaf if we've emptied it */
4974 	if (nritems == 0) {
4975 		if (leaf == root->node) {
4976 			btrfs_set_header_level(leaf, 0);
4977 		} else {
4978 			btrfs_set_path_blocking(path);
4979 			clean_tree_block(fs_info, leaf);
4980 			btrfs_del_leaf(trans, root, path, leaf);
4981 		}
4982 	} else {
4983 		int used = leaf_space_used(leaf, 0, nritems);
4984 		if (slot == 0) {
4985 			struct btrfs_disk_key disk_key;
4986 
4987 			btrfs_item_key(leaf, &disk_key, 0);
4988 			fixup_low_keys(path, &disk_key, 1);
4989 		}
4990 
4991 		/* delete the leaf if it is mostly empty */
4992 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4993 			/* push_leaf_left fixes the path.
4994 			 * make sure the path still points to our leaf
4995 			 * for possible call to del_ptr below
4996 			 */
4997 			slot = path->slots[1];
4998 			extent_buffer_get(leaf);
4999 
5000 			btrfs_set_path_blocking(path);
5001 			wret = push_leaf_left(trans, root, path, 1, 1,
5002 					      1, (u32)-1);
5003 			if (wret < 0 && wret != -ENOSPC)
5004 				ret = wret;
5005 
5006 			if (path->nodes[0] == leaf &&
5007 			    btrfs_header_nritems(leaf)) {
5008 				wret = push_leaf_right(trans, root, path, 1,
5009 						       1, 1, 0);
5010 				if (wret < 0 && wret != -ENOSPC)
5011 					ret = wret;
5012 			}
5013 
5014 			if (btrfs_header_nritems(leaf) == 0) {
5015 				path->slots[1] = slot;
5016 				btrfs_del_leaf(trans, root, path, leaf);
5017 				free_extent_buffer(leaf);
5018 				ret = 0;
5019 			} else {
5020 				/* if we're still in the path, make sure
5021 				 * we're dirty.  Otherwise, one of the
5022 				 * push_leaf functions must have already
5023 				 * dirtied this buffer
5024 				 */
5025 				if (path->nodes[0] == leaf)
5026 					btrfs_mark_buffer_dirty(leaf);
5027 				free_extent_buffer(leaf);
5028 			}
5029 		} else {
5030 			btrfs_mark_buffer_dirty(leaf);
5031 		}
5032 	}
5033 	return ret;
5034 }
5035 
5036 /*
5037  * search the tree again to find a leaf with lesser keys
5038  * returns 0 if it found something or 1 if there are no lesser leaves.
5039  * returns < 0 on io errors.
5040  *
5041  * This may release the path, and so you may lose any locks held at the
5042  * time you call it.
5043  */
5044 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5045 {
5046 	struct btrfs_key key;
5047 	struct btrfs_disk_key found_key;
5048 	int ret;
5049 
5050 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5051 
5052 	if (key.offset > 0) {
5053 		key.offset--;
5054 	} else if (key.type > 0) {
5055 		key.type--;
5056 		key.offset = (u64)-1;
5057 	} else if (key.objectid > 0) {
5058 		key.objectid--;
5059 		key.type = (u8)-1;
5060 		key.offset = (u64)-1;
5061 	} else {
5062 		return 1;
5063 	}
5064 
5065 	btrfs_release_path(path);
5066 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5067 	if (ret < 0)
5068 		return ret;
5069 	btrfs_item_key(path->nodes[0], &found_key, 0);
5070 	ret = comp_keys(&found_key, &key);
5071 	/*
5072 	 * We might have had an item with the previous key in the tree right
5073 	 * before we released our path. And after we released our path, that
5074 	 * item might have been pushed to the first slot (0) of the leaf we
5075 	 * were holding due to a tree balance. Alternatively, an item with the
5076 	 * previous key can exist as the only element of a leaf (big fat item).
5077 	 * Therefore account for these 2 cases, so that our callers (like
5078 	 * btrfs_previous_item) don't miss an existing item with a key matching
5079 	 * the previous key we computed above.
5080 	 */
5081 	if (ret <= 0)
5082 		return 0;
5083 	return 1;
5084 }
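
/*
 * The key decrement above computes the immediate predecessor in key
 * space.  For example (values illustrative), the predecessor of key
 * (256, BTRFS_EXTENT_DATA_KEY, 0) keeps the objectid, drops the type by
 * one and saturates the offset, giving (256, type - 1, (u64)-1).  Only
 * the all-zero key (0, 0, 0) has no predecessor, which is the case that
 * returns 1 without searching.
 */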
5085 
5086 /*
5087  * A helper function to walk down the tree starting at min_key, and looking
5088  * for nodes or leaves that have a minimum transaction id.
5089  * This is used by the btree defrag code, and tree logging
5090  *
5091  * This does not cow, but it does stuff the starting key it finds back
5092  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5093  * key and get a writable path.
5094  *
5095  * This honors path->lowest_level to prevent descent past a given level
5096  * of the tree.
5097  *
5098  * min_trans indicates the oldest transaction that you are interested
5099  * in walking through.  Any nodes or leaves older than min_trans are
5100  * skipped over (without reading them).
5101  *
5102  * returns zero if something useful was found, < 0 on error and 1 if there
5103  * was nothing in the tree that matched the search criteria.
5104  */
5105 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5106 			 struct btrfs_path *path,
5107 			 u64 min_trans)
5108 {
5109 	struct btrfs_fs_info *fs_info = root->fs_info;
5110 	struct extent_buffer *cur;
5111 	struct btrfs_key found_key;
5112 	int slot;
5113 	int sret;
5114 	u32 nritems;
5115 	int level;
5116 	int ret = 1;
5117 	int keep_locks = path->keep_locks;
5118 
5119 	path->keep_locks = 1;
5120 again:
5121 	cur = btrfs_read_lock_root_node(root);
5122 	level = btrfs_header_level(cur);
5123 	WARN_ON(path->nodes[level]);
5124 	path->nodes[level] = cur;
5125 	path->locks[level] = BTRFS_READ_LOCK;
5126 
5127 	if (btrfs_header_generation(cur) < min_trans) {
5128 		ret = 1;
5129 		goto out;
5130 	}
5131 	while (1) {
5132 		nritems = btrfs_header_nritems(cur);
5133 		level = btrfs_header_level(cur);
5134 		sret = btrfs_bin_search(cur, min_key, level, &slot);
5135 
5136 		/* at the lowest level, we're done, setup the path and exit */
5137 		if (level == path->lowest_level) {
5138 			if (slot >= nritems)
5139 				goto find_next_key;
5140 			ret = 0;
5141 			path->slots[level] = slot;
5142 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5143 			goto out;
5144 		}
5145 		if (sret && slot > 0)
5146 			slot--;
5147 		/*
5148 		 * check this node pointer against the min_trans parameter.
5149 		 * If it is too old, skip to the next one.
5150 		 */
5151 		while (slot < nritems) {
5152 			u64 gen;
5153 
5154 			gen = btrfs_node_ptr_generation(cur, slot);
5155 			if (gen < min_trans) {
5156 				slot++;
5157 				continue;
5158 			}
5159 			break;
5160 		}
5161 find_next_key:
5162 		/*
5163 		 * we didn't find a candidate key in this node, walk forward
5164 		 * and find another one
5165 		 */
5166 		if (slot >= nritems) {
5167 			path->slots[level] = slot;
5168 			btrfs_set_path_blocking(path);
5169 			sret = btrfs_find_next_key(root, path, min_key, level,
5170 						  min_trans);
5171 			if (sret == 0) {
5172 				btrfs_release_path(path);
5173 				goto again;
5174 			} else {
5175 				goto out;
5176 			}
5177 		}
5178 		/* save our key for returning back */
5179 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5180 		path->slots[level] = slot;
5181 		if (level == path->lowest_level) {
5182 			ret = 0;
5183 			goto out;
5184 		}
5185 		btrfs_set_path_blocking(path);
5186 		cur = read_node_slot(fs_info, cur, slot);
5187 		if (IS_ERR(cur)) {
5188 			ret = PTR_ERR(cur);
5189 			goto out;
5190 		}
5191 
5192 		btrfs_tree_read_lock(cur);
5193 
5194 		path->locks[level - 1] = BTRFS_READ_LOCK;
5195 		path->nodes[level - 1] = cur;
5196 		unlock_up(path, level, 1, 0, NULL);
5197 	}
5198 out:
5199 	path->keep_locks = keep_locks;
5200 	if (ret == 0) {
5201 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5202 		btrfs_set_path_blocking(path);
5203 		memcpy(min_key, &found_key, sizeof(found_key));
5204 	}
5205 	return ret;
5206 }
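
/*
 * The iteration pattern over this helper looks roughly like the sketch
 * below (hypothetical caller, for illustration; real users advance
 * min_key according to their own notion of "next"):
 *
 *	struct btrfs_key min_key = { 0 };	// start of key space
 *
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		// process path->nodes[path->lowest_level] at the found slot
 *		btrfs_release_path(path);
 *		min_key.offset++;	// step past what we just saw
 *	}
 */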
5207 
5208 static int tree_move_down(struct btrfs_fs_info *fs_info,
5209 			   struct btrfs_path *path,
5210 			   int *level)
5211 {
5212 	struct extent_buffer *eb;
5213 
5214 	BUG_ON(*level == 0);
5215 	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5216 	if (IS_ERR(eb))
5217 		return PTR_ERR(eb);
5218 
5219 	path->nodes[*level - 1] = eb;
5220 	path->slots[*level - 1] = 0;
5221 	(*level)--;
5222 	return 0;
5223 }
5224 
5225 static int tree_move_next_or_upnext(struct btrfs_path *path,
5226 				    int *level, int root_level)
5227 {
5228 	int ret = 0;
5229 	int nritems;
5230 	nritems = btrfs_header_nritems(path->nodes[*level]);
5231 
5232 	path->slots[*level]++;
5233 
5234 	while (path->slots[*level] >= nritems) {
5235 		if (*level == root_level)
5236 			return -1;
5237 
5238 		/* move upnext */
5239 		path->slots[*level] = 0;
5240 		free_extent_buffer(path->nodes[*level]);
5241 		path->nodes[*level] = NULL;
5242 		(*level)++;
5243 		path->slots[*level]++;
5244 
5245 		nritems = btrfs_header_nritems(path->nodes[*level]);
5246 		ret = 1;
5247 	}
5248 	return ret;
5249 }
5250 
5251 /*
5252  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5253  * or down.
5254  */
5255 static int tree_advance(struct btrfs_fs_info *fs_info,
5256 			struct btrfs_path *path,
5257 			int *level, int root_level,
5258 			int allow_down,
5259 			struct btrfs_key *key)
5260 {
5261 	int ret;
5262 
5263 	if (*level == 0 || !allow_down) {
5264 		ret = tree_move_next_or_upnext(path, level, root_level);
5265 	} else {
5266 		ret = tree_move_down(fs_info, path, level);
5267 	}
5268 	if (ret >= 0) {
5269 		if (*level == 0)
5270 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5271 					path->slots[*level]);
5272 		else
5273 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5274 					path->slots[*level]);
5275 	}
5276 	return ret;
5277 }
5278 
5279 static int tree_compare_item(struct btrfs_path *left_path,
5280 			     struct btrfs_path *right_path,
5281 			     char *tmp_buf)
5282 {
5283 	int cmp;
5284 	int len1, len2;
5285 	unsigned long off1, off2;
5286 
5287 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5288 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5289 	if (len1 != len2)
5290 		return 1;
5291 
5292 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5293 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5294 				right_path->slots[0]);
5295 
5296 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5297 
5298 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5299 	if (cmp)
5300 		return 1;
5301 	return 0;
5302 }
5303 
5304 #define ADVANCE 1
5305 #define ADVANCE_ONLY_NEXT -1
5306 
5307 /*
5308  * This function compares two trees and calls the provided callback for
5309  * every changed/new/deleted item it finds.
5310  * If shared tree blocks are encountered, whole subtrees are skipped, making
5311  * the compare pretty fast on snapshotted subvolumes.
5312  *
5313  * This currently works on commit roots only. As commit roots are read only,
5314  * we don't do any locking. The commit roots are protected with transactions.
5315  * Transactions are ended and rejoined when a commit is tried in between.
5316  *
5317  * This function checks for modifications done to the trees while comparing.
5318  * If it detects a change, it aborts immediately.
5319  */
5320 int btrfs_compare_trees(struct btrfs_root *left_root,
5321 			struct btrfs_root *right_root,
5322 			btrfs_changed_cb_t changed_cb, void *ctx)
5323 {
5324 	struct btrfs_fs_info *fs_info = left_root->fs_info;
5325 	int ret;
5326 	int cmp;
5327 	struct btrfs_path *left_path = NULL;
5328 	struct btrfs_path *right_path = NULL;
5329 	struct btrfs_key left_key;
5330 	struct btrfs_key right_key;
5331 	char *tmp_buf = NULL;
5332 	int left_root_level;
5333 	int right_root_level;
5334 	int left_level;
5335 	int right_level;
5336 	int left_end_reached;
5337 	int right_end_reached;
5338 	int advance_left;
5339 	int advance_right;
5340 	u64 left_blockptr;
5341 	u64 right_blockptr;
5342 	u64 left_gen;
5343 	u64 right_gen;
5344 
5345 	left_path = btrfs_alloc_path();
5346 	if (!left_path) {
5347 		ret = -ENOMEM;
5348 		goto out;
5349 	}
5350 	right_path = btrfs_alloc_path();
5351 	if (!right_path) {
5352 		ret = -ENOMEM;
5353 		goto out;
5354 	}
5355 
5356 	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5357 	if (!tmp_buf) {
5358 		ret = -ENOMEM;
5359 		goto out;
5360 	}
5361 
5362 	left_path->search_commit_root = 1;
5363 	left_path->skip_locking = 1;
5364 	right_path->search_commit_root = 1;
5365 	right_path->skip_locking = 1;
5366 
5367 	/*
5368 	 * Strategy: Go to the first items of both trees. Then do
5369 	 *
5370 	 * If both trees are at level 0
5371 	 *   Compare keys of current items
5372 	 *     If left < right treat left item as new, advance left tree
5373 	 *       and repeat
5374 	 *     If left > right treat right item as deleted, advance right tree
5375 	 *       and repeat
5376 	 *     If left == right do deep compare of items, treat as changed if
5377 	 *       needed, advance both trees and repeat
5378 	 * If both trees are at the same level but not at level 0
5379 	 *   Compare keys of current nodes/leaves
5380 	 *     If left < right advance left tree and repeat
5381 	 *     If left > right advance right tree and repeat
5382 	 *     If left == right compare blockptrs of the next nodes/leaves
5383 	 *       If they match advance both trees but stay at the same level
5384 	 *         and repeat
5385 	 *       If they don't match advance both trees while allowing to go
5386 	 *         deeper and repeat
5387 	 * If tree levels are different
5388 	 *   Advance the tree that needs it and repeat
5389 	 *
5390 	 * Advancing a tree means:
5391 	 *   If we are at level 0, try to go to the next slot. If that's not
5392 	 *   possible, go one level up and repeat. Stop when we find a level
5393 	 *   where we can go to the next slot. We may at this point be on a
5394 	 *   node or a leaf.
5395 	 *
5396 	 *   If we are not at level 0 and not on shared tree blocks, go one
5397 	 *   level deeper.
5398 	 *
5399 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5400 	 *   the right if possible or go up and right.
5401 	 */
5402 
5403 	down_read(&fs_info->commit_root_sem);
5404 	left_level = btrfs_header_level(left_root->commit_root);
5405 	left_root_level = left_level;
5406 	left_path->nodes[left_level] =
5407 			btrfs_clone_extent_buffer(left_root->commit_root);
5408 	if (!left_path->nodes[left_level]) {
5409 		up_read(&fs_info->commit_root_sem);
5410 		ret = -ENOMEM;
5411 		goto out;
5412 	}
5413 
5414 	right_level = btrfs_header_level(right_root->commit_root);
5415 	right_root_level = right_level;
5416 	right_path->nodes[right_level] =
5417 			btrfs_clone_extent_buffer(right_root->commit_root);
5418 	if (!right_path->nodes[right_level]) {
5419 		up_read(&fs_info->commit_root_sem);
5420 		ret = -ENOMEM;
5421 		goto out;
5422 	}
5423 	up_read(&fs_info->commit_root_sem);
5424 
5425 	if (left_level == 0)
5426 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5427 				&left_key, left_path->slots[left_level]);
5428 	else
5429 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5430 				&left_key, left_path->slots[left_level]);
5431 	if (right_level == 0)
5432 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5433 				&right_key, right_path->slots[right_level]);
5434 	else
5435 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5436 				&right_key, right_path->slots[right_level]);
5437 
5438 	left_end_reached = right_end_reached = 0;
5439 	advance_left = advance_right = 0;
5440 
5441 	while (1) {
5442 		if (advance_left && !left_end_reached) {
5443 			ret = tree_advance(fs_info, left_path, &left_level,
5444 					left_root_level,
5445 					advance_left != ADVANCE_ONLY_NEXT,
5446 					&left_key);
5447 			if (ret == -1)
5448 				left_end_reached = ADVANCE;
5449 			else if (ret < 0)
5450 				goto out;
5451 			advance_left = 0;
5452 		}
5453 		if (advance_right && !right_end_reached) {
5454 			ret = tree_advance(fs_info, right_path, &right_level,
5455 					right_root_level,
5456 					advance_right != ADVANCE_ONLY_NEXT,
5457 					&right_key);
5458 			if (ret == -1)
5459 				right_end_reached = ADVANCE;
5460 			else if (ret < 0)
5461 				goto out;
5462 			advance_right = 0;
5463 		}
5464 
5465 		if (left_end_reached && right_end_reached) {
5466 			ret = 0;
5467 			goto out;
5468 		} else if (left_end_reached) {
5469 			if (right_level == 0) {
5470 				ret = changed_cb(left_path, right_path,
5471 						&right_key,
5472 						BTRFS_COMPARE_TREE_DELETED,
5473 						ctx);
5474 				if (ret < 0)
5475 					goto out;
5476 			}
5477 			advance_right = ADVANCE;
5478 			continue;
5479 		} else if (right_end_reached) {
5480 			if (left_level == 0) {
5481 				ret = changed_cb(left_path, right_path,
5482 						&left_key,
5483 						BTRFS_COMPARE_TREE_NEW,
5484 						ctx);
5485 				if (ret < 0)
5486 					goto out;
5487 			}
5488 			advance_left = ADVANCE;
5489 			continue;
5490 		}
5491 
5492 		if (left_level == 0 && right_level == 0) {
5493 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5494 			if (cmp < 0) {
5495 				ret = changed_cb(left_path, right_path,
5496 						&left_key,
5497 						BTRFS_COMPARE_TREE_NEW,
5498 						ctx);
5499 				if (ret < 0)
5500 					goto out;
5501 				advance_left = ADVANCE;
5502 			} else if (cmp > 0) {
5503 				ret = changed_cb(left_path, right_path,
5504 						&right_key,
5505 						BTRFS_COMPARE_TREE_DELETED,
5506 						ctx);
5507 				if (ret < 0)
5508 					goto out;
5509 				advance_right = ADVANCE;
5510 			} else {
5511 				enum btrfs_compare_tree_result result;
5512 
5513 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5514 				ret = tree_compare_item(left_path, right_path,
5515 							tmp_buf);
5516 				if (ret)
5517 					result = BTRFS_COMPARE_TREE_CHANGED;
5518 				else
5519 					result = BTRFS_COMPARE_TREE_SAME;
5520 				ret = changed_cb(left_path, right_path,
5521 						 &left_key, result, ctx);
5522 				if (ret < 0)
5523 					goto out;
5524 				advance_left = ADVANCE;
5525 				advance_right = ADVANCE;
5526 			}
5527 		} else if (left_level == right_level) {
5528 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5529 			if (cmp < 0) {
5530 				advance_left = ADVANCE;
5531 			} else if (cmp > 0) {
5532 				advance_right = ADVANCE;
5533 			} else {
5534 				left_blockptr = btrfs_node_blockptr(
5535 						left_path->nodes[left_level],
5536 						left_path->slots[left_level]);
5537 				right_blockptr = btrfs_node_blockptr(
5538 						right_path->nodes[right_level],
5539 						right_path->slots[right_level]);
5540 				left_gen = btrfs_node_ptr_generation(
5541 						left_path->nodes[left_level],
5542 						left_path->slots[left_level]);
5543 				right_gen = btrfs_node_ptr_generation(
5544 						right_path->nodes[right_level],
5545 						right_path->slots[right_level]);
5546 				if (left_blockptr == right_blockptr &&
5547 				    left_gen == right_gen) {
5548 					/*
5549 					 * As we're on a shared block, don't
5550 					 * allow going any deeper.
5551 					 */
5552 					advance_left = ADVANCE_ONLY_NEXT;
5553 					advance_right = ADVANCE_ONLY_NEXT;
5554 				} else {
5555 					advance_left = ADVANCE;
5556 					advance_right = ADVANCE;
5557 				}
5558 			}
5559 		} else if (left_level < right_level) {
5560 			advance_right = ADVANCE;
5561 		} else {
5562 			advance_left = ADVANCE;
5563 		}
5564 	}
5565 
5566 out:
5567 	btrfs_free_path(left_path);
5568 	btrfs_free_path(right_path);
5569 	kvfree(tmp_buf);
5570 	return ret;
5571 }
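
/*
 * Editorial sketch (not part of the original file): a minimal callback for
 * the snapshot comparison loop above, assuming the btrfs_changed_cb_t
 * signature matching this revision's btrfs_compare_trees().  The names
 * prefixed with "example_" are hypothetical.
 */
struct example_diff_stats {
	u64 new_keys;
	u64 deleted_keys;
	u64 changed_keys;
};

static int __maybe_unused example_changed_cb(struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	struct example_diff_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:		/* key only in the left tree */
		stats->new_keys++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:	/* key only in the right tree */
		stats->deleted_keys++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:	/* same key, different item */
		stats->changed_keys++;
		break;
	case BTRFS_COMPARE_TREE_SAME:		/* identical items */
		break;
	}
	/* a negative return value aborts the comparison loop */
	return 0;
}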
5572 
5573 /*
5574  * This is similar to btrfs_next_leaf, but does not try to preserve
5575  * and fix up the path.  It looks for and returns the next key in the
5576  * tree based on the current path and the min_trans parameter.
5577  *
5578  * Returns 0 if another key was found, < 0 on error, and 1 if there
5579  * are no higher keys in the tree.
5580  *
5581  * path->keep_locks should be set to 1 on the search made before
5582  * calling this function.
5583  */
5584 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5585 			struct btrfs_key *key, int level, u64 min_trans)
5586 {
5587 	int slot;
5588 	struct extent_buffer *c;
5589 
5590 	WARN_ON(!path->keep_locks);
5591 	while (level < BTRFS_MAX_LEVEL) {
5592 		if (!path->nodes[level])
5593 			return 1;
5594 
5595 		slot = path->slots[level] + 1;
5596 		c = path->nodes[level];
5597 next:
5598 		if (slot >= btrfs_header_nritems(c)) {
5599 			int ret;
5600 			int orig_lowest;
5601 			struct btrfs_key cur_key;
5602 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5603 			    !path->nodes[level + 1])
5604 				return 1;
5605 
5606 			if (path->locks[level + 1]) {
5607 				level++;
5608 				continue;
5609 			}
5610 
5611 			slot = btrfs_header_nritems(c) - 1;
5612 			if (level == 0)
5613 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5614 			else
5615 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5616 
5617 			orig_lowest = path->lowest_level;
5618 			btrfs_release_path(path);
5619 			path->lowest_level = level;
5620 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5621 						0, 0);
5622 			path->lowest_level = orig_lowest;
5623 			if (ret < 0)
5624 				return ret;
5625 
5626 			c = path->nodes[level];
5627 			slot = path->slots[level];
5628 			if (ret == 0)
5629 				slot++;
5630 			goto next;
5631 		}
5632 
5633 		if (level == 0)
5634 			btrfs_item_key_to_cpu(c, key, slot);
5635 		else {
5636 			u64 gen = btrfs_node_ptr_generation(c, slot);
5637 
5638 			if (gen < min_trans) {
5639 				slot++;
5640 				goto next;
5641 			}
5642 			btrfs_node_key_to_cpu(c, key, slot);
5643 		}
5644 		return 0;
5645 	}
5646 	return 1;
5647 }
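
/*
 * Editorial sketch (not part of the original file): a typical caller of
 * btrfs_find_next_key().  keep_locks must be set on the search made before
 * the call, as the WARN_ON above enforces.  example_walk_keys() is a
 * hypothetical name; the per-leaf processing is left as a placeholder.
 */
static int __maybe_unused example_walk_keys(struct btrfs_root *root,
					    u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };	/* start at the smallest possible key */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;

		/* ... inspect the leaf at path->nodes[0] here ... */

		/* find the next key; min_trans filters old interior nodes */
		ret = btrfs_find_next_key(root, path, &key, 0, min_trans);
		btrfs_release_path(path);
		if (ret)	/* 1: no higher key, < 0: error */
			break;
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}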
5648 
5649 /*
5650  * Search the tree again to find a leaf with greater keys.
5651  * Returns 0 if it found something, 1 if there are no greater leaves,
5652  * and < 0 on I/O errors.
5653  */
5654 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5655 {
5656 	return btrfs_next_old_leaf(root, path, 0);
5657 }
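
/*
 * Editorial sketch (not part of the original file): the canonical forward
 * scan built on top of btrfs_next_leaf().  The starting key and the
 * per-item processing are placeholders.
 */
static int __maybe_unused example_scan_forward(struct btrfs_root *root,
					       const struct btrfs_key *first_key)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item at (leaf, path->slots[0]) ... */
		path->slots[0]++;
	}
	if (ret > 0)	/* ran off the end of the tree: not an error */
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}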
5658 
5659 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5660 			u64 time_seq)
5661 {
5662 	int slot;
5663 	int level;
5664 	struct extent_buffer *c;
5665 	struct extent_buffer *next;
5666 	struct btrfs_key key;
5667 	u32 nritems;
5668 	int ret;
5669 	int old_spinning = path->leave_spinning;
5670 	int next_rw_lock = 0;
5671 
5672 	nritems = btrfs_header_nritems(path->nodes[0]);
5673 	if (nritems == 0)
5674 		return 1;
5675 
5676 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5677 again:
5678 	level = 1;
5679 	next = NULL;
5680 	next_rw_lock = 0;
5681 	btrfs_release_path(path);
5682 
5683 	path->keep_locks = 1;
5684 	path->leave_spinning = 1;
5685 
5686 	if (time_seq)
5687 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5688 	else
5689 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5690 	path->keep_locks = 0;
5691 
5692 	if (ret < 0)
5693 		return ret;
5694 
5695 	nritems = btrfs_header_nritems(path->nodes[0]);
5696 	/*
5697 	 * by releasing the path above we dropped all our locks.  A balance
5698 	 * could have added more items next to the key that used to be
5699 	 * at the very end of the block.  So, check again here and
5700 	 * advance the path if there are now more items available.
5701 	 */
5702 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5703 		if (ret == 0)
5704 			path->slots[0]++;
5705 		ret = 0;
5706 		goto done;
5707 	}
5708 	/*
5709 	 * The above check misses one case:
5710 	 * - after releasing the path above, someone has removed the item that
5711 	 *   used to be at the very end of the block, and balancing between
5712 	 *   leaves has moved in another item with a bigger key.offset to
5713 	 *   replace it.
5714 	 *
5715 	 * This one should be returned as well, or we can get leaf corruption
5716 	 * later (especially in __btrfs_drop_extents()).
5717 	 *
5718 	 * A bit more explanation about this check: with ret > 0 the key was
5719 	 * not found, and the path points to the slot where it would be
5720 	 * inserted, so the item at path->slots[0] must be the bigger one.
5721 	 */
5722 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5723 		ret = 0;
5724 		goto done;
5725 	}
5726 
5727 	while (level < BTRFS_MAX_LEVEL) {
5728 		if (!path->nodes[level]) {
5729 			ret = 1;
5730 			goto done;
5731 		}
5732 
5733 		slot = path->slots[level] + 1;
5734 		c = path->nodes[level];
5735 		if (slot >= btrfs_header_nritems(c)) {
5736 			level++;
5737 			if (level == BTRFS_MAX_LEVEL) {
5738 				ret = 1;
5739 				goto done;
5740 			}
5741 			continue;
5742 		}
5743 
5744 		if (next) {
5745 			btrfs_tree_unlock_rw(next, next_rw_lock);
5746 			free_extent_buffer(next);
5747 		}
5748 
5749 		next = c;
5750 		next_rw_lock = path->locks[level];
5751 		ret = read_block_for_search(root, path, &next, level,
5752 					    slot, &key);
5753 		if (ret == -EAGAIN)
5754 			goto again;
5755 
5756 		if (ret < 0) {
5757 			btrfs_release_path(path);
5758 			goto done;
5759 		}
5760 
5761 		if (!path->skip_locking) {
5762 			ret = btrfs_try_tree_read_lock(next);
5763 			if (!ret && time_seq) {
5764 				/*
5765 				 * If we don't get the lock, we may be racing
5766 				 * with push_leaf_left, which holds that lock
5767 				 * while itself waiting for the leaf we've
5768 				 * currently locked. To resolve this, we give
5769 				 * up our lock and cycle.
5770 				 */
5771 				free_extent_buffer(next);
5772 				btrfs_release_path(path);
5773 				cond_resched();
5774 				goto again;
5775 			}
5776 			if (!ret) {
5777 				btrfs_set_path_blocking(path);
5778 				btrfs_tree_read_lock(next);
5779 			}
5780 			next_rw_lock = BTRFS_READ_LOCK;
5781 		}
5782 		break;
5783 	}
5784 	path->slots[level] = slot;
5785 	while (1) {
5786 		level--;
5787 		c = path->nodes[level];
5788 		if (path->locks[level])
5789 			btrfs_tree_unlock_rw(c, path->locks[level]);
5790 
5791 		free_extent_buffer(c);
5792 		path->nodes[level] = next;
5793 		path->slots[level] = 0;
5794 		if (!path->skip_locking)
5795 			path->locks[level] = next_rw_lock;
5796 		if (!level)
5797 			break;
5798 
5799 		ret = read_block_for_search(root, path, &next, level,
5800 					    0, &key);
5801 		if (ret == -EAGAIN)
5802 			goto again;
5803 
5804 		if (ret < 0) {
5805 			btrfs_release_path(path);
5806 			goto done;
5807 		}
5808 
5809 		if (!path->skip_locking) {
5810 			ret = btrfs_try_tree_read_lock(next);
5811 			if (!ret) {
5812 				btrfs_set_path_blocking(path);
5813 				btrfs_tree_read_lock(next);
5814 			}
5815 			next_rw_lock = BTRFS_READ_LOCK;
5816 		}
5817 	}
5818 	ret = 0;
5819 done:
5820 	unlock_up(path, 0, 1, 0, NULL);
5821 	path->leave_spinning = old_spinning;
5822 	if (!old_spinning)
5823 		btrfs_set_path_blocking(path);
5824 
5825 	return ret;
5826 }
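
/*
 * Editorial sketch (not part of the original file): iterating a tree as it
 * looked at a given tree mod log sequence number.  Obtaining time_seq (via
 * the tree mod log) is outside the scope of this sketch; it is simply
 * passed in.
 */
static int __maybe_unused example_scan_old(struct btrfs_root *root,
					   const struct btrfs_key *first_key,
					   u64 time_seq)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_old_slot(root, first_key, path, time_seq);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
			/* ... process path->nodes[0], slot path->slots[0] ... */
			path->slots[0]++;
			continue;
		}
		ret = btrfs_next_old_leaf(root, path, time_seq);
		if (ret)	/* 1: done, < 0: error */
			break;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}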
5827 
5828 /*
5829  * This uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5830  * searching until it gets past min_objectid or finds an item of 'type'.
5831  *
5832  * Returns 0 if something was found, 1 if nothing was found, and < 0 on error.
5833  */
5834 int btrfs_previous_item(struct btrfs_root *root,
5835 			struct btrfs_path *path, u64 min_objectid,
5836 			int type)
5837 {
5838 	struct btrfs_key found_key;
5839 	struct extent_buffer *leaf;
5840 	u32 nritems;
5841 	int ret;
5842 
5843 	while (1) {
5844 		if (path->slots[0] == 0) {
5845 			btrfs_set_path_blocking(path);
5846 			ret = btrfs_prev_leaf(root, path);
5847 			if (ret != 0)
5848 				return ret;
5849 		} else {
5850 			path->slots[0]--;
5851 		}
5852 		leaf = path->nodes[0];
5853 		nritems = btrfs_header_nritems(leaf);
5854 		if (nritems == 0)
5855 			return 1;
5856 		if (path->slots[0] == nritems)
5857 			path->slots[0]--;
5858 
5859 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5860 		if (found_key.objectid < min_objectid)
5861 			break;
5862 		if (found_key.type == type)
5863 			return 0;
5864 		if (found_key.objectid == min_objectid &&
5865 		    found_key.type < type)
5866 			break;
5867 	}
5868 	return 1;
5869 }
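
/*
 * Editorial sketch (not part of the original file): the common "search past
 * the end, then step back" pattern around btrfs_previous_item().  Because an
 * offset of (u64)-1 is essentially never stored, the search returns > 0 and
 * leaves the path just past the last candidate item.
 */
static int __maybe_unused example_find_last_item(struct btrfs_root *root,
						 struct btrfs_path *path,
						 u64 objectid, int type)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* step back to the last item with this objectid and type, if any */
	return btrfs_previous_item(root, path, objectid, type);
}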
5870 
5871 /*
5872  * search in extent tree to find a previous Metadata/Data extent item with
5873  * min objecitd.
5874  *
5875  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5876  */
5877 int btrfs_previous_extent_item(struct btrfs_root *root,
5878 			struct btrfs_path *path, u64 min_objectid)
5879 {
5880 	struct btrfs_key found_key;
5881 	struct extent_buffer *leaf;
5882 	u32 nritems;
5883 	int ret;
5884 
5885 	while (1) {
5886 		if (path->slots[0] == 0) {
5887 			btrfs_set_path_blocking(path);
5888 			ret = btrfs_prev_leaf(root, path);
5889 			if (ret != 0)
5890 				return ret;
5891 		} else {
5892 			path->slots[0]--;
5893 		}
5894 		leaf = path->nodes[0];
5895 		nritems = btrfs_header_nritems(leaf);
5896 		if (nritems == 0)
5897 			return 1;
5898 		if (path->slots[0] == nritems)
5899 			path->slots[0]--;
5900 
5901 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5902 		if (found_key.objectid < min_objectid)
5903 			break;
5904 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5905 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5906 			return 0;
5907 		if (found_key.objectid == min_objectid &&
5908 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5909 			break;
5910 	}
5911 	return 1;
5912 }
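
/*
 * Editorial sketch (not part of the original file): positioning on the
 * extent item at or before a logical address, similar to how scrub seeds
 * its extent tree walk.  BTRFS_METADATA_ITEM_KEY sorts after
 * BTRFS_EXTENT_ITEM_KEY, so searching for it with offset (u64)-1 lands
 * just past any extent item at 'logical'.
 */
static int __maybe_unused example_find_extent_at_or_before(
		struct btrfs_root *extent_root, struct btrfs_path *path,
		u64 logical)
{
	struct btrfs_key key;
	int ret;

	key.objectid = logical;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret > 0)
		ret = btrfs_previous_extent_item(extent_root, path, 0);
	return ret;	/* 0: found, 1: nothing before, < 0: error */
}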
5913