// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

static const struct btrfs_csums {
	u16		size;
	const char	*name;
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
};

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);
	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
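
/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original file and not called anywhere): the usual allocate/search/release
 * pattern for a path.  btrfs_search_slot() and btrfs_free_path() are the
 * real APIs; path_lookup_example() is just a made-up name.
 */
static inline int path_lookup_example(struct btrfs_root *root,
				      const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* read-only search: no transaction, no insertion reservation, no cow */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* on ret == 0, path->nodes[0] / path->slots[0] point at the item */

	/* drops locks and extent buffer references, then frees the path */
	btrfs_free_path(path);
	return ret;
}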

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		/*
		 * If we currently have a spinning reader or writer lock this
		 * will bump the count of blocking holders and drop the
		 * spinlock.
		 */
		if (p->locks[i] == BTRFS_READ_LOCK) {
			btrfs_set_lock_blocking_read(p->nodes[i]);
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
			btrfs_set_lock_blocking_write(p->nodes[i]);
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
		}
	}
}
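
/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original file): anything that may schedule while path locks are held,
 * e.g. a GFP_NOFS allocation, must be preceded by btrfs_set_path_blocking()
 * so spinning locks are converted to blocking ones first.
 */
static inline void *path_blocking_alloc_example(struct btrfs_path *p,
						size_t size)
{
	/* kmalloc() may sleep, which is only safe under blocking locks */
	btrfs_set_path_blocking(p);
	return kmalloc(size, GFP_NOFS);
}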

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root.  A read-locked
 * buffer is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* Cow-only roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this list to make sure
 * they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error
 * code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should set elem->seq to zero before
 * calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree mod log sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}
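
/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original file): a typical blocker lifetime.  While the blocker is on
 * fs_info->tree_mod_seq_list, no tree mod log entry with a sequence
 * number >= elem.seq can be removed, so rewinds against that sequence
 * stay valid until btrfs_put_tree_mod_seq() is called.
 */
static inline u64 tree_mod_seq_example(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = SEQ_LIST_INIT(elem);
	u64 seq;

	/* elem.seq is zero, so a fresh sequence number gets assigned */
	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... walk rewound tree states using time_seq == seq ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);
	return seq;
}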

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	lockdep_assert_held_write(&fs_info->tree_mod_log_lock);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold the
 * lock until all tree mod log insertions are recorded in the rb tree, and
 * then write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}

static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed on blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clean_tree_block(buf);
		*last_ref = 1;
	}
	return 0;
}

static struct extent_buffer *alloc_tree_block_no_bg_flush(
					  struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  u64 parent_start,
					  const struct btrfs_disk_key *disk_key,
					  int level,
					  u64 hint,
					  u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *ret;

	/*
	 * If we are COWing a node/leaf from the extent, chunk, device or free
	 * space trees, make sure that we do not finish block group creation of
	 * pending block groups. We do this to avoid a deadlock.
	 * COWing can result in allocation of a new chunk, and flushing pending
	 * block groups (btrfs_create_pending_block_groups()) can be triggered
	 * when finishing allocation of a new chunk. Creation of a pending block
	 * group modifies the extent, chunk, device and free space trees,
	 * therefore we could deadlock with ourselves since we are holding a
	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
	 * try to COW later.
	 * For similar reasons, we also need to delay flushing pending block
	 * groups when splitting a leaf or node, from one of those trees, since
	 * we are holding a write lock on it and its parent or when inserting a
	 * new root node for one of those trees.
	 */
	if (root == fs_info->extent_root ||
	    root == fs_info->chunk_root ||
	    root == fs_info->dev_root ||
	    root == fs_info->free_space_root)
		trans->can_flush_pending_bgs = false;

	ret = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, disk_key, level,
				     hint, empty_size);
	trans->can_flush_pending_bgs = true;

	return ret;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
					   level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
		struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_read(eb);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	u64 eb_root_owner = 0;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0, level, NULL);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		eb_root_owner = btrfs_header_owner(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_read(eb_root);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, eb_root_owner);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot while committing the transaction,
	 *    after we've finished copying the src root, we must COW the
	 *    shared block to ensure metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			"COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking_write(parent);
	btrfs_set_lock_blocking_write(buf);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also, we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
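
/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original file): keys order by objectid first, then type, then offset,
 * so all items of one inode group together, sorted by item type.
 */
static inline int comp_keys_example(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_EXTENT_DATA_KEY,
			       .offset = 0 };

	/* same objectid, and INODE_ITEM (1) < EXTENT_DATA (108): returns -1 */
	return btrfs_comp_cpu_keys(&a, &b);
}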

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking_write(parent);

	for (i = start_slot; i <= end_slot; i++) {
		struct btrfs_key first_key;
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		btrfs_node_key_to_cpu(parent, &first_key, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen,
						      parent_level - 1,
						      &first_key);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen,
						parent_level - 1, &first_key);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking_write(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}
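
/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original file): look up a key in a node or leaf that's already locked.
 * Zero from btrfs_bin_search() means an exact match; one means @slot is
 * the insertion point for the key; negative values are mapping errors
 * from generic_bin_search().
 */
static inline int bin_search_example(struct extent_buffer *eb,
				     const struct btrfs_key *key)
{
	int slot;
	int ret;

	ret = btrfs_bin_search(eb, key, btrfs_header_level(eb), &slot);
	if (ret < 0)
		return ret;
	/* with ret == 1 on a leaf, the key would be inserted at @slot */
	return slot;
}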
1797 
1798 static void root_add_used(struct btrfs_root *root, u32 size)
1799 {
1800 	spin_lock(&root->accounting_lock);
1801 	btrfs_set_root_used(&root->root_item,
1802 			    btrfs_root_used(&root->root_item) + size);
1803 	spin_unlock(&root->accounting_lock);
1804 }
1805 
1806 static void root_sub_used(struct btrfs_root *root, u32 size)
1807 {
1808 	spin_lock(&root->accounting_lock);
1809 	btrfs_set_root_used(&root->root_item,
1810 			    btrfs_root_used(&root->root_item) - size);
1811 	spin_unlock(&root->accounting_lock);
1812 }
1813 
1814 /* given a node and slot number, this reads the blocks it points to.  The
1815  * extent buffer is returned with a reference taken (but unlocked).
1816  */
1817 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1818 					   int slot)
1819 {
1820 	int level = btrfs_header_level(parent);
1821 	struct extent_buffer *eb;
1822 	struct btrfs_key first_key;
1823 
1824 	if (slot < 0 || slot >= btrfs_header_nritems(parent))
1825 		return ERR_PTR(-ENOENT);
1826 
1827 	BUG_ON(level == 0);
1828 
1829 	btrfs_node_key_to_cpu(parent, &first_key, slot);
1830 	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
1831 			     btrfs_node_ptr_generation(parent, slot),
1832 			     level - 1, &first_key);
1833 	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1834 		free_extent_buffer(eb);
1835 		eb = ERR_PTR(-EIO);
1836 	}
1837 
1838 	return eb;
1839 }
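
/*
 * Illustrative sketch (not in the original source): the usual calling
 * pattern for btrfs_read_node_slot().  The returned buffer carries a
 * reference but no lock, so callers lock it themselves and drop the
 * reference with free_extent_buffer() when done.
 */
static inline int example_visit_child(struct extent_buffer *parent, int slot)
{
	struct extent_buffer *child;

	child = btrfs_read_node_slot(parent, slot);
	if (IS_ERR(child))
		return PTR_ERR(child);

	btrfs_tree_lock(child);
	/* ... inspect or modify the child here ... */
	btrfs_tree_unlock(child);
	free_extent_buffer(child);
	return 0;
}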
1840 
1841 /*
1842  * node level balancing, used to make sure nodes are in proper order for
1843  * item deletion.  We balance from the top down, so we have to make sure
1844  * that a deletion won't leave a node completely empty later on.
1845  */
1846 static noinline int balance_level(struct btrfs_trans_handle *trans,
1847 			 struct btrfs_root *root,
1848 			 struct btrfs_path *path, int level)
1849 {
1850 	struct btrfs_fs_info *fs_info = root->fs_info;
1851 	struct extent_buffer *right = NULL;
1852 	struct extent_buffer *mid;
1853 	struct extent_buffer *left = NULL;
1854 	struct extent_buffer *parent = NULL;
1855 	int ret = 0;
1856 	int wret;
1857 	int pslot;
1858 	int orig_slot = path->slots[level];
1859 	u64 orig_ptr;
1860 
1861 	ASSERT(level > 0);
1862 
1863 	mid = path->nodes[level];
1864 
1865 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1866 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1867 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1868 
1869 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1870 
1871 	if (level < BTRFS_MAX_LEVEL - 1) {
1872 		parent = path->nodes[level + 1];
1873 		pslot = path->slots[level + 1];
1874 	}
1875 
1876 	/*
1877 	 * deal with the case where there is only one pointer in the root
1878 	 * by promoting the node below to a root
1879 	 */
1880 	if (!parent) {
1881 		struct extent_buffer *child;
1882 
1883 		if (btrfs_header_nritems(mid) != 1)
1884 			return 0;
1885 
1886 		/* promote the child to a root */
1887 		child = btrfs_read_node_slot(mid, 0);
1888 		if (IS_ERR(child)) {
1889 			ret = PTR_ERR(child);
1890 			btrfs_handle_fs_error(fs_info, ret, NULL);
1891 			goto enospc;
1892 		}
1893 
1894 		btrfs_tree_lock(child);
1895 		btrfs_set_lock_blocking_write(child);
1896 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1897 		if (ret) {
1898 			btrfs_tree_unlock(child);
1899 			free_extent_buffer(child);
1900 			goto enospc;
1901 		}
1902 
1903 		ret = tree_mod_log_insert_root(root->node, child, 1);
1904 		BUG_ON(ret < 0);
1905 		rcu_assign_pointer(root->node, child);
1906 
1907 		add_root_to_dirty_list(root);
1908 		btrfs_tree_unlock(child);
1909 
1910 		path->locks[level] = 0;
1911 		path->nodes[level] = NULL;
1912 		btrfs_clean_tree_block(mid);
1913 		btrfs_tree_unlock(mid);
1914 		/* once for the path */
1915 		free_extent_buffer(mid);
1916 
1917 		root_sub_used(root, mid->len);
1918 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1919 		/* once for the root ptr */
1920 		free_extent_buffer_stale(mid);
1921 		return 0;
1922 	}
1923 	if (btrfs_header_nritems(mid) >
1924 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1925 		return 0;
1926 
1927 	left = btrfs_read_node_slot(parent, pslot - 1);
1928 	if (IS_ERR(left))
1929 		left = NULL;
1930 
1931 	if (left) {
1932 		btrfs_tree_lock(left);
1933 		btrfs_set_lock_blocking_write(left);
1934 		wret = btrfs_cow_block(trans, root, left,
1935 				       parent, pslot - 1, &left);
1936 		if (wret) {
1937 			ret = wret;
1938 			goto enospc;
1939 		}
1940 	}
1941 
1942 	right = btrfs_read_node_slot(parent, pslot + 1);
1943 	if (IS_ERR(right))
1944 		right = NULL;
1945 
1946 	if (right) {
1947 		btrfs_tree_lock(right);
1948 		btrfs_set_lock_blocking_write(right);
1949 		wret = btrfs_cow_block(trans, root, right,
1950 				       parent, pslot + 1, &right);
1951 		if (wret) {
1952 			ret = wret;
1953 			goto enospc;
1954 		}
1955 	}
1956 
1957 	/* first, try to make some room in the middle buffer */
1958 	if (left) {
1959 		orig_slot += btrfs_header_nritems(left);
1960 		wret = push_node_left(trans, left, mid, 1);
1961 		if (wret < 0)
1962 			ret = wret;
1963 	}
1964 
1965 	/*
1966 	 * then try to empty the rightmost buffer into the middle
1967 	 */
1968 	if (right) {
1969 		wret = push_node_left(trans, mid, right, 1);
1970 		if (wret < 0 && wret != -ENOSPC)
1971 			ret = wret;
1972 		if (btrfs_header_nritems(right) == 0) {
1973 			btrfs_clean_tree_block(right);
1974 			btrfs_tree_unlock(right);
1975 			del_ptr(root, path, level + 1, pslot + 1);
1976 			root_sub_used(root, right->len);
1977 			btrfs_free_tree_block(trans, root, right, 0, 1);
1978 			free_extent_buffer_stale(right);
1979 			right = NULL;
1980 		} else {
1981 			struct btrfs_disk_key right_key;
1982 			btrfs_node_key(right, &right_key, 0);
1983 			ret = tree_mod_log_insert_key(parent, pslot + 1,
1984 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1985 			BUG_ON(ret < 0);
1986 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1987 			btrfs_mark_buffer_dirty(parent);
1988 		}
1989 	}
1990 	if (btrfs_header_nritems(mid) == 1) {
1991 		/*
1992 		 * we're not allowed to leave a node with one item in the
1993 		 * tree during a delete.  A deletion from lower in the tree
1994 		 * could try to delete the only pointer in this node.
1995 		 * So, pull some keys from the left.
1996 		 * There has to be a left pointer at this point because
1997 		 * otherwise we would have pulled some pointers from the
1998 		 * right.
1999 		 */
2000 		if (!left) {
2001 			ret = -EROFS;
2002 			btrfs_handle_fs_error(fs_info, ret, NULL);
2003 			goto enospc;
2004 		}
2005 		wret = balance_node_right(trans, mid, left);
2006 		if (wret < 0) {
2007 			ret = wret;
2008 			goto enospc;
2009 		}
2010 		if (wret == 1) {
2011 			wret = push_node_left(trans, left, mid, 1);
2012 			if (wret < 0)
2013 				ret = wret;
2014 		}
2015 		BUG_ON(wret == 1);
2016 	}
2017 	if (btrfs_header_nritems(mid) == 0) {
2018 		btrfs_clean_tree_block(mid);
2019 		btrfs_tree_unlock(mid);
2020 		del_ptr(root, path, level + 1, pslot);
2021 		root_sub_used(root, mid->len);
2022 		btrfs_free_tree_block(trans, root, mid, 0, 1);
2023 		free_extent_buffer_stale(mid);
2024 		mid = NULL;
2025 	} else {
2026 		/* update the parent key to reflect our changes */
2027 		struct btrfs_disk_key mid_key;
2028 		btrfs_node_key(mid, &mid_key, 0);
2029 		ret = tree_mod_log_insert_key(parent, pslot,
2030 				MOD_LOG_KEY_REPLACE, GFP_NOFS);
2031 		BUG_ON(ret < 0);
2032 		btrfs_set_node_key(parent, &mid_key, pslot);
2033 		btrfs_mark_buffer_dirty(parent);
2034 	}
2035 
2036 	/* update the path */
2037 	if (left) {
2038 		if (btrfs_header_nritems(left) > orig_slot) {
2039 			extent_buffer_get(left);
2040 			/* left was locked after cow */
2041 			path->nodes[level] = left;
2042 			path->slots[level + 1] -= 1;
2043 			path->slots[level] = orig_slot;
2044 			if (mid) {
2045 				btrfs_tree_unlock(mid);
2046 				free_extent_buffer(mid);
2047 			}
2048 		} else {
2049 			orig_slot -= btrfs_header_nritems(left);
2050 			path->slots[level] = orig_slot;
2051 		}
2052 	}
2053 	/* double check we haven't messed things up */
2054 	if (orig_ptr !=
2055 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2056 		BUG();
2057 enospc:
2058 	if (right) {
2059 		btrfs_tree_unlock(right);
2060 		free_extent_buffer(right);
2061 	}
2062 	if (left) {
2063 		if (path->nodes[level] != left)
2064 			btrfs_tree_unlock(left);
2065 		free_extent_buffer(left);
2066 	}
2067 	return ret;
2068 }
2069 
2070 /* Node balancing for insertion.  Here we only split or push nodes around
2071  * when they are completely full.  This is also done top down, so we
2072  * have to be pessimistic.
2073  */
2074 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2075 					  struct btrfs_root *root,
2076 					  struct btrfs_path *path, int level)
2077 {
2078 	struct btrfs_fs_info *fs_info = root->fs_info;
2079 	struct extent_buffer *right = NULL;
2080 	struct extent_buffer *mid;
2081 	struct extent_buffer *left = NULL;
2082 	struct extent_buffer *parent = NULL;
2083 	int ret = 0;
2084 	int wret;
2085 	int pslot;
2086 	int orig_slot = path->slots[level];
2087 
2088 	if (level == 0)
2089 		return 1;
2090 
2091 	mid = path->nodes[level];
2092 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2093 
2094 	if (level < BTRFS_MAX_LEVEL - 1) {
2095 		parent = path->nodes[level + 1];
2096 		pslot = path->slots[level + 1];
2097 	}
2098 
2099 	if (!parent)
2100 		return 1;
2101 
2102 	left = btrfs_read_node_slot(parent, pslot - 1);
2103 	if (IS_ERR(left))
2104 		left = NULL;
2105 
2106 	/* first, try to make some room in the middle buffer */
2107 	if (left) {
2108 		u32 left_nr;
2109 
2110 		btrfs_tree_lock(left);
2111 		btrfs_set_lock_blocking_write(left);
2112 
2113 		left_nr = btrfs_header_nritems(left);
2114 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2115 			wret = 1;
2116 		} else {
2117 			ret = btrfs_cow_block(trans, root, left, parent,
2118 					      pslot - 1, &left);
2119 			if (ret)
2120 				wret = 1;
2121 			else {
2122 				wret = push_node_left(trans, left, mid, 0);
2123 			}
2124 		}
2125 		if (wret < 0)
2126 			ret = wret;
2127 		if (wret == 0) {
2128 			struct btrfs_disk_key disk_key;
2129 			orig_slot += left_nr;
2130 			btrfs_node_key(mid, &disk_key, 0);
2131 			ret = tree_mod_log_insert_key(parent, pslot,
2132 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2133 			BUG_ON(ret < 0);
2134 			btrfs_set_node_key(parent, &disk_key, pslot);
2135 			btrfs_mark_buffer_dirty(parent);
2136 			if (btrfs_header_nritems(left) > orig_slot) {
2137 				path->nodes[level] = left;
2138 				path->slots[level + 1] -= 1;
2139 				path->slots[level] = orig_slot;
2140 				btrfs_tree_unlock(mid);
2141 				free_extent_buffer(mid);
2142 			} else {
2143 				orig_slot -=
2144 					btrfs_header_nritems(left);
2145 				path->slots[level] = orig_slot;
2146 				btrfs_tree_unlock(left);
2147 				free_extent_buffer(left);
2148 			}
2149 			return 0;
2150 		}
2151 		btrfs_tree_unlock(left);
2152 		free_extent_buffer(left);
2153 	}
2154 	right = btrfs_read_node_slot(parent, pslot + 1);
2155 	if (IS_ERR(right))
2156 		right = NULL;
2157 
2158 	/*
2159 	 * then try to empty the rightmost buffer into the middle
2160 	 */
2161 	if (right) {
2162 		u32 right_nr;
2163 
2164 		btrfs_tree_lock(right);
2165 		btrfs_set_lock_blocking_write(right);
2166 
2167 		right_nr = btrfs_header_nritems(right);
2168 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2169 			wret = 1;
2170 		} else {
2171 			ret = btrfs_cow_block(trans, root, right,
2172 					      parent, pslot + 1,
2173 					      &right);
2174 			if (ret)
2175 				wret = 1;
2176 			else {
2177 				wret = balance_node_right(trans, right, mid);
2178 			}
2179 		}
2180 		if (wret < 0)
2181 			ret = wret;
2182 		if (wret == 0) {
2183 			struct btrfs_disk_key disk_key;
2184 
2185 			btrfs_node_key(right, &disk_key, 0);
2186 			ret = tree_mod_log_insert_key(parent, pslot + 1,
2187 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2188 			BUG_ON(ret < 0);
2189 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2190 			btrfs_mark_buffer_dirty(parent);
2191 
2192 			if (btrfs_header_nritems(mid) <= orig_slot) {
2193 				path->nodes[level] = right;
2194 				path->slots[level + 1] += 1;
2195 				path->slots[level] = orig_slot -
2196 					btrfs_header_nritems(mid);
2197 				btrfs_tree_unlock(mid);
2198 				free_extent_buffer(mid);
2199 			} else {
2200 				btrfs_tree_unlock(right);
2201 				free_extent_buffer(right);
2202 			}
2203 			return 0;
2204 		}
2205 		btrfs_tree_unlock(right);
2206 		free_extent_buffer(right);
2207 	}
2208 	return 1;
2209 }
2210 
2211 /*
2212  * readahead one full node of leaves, finding things that are close
2213  * to the block in 'slot', and triggering readahead on them.
2214  */
2215 static void reada_for_search(struct btrfs_fs_info *fs_info,
2216 			     struct btrfs_path *path,
2217 			     int level, int slot, u64 objectid)
2218 {
2219 	struct extent_buffer *node;
2220 	struct btrfs_disk_key disk_key;
2221 	u32 nritems;
2222 	u64 search;
2223 	u64 target;
2224 	u64 nread = 0;
2225 	struct extent_buffer *eb;
2226 	u32 nr;
2227 	u32 blocksize;
2228 	u32 nscan = 0;
2229 
2230 	if (level != 1)
2231 		return;
2232 
2233 	if (!path->nodes[level])
2234 		return;
2235 
2236 	node = path->nodes[level];
2237 
2238 	search = btrfs_node_blockptr(node, slot);
2239 	blocksize = fs_info->nodesize;
2240 	eb = find_extent_buffer(fs_info, search);
2241 	if (eb) {
2242 		free_extent_buffer(eb);
2243 		return;
2244 	}
2245 
2246 	target = search;
2247 
2248 	nritems = btrfs_header_nritems(node);
2249 	nr = slot;
2250 
2251 	while (1) {
2252 		if (path->reada == READA_BACK) {
2253 			if (nr == 0)
2254 				break;
2255 			nr--;
2256 		} else if (path->reada == READA_FORWARD) {
2257 			nr++;
2258 			if (nr >= nritems)
2259 				break;
2260 		}
2261 		if (path->reada == READA_BACK && objectid) {
2262 			btrfs_node_key(node, &disk_key, nr);
2263 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2264 				break;
2265 		}
2266 		search = btrfs_node_blockptr(node, nr);
2267 		if ((search <= target && target - search <= 65536) ||
2268 		    (search > target && search - target <= 65536)) {
2269 			readahead_tree_block(fs_info, search);
2270 			nread += blocksize;
2271 		}
2272 		nscan++;
2273 		if (nread > 65536 || nscan > 32)
2274 			break;
2275 	}
2276 }
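
/*
 * Illustrative sketch (hypothetical helper): readahead is opt-in per
 * path.  A caller expecting a forward scan sets path->reada before
 * searching; reada_for_search() then fires from read_block_for_search()
 * as the walk descends to level 1.
 */
static inline struct btrfs_path *example_alloc_scan_path(void)
{
	struct btrfs_path *path = btrfs_alloc_path();

	if (path)
		path->reada = READA_FORWARD;
	return path;
}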
2277 
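/*
 * Readahead the two siblings of the block at path->slots[level + 1]:
 * balancing at 'level' may pull from either the left or the right
 * neighbour, so warm both.  Blocks already cached and up to date are
 * skipped.
 */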
2278 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2279 				       struct btrfs_path *path, int level)
2280 {
2281 	int slot;
2282 	int nritems;
2283 	struct extent_buffer *parent;
2284 	struct extent_buffer *eb;
2285 	u64 gen;
2286 	u64 block1 = 0;
2287 	u64 block2 = 0;
2288 
2289 	parent = path->nodes[level + 1];
2290 	if (!parent)
2291 		return;
2292 
2293 	nritems = btrfs_header_nritems(parent);
2294 	slot = path->slots[level + 1];
2295 
2296 	if (slot > 0) {
2297 		block1 = btrfs_node_blockptr(parent, slot - 1);
2298 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2299 		eb = find_extent_buffer(fs_info, block1);
2300 		/*
2301 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2302 		 * don't want to return -EAGAIN here.  That would loop
2303 		 * forever.
2304 		 */
2305 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2306 			block1 = 0;
2307 		free_extent_buffer(eb);
2308 	}
2309 	if (slot + 1 < nritems) {
2310 		block2 = btrfs_node_blockptr(parent, slot + 1);
2311 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2312 		eb = find_extent_buffer(fs_info, block2);
2313 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2314 			block2 = 0;
2315 		free_extent_buffer(eb);
2316 	}
2317 
2318 	if (block1)
2319 		readahead_tree_block(fs_info, block1);
2320 	if (block2)
2321 		readahead_tree_block(fs_info, block2);
2322 }
2323 
2324 
2325 /*
2326  * when we walk down the tree, it is usually safe to unlock the higher layers
2327  * in the tree.  The exceptions are when our path goes through slot 0, because
2328  * operations on the tree might require changing key pointers higher up in the
2329  * tree.
2330  *
2331  * callers might also have set path->keep_locks, which tells this code to keep
2332  * the lock if the path points to the last slot in the block.  This is part of
2333  * walking through the tree, and selecting the next slot in the higher block.
2334  *
2335  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2336  * if lowest_unlock is 1, level 0 won't be unlocked.
2337  */
2338 static noinline void unlock_up(struct btrfs_path *path, int level,
2339 			       int lowest_unlock, int min_write_lock_level,
2340 			       int *write_lock_level)
2341 {
2342 	int i;
2343 	int skip_level = level;
2344 	int no_skips = 0;
2345 	struct extent_buffer *t;
2346 
2347 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2348 		if (!path->nodes[i])
2349 			break;
2350 		if (!path->locks[i])
2351 			break;
2352 		if (!no_skips && path->slots[i] == 0) {
2353 			skip_level = i + 1;
2354 			continue;
2355 		}
2356 		if (!no_skips && path->keep_locks) {
2357 			u32 nritems;
2358 			t = path->nodes[i];
2359 			nritems = btrfs_header_nritems(t);
2360 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2361 				skip_level = i + 1;
2362 				continue;
2363 			}
2364 		}
2365 		if (skip_level < i && i >= lowest_unlock)
2366 			no_skips = 1;
2367 
2368 		t = path->nodes[i];
2369 		if (i >= lowest_unlock && i > skip_level) {
2370 			btrfs_tree_unlock_rw(t, path->locks[i]);
2371 			path->locks[i] = 0;
2372 			if (write_lock_level &&
2373 			    i > min_write_lock_level &&
2374 			    i <= *write_lock_level) {
2375 				*write_lock_level = i - 1;
2376 			}
2377 		}
2378 	}
2379 }
2380 
2381 /*
2382  * This releases any locks held in the path starting at level and
2383  * going all the way up to the root.
2384  *
2385  * btrfs_search_slot will keep the lock held on higher nodes in a few
2386  * corner cases, such as COW of the block at slot zero in the node.  This
2387  * ignores those rules, and it should only be called when there are no
2388  * more updates to be done higher up in the tree.
2389  */
2390 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2391 {
2392 	int i;
2393 
2394 	if (path->keep_locks)
2395 		return;
2396 
2397 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2398 		if (!path->nodes[i])
2399 			continue;
2400 		if (!path->locks[i])
2401 			continue;
2402 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2403 		path->locks[i] = 0;
2404 	}
2405 }
2406 
2407 /*
2408  * helper function for btrfs_search_slot.  The goal is to find a block
2409  * in cache without setting the path to blocking.  If we find the block
2410  * we return zero and the path is unchanged.
2411  *
2412  * If we can't find the block, we set the path blocking and do some
2413  * readahead.  -EAGAIN is returned and the search must be repeated.
2414  */
2415 static int
2416 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2417 		      struct extent_buffer **eb_ret, int level, int slot,
2418 		      const struct btrfs_key *key)
2419 {
2420 	struct btrfs_fs_info *fs_info = root->fs_info;
2421 	u64 blocknr;
2422 	u64 gen;
2423 	struct extent_buffer *b = *eb_ret;
2424 	struct extent_buffer *tmp;
2425 	struct btrfs_key first_key;
2426 	int ret;
2427 	int parent_level;
2428 
2429 	blocknr = btrfs_node_blockptr(b, slot);
2430 	gen = btrfs_node_ptr_generation(b, slot);
2431 	parent_level = btrfs_header_level(b);
2432 	btrfs_node_key_to_cpu(b, &first_key, slot);
2433 
2434 	tmp = find_extent_buffer(fs_info, blocknr);
2435 	if (tmp) {
2436 		/* first we do an atomic uptodate check */
2437 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2438 			/*
2439 			 * Do an extra check on first_key: eb can be stale due
2440 			 * to being cached, read from scrub, or have multiple
2441 			 * parents (shared tree blocks).
2442 			 */
2443 			if (btrfs_verify_level_key(tmp,
2444 					parent_level - 1, &first_key, gen)) {
2445 				free_extent_buffer(tmp);
2446 				return -EUCLEAN;
2447 			}
2448 			*eb_ret = tmp;
2449 			return 0;
2450 		}
2451 
2452 		/* the pages were up to date, but we failed
2453 		 * the generation number check.  Do a full
2454 		 * read to get the correct generation number.
2455 		 * We must do this without dropping locks so
2456 		 * we can trust our generation number.
2457 		 */
2458 		btrfs_set_path_blocking(p);
2459 
2460 		/* now we're allowed to do a blocking uptodate check */
2461 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2462 		if (!ret) {
2463 			*eb_ret = tmp;
2464 			return 0;
2465 		}
2466 		free_extent_buffer(tmp);
2467 		btrfs_release_path(p);
2468 		return -EIO;
2469 	}
2470 
2471 	/*
2472 	 * reduce lock contention at high levels
2473 	 * of the btree by dropping locks before
2474 	 * we read.  Don't release the lock on the current
2475 	 * level because we need to walk this node to figure
2476 	 * out which blocks to read.
2477 	 */
2478 	btrfs_unlock_up_safe(p, level + 1);
2479 	btrfs_set_path_blocking(p);
2480 
2481 	if (p->reada != READA_NONE)
2482 		reada_for_search(fs_info, p, level, slot, key->objectid);
2483 
2484 	ret = -EAGAIN;
2485 	tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2486 			      &first_key);
2487 	if (!IS_ERR(tmp)) {
2488 		/*
2489 		 * If the read above didn't mark this buffer up to date,
2490 		 * it will never end up being up to date.  Set ret to -EIO now
2491 		 * and give up so that our caller doesn't loop forever
2492 		 * on our EAGAINs.
2493 		 */
2494 		if (!extent_buffer_uptodate(tmp))
2495 			ret = -EIO;
2496 		free_extent_buffer(tmp);
2497 	} else {
2498 		ret = PTR_ERR(tmp);
2499 	}
2500 
2501 	btrfs_release_path(p);
2502 	return ret;
2503 }
2504 
2505 /*
2506  * helper function for btrfs_search_slot.  This does all of the checks
2507  * for node-level blocks and does any balancing required based on
2508  * the ins_len.
2509  *
2510  * If no extra work was required, zero is returned.  If we had to
2511  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2512  * start over.
2513  */
2514 static int
2515 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2516 		       struct btrfs_root *root, struct btrfs_path *p,
2517 		       struct extent_buffer *b, int level, int ins_len,
2518 		       int *write_lock_level)
2519 {
2520 	struct btrfs_fs_info *fs_info = root->fs_info;
2521 	int ret;
2522 
2523 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2524 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2525 		int sret;
2526 
2527 		if (*write_lock_level < level + 1) {
2528 			*write_lock_level = level + 1;
2529 			btrfs_release_path(p);
2530 			goto again;
2531 		}
2532 
2533 		btrfs_set_path_blocking(p);
2534 		reada_for_balance(fs_info, p, level);
2535 		sret = split_node(trans, root, p, level);
2536 
2537 		BUG_ON(sret > 0);
2538 		if (sret) {
2539 			ret = sret;
2540 			goto done;
2541 		}
2542 		b = p->nodes[level];
2543 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2544 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2545 		int sret;
2546 
2547 		if (*write_lock_level < level + 1) {
2548 			*write_lock_level = level + 1;
2549 			btrfs_release_path(p);
2550 			goto again;
2551 		}
2552 
2553 		btrfs_set_path_blocking(p);
2554 		reada_for_balance(fs_info, p, level);
2555 		sret = balance_level(trans, root, p, level);
2556 
2557 		if (sret) {
2558 			ret = sret;
2559 			goto done;
2560 		}
2561 		b = p->nodes[level];
2562 		if (!b) {
2563 			btrfs_release_path(p);
2564 			goto again;
2565 		}
2566 		BUG_ON(btrfs_header_nritems(b) == 1);
2567 	}
2568 	return 0;
2569 
2570 again:
2571 	ret = -EAGAIN;
2572 done:
2573 	return ret;
2574 }
2575 
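/*
 * Wrapper around btrfs_bin_search() that exploits search locality: once
 * a higher level has seen an exact match (*prev_cmp == 0), the key we
 * want is the first key of every node further down, so slot 0 can be
 * returned without repeating the binary search.
 */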
2576 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2577 		      int level, int *prev_cmp, int *slot)
2578 {
2579 	if (*prev_cmp != 0) {
2580 		*prev_cmp = btrfs_bin_search(b, key, level, slot);
2581 		return *prev_cmp;
2582 	}
2583 
2584 	*slot = 0;
2585 
2586 	return 0;
2587 }
2588 
2589 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2590 		u64 iobjectid, u64 ioff, u8 key_type,
2591 		struct btrfs_key *found_key)
2592 {
2593 	int ret;
2594 	struct btrfs_key key;
2595 	struct extent_buffer *eb;
2596 
2597 	ASSERT(path);
2598 	ASSERT(found_key);
2599 
2600 	key.type = key_type;
2601 	key.objectid = iobjectid;
2602 	key.offset = ioff;
2603 
2604 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2605 	if (ret < 0)
2606 		return ret;
2607 
2608 	eb = path->nodes[0];
2609 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2610 		ret = btrfs_next_leaf(fs_root, path);
2611 		if (ret)
2612 			return ret;
2613 		eb = path->nodes[0];
2614 	}
2615 
2616 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2617 	if (found_key->type != key.type ||
2618 			found_key->objectid != key.objectid)
2619 		return 1;
2620 
2621 	return 0;
2622 }
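
/*
 * Illustrative sketch (hypothetical caller and key type): a minimal
 * lookup built on btrfs_find_item().  It returns 0 and fills *found_key
 * when an item with the given objectid and type exists, 1 when nothing
 * matches, and < 0 on error.
 */
static inline int example_find_item(struct btrfs_root *fs_root, u64 objectid,
				    struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_find_item(fs_root, path, objectid, 0,
			      BTRFS_ROOT_ITEM_KEY, found_key);
	btrfs_free_path(path);
	return ret;
}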
2623 
2624 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2625 							struct btrfs_path *p,
2626 							int write_lock_level)
2627 {
2628 	struct btrfs_fs_info *fs_info = root->fs_info;
2629 	struct extent_buffer *b;
2630 	int root_lock;
2631 	int level = 0;
2632 
2633 	/* We try very hard to do read locks on the root */
2634 	root_lock = BTRFS_READ_LOCK;
2635 
2636 	if (p->search_commit_root) {
2637 		/*
2638 		 * The commit roots are read only so we always do read locks,
2639 		 * and we always must hold the commit_root_sem when doing
2640 		 * searches on them; the only exception is send, where we don't
2641 		 * want to block transaction commits for a long time, so
2642 		 * we need to clone the commit root in order to avoid races
2643 		 * with transaction commits that create a snapshot of one of
2644 		 * the roots used by a send operation.
2645 		 */
2646 		if (p->need_commit_sem) {
2647 			down_read(&fs_info->commit_root_sem);
2648 			b = btrfs_clone_extent_buffer(root->commit_root);
2649 			up_read(&fs_info->commit_root_sem);
2650 			if (!b)
2651 				return ERR_PTR(-ENOMEM);
2652 
2653 		} else {
2654 			b = root->commit_root;
2655 			extent_buffer_get(b);
2656 		}
2657 		level = btrfs_header_level(b);
2658 		/*
2659 		 * Ensure that all callers have set skip_locking when
2660 		 * p->search_commit_root = 1.
2661 		 */
2662 		ASSERT(p->skip_locking == 1);
2663 
2664 		goto out;
2665 	}
2666 
2667 	if (p->skip_locking) {
2668 		b = btrfs_root_node(root);
2669 		level = btrfs_header_level(b);
2670 		goto out;
2671 	}
2672 
2673 	/*
2674 	 * If the level is set to maximum, we can skip trying to get the read
2675 	 * lock.
2676 	 */
2677 	if (write_lock_level < BTRFS_MAX_LEVEL) {
2678 		/*
2679 		 * We don't know the level of the root node until we actually
2680 		 * have it read locked
2681 		 */
2682 		b = btrfs_read_lock_root_node(root);
2683 		level = btrfs_header_level(b);
2684 		if (level > write_lock_level)
2685 			goto out;
2686 
2687 		/* Whoops, must trade for write lock */
2688 		btrfs_tree_read_unlock(b);
2689 		free_extent_buffer(b);
2690 	}
2691 
2692 	b = btrfs_lock_root_node(root);
2693 	root_lock = BTRFS_WRITE_LOCK;
2694 
2695 	/* The level might have changed, check again */
2696 	level = btrfs_header_level(b);
2697 
2698 out:
2699 	p->nodes[level] = b;
2700 	if (!p->skip_locking)
2701 		p->locks[level] = root_lock;
2702 	/*
2703 	 * Callers are responsible for dropping b's references.
2704 	 */
2705 	return b;
2706 }
2707 
2708 
2709 /*
2710  * btrfs_search_slot - look for a key in a tree and perform necessary
2711  * modifications to preserve tree invariants.
2712  *
2713  * @trans:	Handle of transaction, used when modifying the tree
2714  * @p:		Holds all btree nodes along the search path
2715  * @root:	The root of the tree to search
2716  * @key:	The key we are looking for
2717  * @ins_len:	Indicates purpose of search, for inserts it is 1, for
2718  *		deletions it's -1. 0 for plain searches
2719  * @cow:	boolean indicating whether CoW operations should be performed.
2720  *		Must always be 1 when modifying the tree.
2721  *
2722  * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2723  * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2724  *
2725  * If @key is found, 0 is returned and you can find the item in the leaf level
2726  * of the path (level 0)
2727  *
2728  * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2729  * points to the slot where it should be inserted
2730  *
2731  * If an error is encountered while searching the tree a negative error number
2732  * is returned
2733  */
2734 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2735 		      const struct btrfs_key *key, struct btrfs_path *p,
2736 		      int ins_len, int cow)
2737 {
2738 	struct extent_buffer *b;
2739 	int slot;
2740 	int ret;
2741 	int err;
2742 	int level;
2743 	int lowest_unlock = 1;
2744 	/* everything at write_lock_level or lower must be write locked */
2745 	int write_lock_level = 0;
2746 	u8 lowest_level = 0;
2747 	int min_write_lock_level;
2748 	int prev_cmp;
2749 
2750 	lowest_level = p->lowest_level;
2751 	WARN_ON(lowest_level && ins_len > 0);
2752 	WARN_ON(p->nodes[0] != NULL);
2753 	BUG_ON(!cow && ins_len);
2754 
2755 	if (ins_len < 0) {
2756 		lowest_unlock = 2;
2757 
2758 		/* when we are removing items, we might have to go up to level
2759 		 * two as we update tree pointers.  Make sure we keep write
2760 		 * locks for those levels as well
2761 		 */
2762 		write_lock_level = 2;
2763 	} else if (ins_len > 0) {
2764 		/*
2765 		 * for inserting items, make sure we have a write lock on
2766 		 * level 1 so we can update keys
2767 		 */
2768 		write_lock_level = 1;
2769 	}
2770 
2771 	if (!cow)
2772 		write_lock_level = -1;
2773 
2774 	if (cow && (p->keep_locks || p->lowest_level))
2775 		write_lock_level = BTRFS_MAX_LEVEL;
2776 
2777 	min_write_lock_level = write_lock_level;
2778 
2779 again:
2780 	prev_cmp = -1;
2781 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
2782 	if (IS_ERR(b)) {
2783 		ret = PTR_ERR(b);
2784 		goto done;
2785 	}
2786 
2787 	while (b) {
2788 		level = btrfs_header_level(b);
2789 
2790 		/*
2791 		 * setup the path here so we can release it under lock
2792 		 * contention with the cow code
2793 		 */
2794 		if (cow) {
2795 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2796 
2797 			/*
2798 			 * if we don't really need to cow this block
2799 			 * then we don't want to set the path blocking,
2800 			 * so we test it here
2801 			 */
2802 			if (!should_cow_block(trans, root, b)) {
2803 				trans->dirty = true;
2804 				goto cow_done;
2805 			}
2806 
2807 			/*
2808 			 * must have write locks on this node and the
2809 			 * parent
2810 			 */
2811 			if (level > write_lock_level ||
2812 			    (level + 1 > write_lock_level &&
2813 			    level + 1 < BTRFS_MAX_LEVEL &&
2814 			    p->nodes[level + 1])) {
2815 				write_lock_level = level + 1;
2816 				btrfs_release_path(p);
2817 				goto again;
2818 			}
2819 
2820 			btrfs_set_path_blocking(p);
2821 			if (last_level)
2822 				err = btrfs_cow_block(trans, root, b, NULL, 0,
2823 						      &b);
2824 			else
2825 				err = btrfs_cow_block(trans, root, b,
2826 						      p->nodes[level + 1],
2827 						      p->slots[level + 1], &b);
2828 			if (err) {
2829 				ret = err;
2830 				goto done;
2831 			}
2832 		}
2833 cow_done:
2834 		p->nodes[level] = b;
2835 		/*
2836 		 * Leave the path with blocking locks to avoid massive
2837 		 * lock context switches; this is intentional.
2838 		 */
2839 
2840 		/*
2841 		 * we have a lock on b and as long as we aren't changing
2842 		 * the tree, there is no way for the items in b to change.
2843 		 * It is safe to drop the lock on our parent before we
2844 		 * go through the expensive btree search on b.
2845 		 *
2846 		 * If we're inserting or deleting (ins_len != 0), then we might
2847 		 * be changing slot zero, which may require changing the parent.
2848 		 * So, we can't drop the lock until after we know which slot
2849 		 * we're operating on.
2850 		 */
2851 		if (!ins_len && !p->keep_locks) {
2852 			int u = level + 1;
2853 
2854 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2855 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2856 				p->locks[u] = 0;
2857 			}
2858 		}
2859 
2860 		ret = key_search(b, key, level, &prev_cmp, &slot);
2861 		if (ret < 0)
2862 			goto done;
2863 
2864 		if (level != 0) {
2865 			int dec = 0;
2866 			if (ret && slot > 0) {
2867 				dec = 1;
2868 				slot -= 1;
2869 			}
2870 			p->slots[level] = slot;
2871 			err = setup_nodes_for_search(trans, root, p, b, level,
2872 					     ins_len, &write_lock_level);
2873 			if (err == -EAGAIN)
2874 				goto again;
2875 			if (err) {
2876 				ret = err;
2877 				goto done;
2878 			}
2879 			b = p->nodes[level];
2880 			slot = p->slots[level];
2881 
2882 			/*
2883 			 * slot 0 is special: if we change the key
2884 			 * we have to update the parent pointer,
2885 			 * which means we must have a write lock
2886 			 * on the parent
2887 			 */
2888 			if (slot == 0 && ins_len &&
2889 			    write_lock_level < level + 1) {
2890 				write_lock_level = level + 1;
2891 				btrfs_release_path(p);
2892 				goto again;
2893 			}
2894 
2895 			unlock_up(p, level, lowest_unlock,
2896 				  min_write_lock_level, &write_lock_level);
2897 
2898 			if (level == lowest_level) {
2899 				if (dec)
2900 					p->slots[level]++;
2901 				goto done;
2902 			}
2903 
2904 			err = read_block_for_search(root, p, &b, level,
2905 						    slot, key);
2906 			if (err == -EAGAIN)
2907 				goto again;
2908 			if (err) {
2909 				ret = err;
2910 				goto done;
2911 			}
2912 
2913 			if (!p->skip_locking) {
2914 				level = btrfs_header_level(b);
2915 				if (level <= write_lock_level) {
2916 					if (!btrfs_try_tree_write_lock(b)) {
2917 						btrfs_set_path_blocking(p);
2918 						btrfs_tree_lock(b);
2919 					}
2920 					p->locks[level] = BTRFS_WRITE_LOCK;
2921 				} else {
2922 					if (!btrfs_tree_read_lock_atomic(b)) {
2923 						btrfs_set_path_blocking(p);
2924 						btrfs_tree_read_lock(b);
2925 					}
2926 					p->locks[level] = BTRFS_READ_LOCK;
2927 				}
2928 				p->nodes[level] = b;
2929 			}
2930 		} else {
2931 			p->slots[level] = slot;
2932 			if (ins_len > 0 &&
2933 			    btrfs_leaf_free_space(b) < ins_len) {
2934 				if (write_lock_level < 1) {
2935 					write_lock_level = 1;
2936 					btrfs_release_path(p);
2937 					goto again;
2938 				}
2939 
2940 				btrfs_set_path_blocking(p);
2941 				err = split_leaf(trans, root, key,
2942 						 p, ins_len, ret == 0);
2943 
2944 				BUG_ON(err > 0);
2945 				if (err) {
2946 					ret = err;
2947 					goto done;
2948 				}
2949 			}
2950 			if (!p->search_for_split)
2951 				unlock_up(p, level, lowest_unlock,
2952 					  min_write_lock_level, NULL);
2953 			goto done;
2954 		}
2955 	}
2956 	ret = 1;
2957 done:
2958 	/*
2959 	 * we don't really know what they plan on doing with the path
2960 	 * from here on, so for now just mark it as blocking
2961 	 */
2962 	if (!p->leave_spinning)
2963 		btrfs_set_path_blocking(p);
2964 	if (ret < 0 && !p->skip_release_on_error)
2965 		btrfs_release_path(p);
2966 	return ret;
2967 }
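
/*
 * Illustrative sketch (not part of the original file): the canonical
 * read-only lookup built on btrfs_search_slot().  With trans == NULL,
 * ins_len == 0 and cow == 0 the tree is never modified; a return of 1
 * means the key was not found and path->slots[0] is where it would be
 * inserted.
 */
static inline int example_lookup(struct btrfs_root *root,
				 const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		/* exact match: item is at path->nodes[0], path->slots[0] */
	}

	/* drops all locks and extent buffer references, then the path */
	btrfs_free_path(path);
	return ret;
}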
2968 
2969 /*
2970  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2971  * current state of the tree together with the operations recorded in the tree
2972  * modification log to search for the key in a previous version of this tree, as
2973  * denoted by the time_seq parameter.
2974  *
2975  * Naturally, there is no support for insert, delete or cow operations.
2976  *
2977  * The resulting path and return value will be set up as if we called
2978  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2979  */
2980 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2981 			  struct btrfs_path *p, u64 time_seq)
2982 {
2983 	struct btrfs_fs_info *fs_info = root->fs_info;
2984 	struct extent_buffer *b;
2985 	int slot;
2986 	int ret;
2987 	int err;
2988 	int level;
2989 	int lowest_unlock = 1;
2990 	u8 lowest_level = 0;
2991 	int prev_cmp = -1;
2992 
2993 	lowest_level = p->lowest_level;
2994 	WARN_ON(p->nodes[0] != NULL);
2995 
2996 	if (p->search_commit_root) {
2997 		BUG_ON(time_seq);
2998 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2999 	}
3000 
3001 again:
3002 	b = get_old_root(root, time_seq);
3003 	if (!b) {
3004 		ret = -EIO;
3005 		goto done;
3006 	}
3007 	level = btrfs_header_level(b);
3008 	p->locks[level] = BTRFS_READ_LOCK;
3009 
3010 	while (b) {
3011 		level = btrfs_header_level(b);
3012 		p->nodes[level] = b;
3013 
3014 		/*
3015 		 * we have a lock on b and as long as we aren't changing
3016 		 * the tree, there is no way for the items in b to change.
3017 		 * It is safe to drop the lock on our parent before we
3018 		 * go through the expensive btree search on b.
3019 		 */
3020 		btrfs_unlock_up_safe(p, level + 1);
3021 
3022 		/*
3023 		 * Since we can unwind extent buffers we want to do a real
3024 		 * search every time.
3025 		 */
3026 		prev_cmp = -1;
3027 		ret = key_search(b, key, level, &prev_cmp, &slot);
3028 		if (ret < 0)
3029 			goto done;
3030 
3031 		if (level != 0) {
3032 			int dec = 0;
3033 			if (ret && slot > 0) {
3034 				dec = 1;
3035 				slot -= 1;
3036 			}
3037 			p->slots[level] = slot;
3038 			unlock_up(p, level, lowest_unlock, 0, NULL);
3039 
3040 			if (level == lowest_level) {
3041 				if (dec)
3042 					p->slots[level]++;
3043 				goto done;
3044 			}
3045 
3046 			err = read_block_for_search(root, p, &b, level,
3047 						    slot, key);
3048 			if (err == -EAGAIN)
3049 				goto again;
3050 			if (err) {
3051 				ret = err;
3052 				goto done;
3053 			}
3054 
3055 			level = btrfs_header_level(b);
3056 			if (!btrfs_tree_read_lock_atomic(b)) {
3057 				btrfs_set_path_blocking(p);
3058 				btrfs_tree_read_lock(b);
3059 			}
3060 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3061 			if (!b) {
3062 				ret = -ENOMEM;
3063 				goto done;
3064 			}
3065 			p->locks[level] = BTRFS_READ_LOCK;
3066 			p->nodes[level] = b;
3067 		} else {
3068 			p->slots[level] = slot;
3069 			unlock_up(p, level, lowest_unlock, 0, NULL);
3070 			goto done;
3071 		}
3072 	}
3073 	ret = 1;
3074 done:
3075 	if (!p->leave_spinning)
3076 		btrfs_set_path_blocking(p);
3077 	if (ret < 0)
3078 		btrfs_release_path(p);
3079 
3080 	return ret;
3081 }
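
/*
 * Illustrative sketch (hypothetical caller, e.g. backref walking): a
 * point-in-time lookup with btrfs_search_old_slot().  The time_seq
 * value is assumed to come from btrfs_get_tree_mod_seq(); the result
 * is strictly read-only.
 */
static inline int example_old_lookup(struct btrfs_root *root,
				     const struct btrfs_key *key,
				     struct btrfs_path *path, u64 time_seq)
{
	/* behaves like btrfs_search_slot() with ins_len == 0 and cow == 0 */
	return btrfs_search_old_slot(root, key, path, time_seq);
}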
3082 
3083 /*
3084  * helper to use instead of btrfs_search_slot if no exact match is needed but
3085  * instead the next or previous item should be returned.
3086  * When find_higher is true, the next higher item is returned, the next lower
3087  * otherwise.
3088  * When return_any and find_higher are both true, and no higher item is found,
3089  * return the next lower instead.
3090  * When return_any is true and find_higher is false, and no lower item is found,
3091  * return the next higher instead.
3092  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3093  * < 0 on error
3094  */
3095 int btrfs_search_slot_for_read(struct btrfs_root *root,
3096 			       const struct btrfs_key *key,
3097 			       struct btrfs_path *p, int find_higher,
3098 			       int return_any)
3099 {
3100 	int ret;
3101 	struct extent_buffer *leaf;
3102 
3103 again:
3104 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3105 	if (ret <= 0)
3106 		return ret;
3107 	/*
3108 	 * a return value of 1 means the path is at the position where the
3109 	 * item should be inserted. Normally this is the next bigger item,
3110 	 * but in case the previous item is the last in a leaf, path points
3111 	 * to the first free slot in the previous leaf, i.e. at an invalid
3112 	 * item.
3113 	 */
3114 	leaf = p->nodes[0];
3115 
3116 	if (find_higher) {
3117 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3118 			ret = btrfs_next_leaf(root, p);
3119 			if (ret <= 0)
3120 				return ret;
3121 			if (!return_any)
3122 				return 1;
3123 			/*
3124 			 * no higher item found, return the next
3125 			 * lower instead
3126 			 */
3127 			return_any = 0;
3128 			find_higher = 0;
3129 			btrfs_release_path(p);
3130 			goto again;
3131 		}
3132 	} else {
3133 		if (p->slots[0] == 0) {
3134 			ret = btrfs_prev_leaf(root, p);
3135 			if (ret < 0)
3136 				return ret;
3137 			if (!ret) {
3138 				leaf = p->nodes[0];
3139 				if (p->slots[0] == btrfs_header_nritems(leaf))
3140 					p->slots[0]--;
3141 				return 0;
3142 			}
3143 			if (!return_any)
3144 				return 1;
3145 			/*
3146 			 * no lower item found, return the next
3147 			 * higher instead
3148 			 */
3149 			return_any = 0;
3150 			find_higher = 1;
3151 			btrfs_release_path(p);
3152 			goto again;
3153 		} else {
3154 			--p->slots[0];
3155 		}
3156 	}
3157 	return 0;
3158 }
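
/*
 * Illustrative sketch (hypothetical helper): find the first item at or
 * after 'key' with btrfs_search_slot_for_read().  find_higher = 1 asks
 * for the next higher item on a miss; return_any = 0 turns an empty
 * result into a plain 1 instead of falling back to the next lower item.
 */
static inline int example_next_item(struct btrfs_root *root,
				    const struct btrfs_key *key,
				    struct btrfs_path *path,
				    struct btrfs_key *found)
{
	int ret;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
	return ret;
}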
3159 
3160 /*
3161  * adjust the pointers going up the tree, starting at level
3162  * making sure the right key of each node points to 'key'.
3163  * This is used after shifting pointers to the left, so it stops
3164  * fixing up pointers when a given leaf/node is not in slot 0 of the
3165  * higher levels
3166  *
3167  */
3168 static void fixup_low_keys(struct btrfs_path *path,
3169 			   struct btrfs_disk_key *key, int level)
3170 {
3171 	int i;
3172 	struct extent_buffer *t;
3173 	int ret;
3174 
3175 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3176 		int tslot = path->slots[i];
3177 
3178 		if (!path->nodes[i])
3179 			break;
3180 		t = path->nodes[i];
3181 		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3182 				GFP_ATOMIC);
3183 		BUG_ON(ret < 0);
3184 		btrfs_set_node_key(t, key, tslot);
3185 		btrfs_mark_buffer_dirty(path->nodes[i]);
3186 		if (tslot != 0)
3187 			break;
3188 	}
3189 }
3190 
3191 /*
3192  * update item key.
3193  *
3194  * This function isn't completely safe. It's the caller's responsibility
3195  * to ensure that the new key won't break the key ordering.
3196  */
3197 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3198 			     struct btrfs_path *path,
3199 			     const struct btrfs_key *new_key)
3200 {
3201 	struct btrfs_disk_key disk_key;
3202 	struct extent_buffer *eb;
3203 	int slot;
3204 
3205 	eb = path->nodes[0];
3206 	slot = path->slots[0];
3207 	if (slot > 0) {
3208 		btrfs_item_key(eb, &disk_key, slot - 1);
3209 		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
3210 			btrfs_crit(fs_info,
3211 		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3212 				   slot, btrfs_disk_key_objectid(&disk_key),
3213 				   btrfs_disk_key_type(&disk_key),
3214 				   btrfs_disk_key_offset(&disk_key),
3215 				   new_key->objectid, new_key->type,
3216 				   new_key->offset);
3217 			btrfs_print_leaf(eb);
3218 			BUG();
3219 		}
3220 	}
3221 	if (slot < btrfs_header_nritems(eb) - 1) {
3222 		btrfs_item_key(eb, &disk_key, slot + 1);
3223 		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
3224 			btrfs_crit(fs_info,
3225 		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3226 				   slot, btrfs_disk_key_objectid(&disk_key),
3227 				   btrfs_disk_key_type(&disk_key),
3228 				   btrfs_disk_key_offset(&disk_key),
3229 				   new_key->objectid, new_key->type,
3230 				   new_key->offset);
3231 			btrfs_print_leaf(eb);
3232 			BUG();
3233 		}
3234 	}
3235 
3236 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3237 	btrfs_set_item_key(eb, &disk_key, slot);
3238 	btrfs_mark_buffer_dirty(eb);
3239 	if (slot == 0)
3240 		fixup_low_keys(path, &disk_key, 1);
3241 }
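
/*
 * Illustrative sketch (hypothetical caller): rewriting an item's key in
 * place.  The caller already holds the leaf through 'path' and must
 * guarantee the new key still sorts between its neighbours, since
 * btrfs_set_item_key_safe() only BUGs on a violation rather than
 * recovering from it.
 */
static inline void example_move_key_offset(struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   struct btrfs_key *key, u64 new_off)
{
	key->offset = new_off;	/* must keep the leaf ordering intact */
	btrfs_set_item_key_safe(fs_info, path, key);
}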
3242 
3243 /*
3244  * try to push data from one node into the next node left in the
3245  * tree.
3246  *
3247  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3248  * error, and > 0 if there was no room in the left hand block.
3249  */
3250 static int push_node_left(struct btrfs_trans_handle *trans,
3251 			  struct extent_buffer *dst,
3252 			  struct extent_buffer *src, int empty)
3253 {
3254 	struct btrfs_fs_info *fs_info = trans->fs_info;
3255 	int push_items = 0;
3256 	int src_nritems;
3257 	int dst_nritems;
3258 	int ret = 0;
3259 
3260 	src_nritems = btrfs_header_nritems(src);
3261 	dst_nritems = btrfs_header_nritems(dst);
3262 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3263 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3264 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3265 
3266 	if (!empty && src_nritems <= 8)
3267 		return 1;
3268 
3269 	if (push_items <= 0)
3270 		return 1;
3271 
3272 	if (empty) {
3273 		push_items = min(src_nritems, push_items);
3274 		if (push_items < src_nritems) {
3275 			/* leave at least 8 pointers in the node if
3276 			 * we aren't going to empty it
3277 			 */
3278 			if (src_nritems - push_items < 8) {
3279 				if (push_items <= 8)
3280 					return 1;
3281 				push_items -= 8;
3282 			}
3283 		}
3284 	} else
3285 		push_items = min(src_nritems - 8, push_items);
3286 
3287 	ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
3288 	if (ret) {
3289 		btrfs_abort_transaction(trans, ret);
3290 		return ret;
3291 	}
3292 	copy_extent_buffer(dst, src,
3293 			   btrfs_node_key_ptr_offset(dst_nritems),
3294 			   btrfs_node_key_ptr_offset(0),
3295 			   push_items * sizeof(struct btrfs_key_ptr));
3296 
3297 	if (push_items < src_nritems) {
3298 		/*
3299 		 * Don't call tree_mod_log_insert_move here, key removal was
3300 		 * already fully logged by tree_mod_log_eb_copy above.
3301 		 */
3302 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3303 				      btrfs_node_key_ptr_offset(push_items),
3304 				      (src_nritems - push_items) *
3305 				      sizeof(struct btrfs_key_ptr));
3306 	}
3307 	btrfs_set_header_nritems(src, src_nritems - push_items);
3308 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3309 	btrfs_mark_buffer_dirty(src);
3310 	btrfs_mark_buffer_dirty(dst);
3311 
3312 	return ret;
3313 }
3314 
3315 /*
3316  * try to push data from one node into the next node right in the
3317  * tree.
3318  *
3319  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3320  * error, and > 0 if there was no room in the right hand block.
3321  *
3322  * this will only push up to 1/2 the contents of the left node over
3323  */
3324 static int balance_node_right(struct btrfs_trans_handle *trans,
3325 			      struct extent_buffer *dst,
3326 			      struct extent_buffer *src)
3327 {
3328 	struct btrfs_fs_info *fs_info = trans->fs_info;
3329 	int push_items = 0;
3330 	int max_push;
3331 	int src_nritems;
3332 	int dst_nritems;
3333 	int ret = 0;
3334 
3335 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3336 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3337 
3338 	src_nritems = btrfs_header_nritems(src);
3339 	dst_nritems = btrfs_header_nritems(dst);
3340 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3341 	if (push_items <= 0)
3342 		return 1;
3343 
3344 	if (src_nritems < 4)
3345 		return 1;
3346 
3347 	max_push = src_nritems / 2 + 1;
3348 	/* don't try to empty the node */
3349 	if (max_push >= src_nritems)
3350 		return 1;
3351 
3352 	if (max_push < push_items)
3353 		push_items = max_push;
3354 
3355 	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3356 	BUG_ON(ret < 0);
3357 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3358 				      btrfs_node_key_ptr_offset(0),
3359 				      (dst_nritems) *
3360 				      sizeof(struct btrfs_key_ptr));
3361 
3362 	ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
3363 				   push_items);
3364 	if (ret) {
3365 		btrfs_abort_transaction(trans, ret);
3366 		return ret;
3367 	}
3368 	copy_extent_buffer(dst, src,
3369 			   btrfs_node_key_ptr_offset(0),
3370 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3371 			   push_items * sizeof(struct btrfs_key_ptr));
3372 
3373 	btrfs_set_header_nritems(src, src_nritems - push_items);
3374 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3375 
3376 	btrfs_mark_buffer_dirty(src);
3377 	btrfs_mark_buffer_dirty(dst);
3378 
3379 	return ret;
3380 }
3381 
3382 /*
3383  * helper function to insert a new root level in the tree.
3384  * A new node is allocated, and a single item is inserted to
3385  * point to the existing root
3386  *
3387  * returns zero on success or < 0 on failure.
3388  */
3389 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3390 			   struct btrfs_root *root,
3391 			   struct btrfs_path *path, int level)
3392 {
3393 	struct btrfs_fs_info *fs_info = root->fs_info;
3394 	u64 lower_gen;
3395 	struct extent_buffer *lower;
3396 	struct extent_buffer *c;
3397 	struct extent_buffer *old;
3398 	struct btrfs_disk_key lower_key;
3399 	int ret;
3400 
3401 	BUG_ON(path->nodes[level]);
3402 	BUG_ON(path->nodes[level-1] != root->node);
3403 
3404 	lower = path->nodes[level-1];
3405 	if (level == 1)
3406 		btrfs_item_key(lower, &lower_key, 0);
3407 	else
3408 		btrfs_node_key(lower, &lower_key, 0);
3409 
3410 	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
3411 					 root->node->start, 0);
3412 	if (IS_ERR(c))
3413 		return PTR_ERR(c);
3414 
3415 	root_add_used(root, fs_info->nodesize);
3416 
3417 	btrfs_set_header_nritems(c, 1);
3418 	btrfs_set_node_key(c, &lower_key, 0);
3419 	btrfs_set_node_blockptr(c, 0, lower->start);
3420 	lower_gen = btrfs_header_generation(lower);
3421 	WARN_ON(lower_gen != trans->transid);
3422 
3423 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3424 
3425 	btrfs_mark_buffer_dirty(c);
3426 
3427 	old = root->node;
3428 	ret = tree_mod_log_insert_root(root->node, c, 0);
3429 	BUG_ON(ret < 0);
3430 	rcu_assign_pointer(root->node, c);
3431 
3432 	/* the super has an extra ref to root->node */
3433 	free_extent_buffer(old);
3434 
3435 	add_root_to_dirty_list(root);
3436 	extent_buffer_get(c);
3437 	path->nodes[level] = c;
3438 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3439 	path->slots[level] = 0;
3440 	return 0;
3441 }
3442 
3443 /*
3444  * worker function to insert a single pointer in a node.
3445  * the node should have enough room for the pointer already
3446  *
3447  * slot and level indicate where you want the key to go, and
3448  * blocknr is the block the key points to.
3449  */
3450 static void insert_ptr(struct btrfs_trans_handle *trans,
3451 		       struct btrfs_path *path,
3452 		       struct btrfs_disk_key *key, u64 bytenr,
3453 		       int slot, int level)
3454 {
3455 	struct extent_buffer *lower;
3456 	int nritems;
3457 	int ret;
3458 
3459 	BUG_ON(!path->nodes[level]);
3460 	btrfs_assert_tree_locked(path->nodes[level]);
3461 	lower = path->nodes[level];
3462 	nritems = btrfs_header_nritems(lower);
3463 	BUG_ON(slot > nritems);
3464 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
3465 	if (slot != nritems) {
3466 		if (level) {
3467 			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3468 					nritems - slot);
3469 			BUG_ON(ret < 0);
3470 		}
3471 		memmove_extent_buffer(lower,
3472 			      btrfs_node_key_ptr_offset(slot + 1),
3473 			      btrfs_node_key_ptr_offset(slot),
3474 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3475 	}
3476 	if (level) {
3477 		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3478 				GFP_NOFS);
3479 		BUG_ON(ret < 0);
3480 	}
3481 	btrfs_set_node_key(lower, key, slot);
3482 	btrfs_set_node_blockptr(lower, slot, bytenr);
3483 	WARN_ON(trans->transid == 0);
3484 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3485 	btrfs_set_header_nritems(lower, nritems + 1);
3486 	btrfs_mark_buffer_dirty(lower);
3487 }
3488 
3489 /*
3490  * split the node at the specified level in path in two.
3491  * The path is corrected to point to the appropriate node after the split
3492  *
3493  * Before splitting this tries to make some room in the node by pushing
3494  * left and right, if either one works, it returns right away.
3495  *
3496  * returns 0 on success and < 0 on failure
3497  */
3498 static noinline int split_node(struct btrfs_trans_handle *trans,
3499 			       struct btrfs_root *root,
3500 			       struct btrfs_path *path, int level)
3501 {
3502 	struct btrfs_fs_info *fs_info = root->fs_info;
3503 	struct extent_buffer *c;
3504 	struct extent_buffer *split;
3505 	struct btrfs_disk_key disk_key;
3506 	int mid;
3507 	int ret;
3508 	u32 c_nritems;
3509 
3510 	c = path->nodes[level];
3511 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3512 	if (c == root->node) {
3513 		/*
3514 		 * trying to split the root, let's make a new one
3515 		 *
3516 		 * tree mod log: We don't log the removal of the old root in
3517 		 * insert_new_root, because that root buffer will be kept as a
3518 		 * normal node. We are going to log removal of half of the
3519 		 * elements below with tree_mod_log_eb_copy. We're holding a
3520 		 * tree lock on the buffer, which is why we cannot race with
3521 		 * other tree_mod_log users.
3522 		 */
3523 		ret = insert_new_root(trans, root, path, level + 1);
3524 		if (ret)
3525 			return ret;
3526 	} else {
3527 		ret = push_nodes_for_insert(trans, root, path, level);
3528 		c = path->nodes[level];
3529 		if (!ret && btrfs_header_nritems(c) <
3530 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3531 			return 0;
3532 		if (ret < 0)
3533 			return ret;
3534 	}
3535 
3536 	c_nritems = btrfs_header_nritems(c);
3537 	mid = (c_nritems + 1) / 2;
3538 	btrfs_node_key(c, &disk_key, mid);
3539 
3540 	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
3541 					     c->start, 0);
3542 	if (IS_ERR(split))
3543 		return PTR_ERR(split);
3544 
3545 	root_add_used(root, fs_info->nodesize);
3546 	ASSERT(btrfs_header_level(c) == level);
3547 
3548 	ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
3549 	if (ret) {
3550 		btrfs_abort_transaction(trans, ret);
3551 		return ret;
3552 	}
3553 	copy_extent_buffer(split, c,
3554 			   btrfs_node_key_ptr_offset(0),
3555 			   btrfs_node_key_ptr_offset(mid),
3556 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3557 	btrfs_set_header_nritems(split, c_nritems - mid);
3558 	btrfs_set_header_nritems(c, mid);
3559 	ret = 0;
3560 
3561 	btrfs_mark_buffer_dirty(c);
3562 	btrfs_mark_buffer_dirty(split);
3563 
3564 	insert_ptr(trans, path, &disk_key, split->start,
3565 		   path->slots[level + 1] + 1, level + 1);
3566 
3567 	if (path->slots[level] >= mid) {
3568 		path->slots[level] -= mid;
3569 		btrfs_tree_unlock(c);
3570 		free_extent_buffer(c);
3571 		path->nodes[level] = split;
3572 		path->slots[level + 1] += 1;
3573 	} else {
3574 		btrfs_tree_unlock(split);
3575 		free_extent_buffer(split);
3576 	}
3577 	return ret;
3578 }
3579 
3580 /*
3581  * how many bytes are required to store the items in a leaf.  start
3582  * and nr indicate which items in the leaf to check.  This totals up the
3583  * space used both by the item structs and the item data
3584  */
3585 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3586 {
3587 	struct btrfs_item *start_item;
3588 	struct btrfs_item *end_item;
3589 	struct btrfs_map_token token;
3590 	int data_len;
3591 	int nritems = btrfs_header_nritems(l);
3592 	int end = min(nritems, start + nr) - 1;
3593 
3594 	if (!nr)
3595 		return 0;
3596 	btrfs_init_map_token(&token, l);
3597 	start_item = btrfs_item_nr(start);
3598 	end_item = btrfs_item_nr(end);
3599 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3600 		btrfs_token_item_size(l, start_item, &token);
3601 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3602 	data_len += sizeof(struct btrfs_item) * nr;
3603 	WARN_ON(data_len < 0);
3604 	return data_len;
3605 }
3606 
3607 /*
3608  * The space between the end of the leaf items and
3609  * the start of the leaf data.  IOW, how much room
3610  * the leaf has left for both items and data
3611  */
3612 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
3613 {
3614 	struct btrfs_fs_info *fs_info = leaf->fs_info;
3615 	int nritems = btrfs_header_nritems(leaf);
3616 	int ret;
3617 
3618 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3619 	if (ret < 0) {
3620 		btrfs_crit(fs_info,
3621 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3622 			   ret,
3623 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3624 			   leaf_space_used(leaf, 0, nritems), nritems);
3625 	}
3626 	return ret;
3627 }
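
/*
 * Illustrative sketch: the usual "will it fit" test done before leaf
 * insertion.  data_size accounts for the item data plus a new
 * struct btrfs_item header, mirroring the ins_len check in
 * btrfs_search_slot().
 */
static inline bool example_leaf_has_room(struct extent_buffer *leaf,
					 u32 data_size)
{
	return btrfs_leaf_free_space(leaf) >= (int)data_size;
}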
3628 
3629 /*
3630  * min slot controls the lowest index we're willing to push to the
3631  * right.  We'll push up to and including min_slot, but no lower
3632  */
3633 static noinline int __push_leaf_right(struct btrfs_path *path,
3634 				      int data_size, int empty,
3635 				      struct extent_buffer *right,
3636 				      int free_space, u32 left_nritems,
3637 				      u32 min_slot)
3638 {
3639 	struct btrfs_fs_info *fs_info = right->fs_info;
3640 	struct extent_buffer *left = path->nodes[0];
3641 	struct extent_buffer *upper = path->nodes[1];
3642 	struct btrfs_map_token token;
3643 	struct btrfs_disk_key disk_key;
3644 	int slot;
3645 	u32 i;
3646 	int push_space = 0;
3647 	int push_items = 0;
3648 	struct btrfs_item *item;
3649 	u32 nr;
3650 	u32 right_nritems;
3651 	u32 data_end;
3652 	u32 this_item_size;
3653 
3654 	if (empty)
3655 		nr = 0;
3656 	else
3657 		nr = max_t(u32, 1, min_slot);
3658 
3659 	if (path->slots[0] >= left_nritems)
3660 		push_space += data_size;
3661 
3662 	slot = path->slots[1];
3663 	i = left_nritems - 1;
3664 	while (i >= nr) {
3665 		item = btrfs_item_nr(i);
3666 
3667 		if (!empty && push_items > 0) {
3668 			if (path->slots[0] > i)
3669 				break;
3670 			if (path->slots[0] == i) {
3671 				int space = btrfs_leaf_free_space(left);
3672 
3673 				if (space + push_space * 2 > free_space)
3674 					break;
3675 			}
3676 		}
3677 
3678 		if (path->slots[0] == i)
3679 			push_space += data_size;
3680 
3681 		this_item_size = btrfs_item_size(left, item);
3682 		if (this_item_size + sizeof(*item) + push_space > free_space)
3683 			break;
3684 
3685 		push_items++;
3686 		push_space += this_item_size + sizeof(*item);
3687 		if (i == 0)
3688 			break;
3689 		i--;
3690 	}
3691 
3692 	if (push_items == 0)
3693 		goto out_unlock;
3694 
3695 	WARN_ON(!empty && push_items == left_nritems);
3696 
3697 	/* push left to right */
3698 	right_nritems = btrfs_header_nritems(right);
3699 
3700 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3701 	push_space -= leaf_data_end(left);
3702 
3703 	/* make room in the right data area */
3704 	data_end = leaf_data_end(right);
3705 	memmove_extent_buffer(right,
3706 			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3707 			      BTRFS_LEAF_DATA_OFFSET + data_end,
3708 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3709 
3710 	/* copy from the left data area */
3711 	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3712 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3713 		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
3714 		     push_space);
3715 
3716 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3717 			      btrfs_item_nr_offset(0),
3718 			      right_nritems * sizeof(struct btrfs_item));
3719 
3720 	/* copy the items from left to right */
3721 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3722 		   btrfs_item_nr_offset(left_nritems - push_items),
3723 		   push_items * sizeof(struct btrfs_item));
3724 
3725 	/* update the item pointers */
3726 	btrfs_init_map_token(&token, right);
3727 	right_nritems += push_items;
3728 	btrfs_set_header_nritems(right, right_nritems);
3729 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3730 	for (i = 0; i < right_nritems; i++) {
3731 		item = btrfs_item_nr(i);
3732 		push_space -= btrfs_token_item_size(right, item, &token);
3733 		btrfs_set_token_item_offset(right, item, push_space, &token);
3734 	}
3735 
3736 	left_nritems -= push_items;
3737 	btrfs_set_header_nritems(left, left_nritems);
3738 
3739 	if (left_nritems)
3740 		btrfs_mark_buffer_dirty(left);
3741 	else
3742 		btrfs_clean_tree_block(left);
3743 
3744 	btrfs_mark_buffer_dirty(right);
3745 
3746 	btrfs_item_key(right, &disk_key, 0);
3747 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3748 	btrfs_mark_buffer_dirty(upper);
3749 
3750 	/* then fixup the leaf pointer in the path */
3751 	if (path->slots[0] >= left_nritems) {
3752 		path->slots[0] -= left_nritems;
3753 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3754 			btrfs_clean_tree_block(path->nodes[0]);
3755 		btrfs_tree_unlock(path->nodes[0]);
3756 		free_extent_buffer(path->nodes[0]);
3757 		path->nodes[0] = right;
3758 		path->slots[1] += 1;
3759 	} else {
3760 		btrfs_tree_unlock(right);
3761 		free_extent_buffer(right);
3762 	}
3763 	return 0;
3764 
3765 out_unlock:
3766 	btrfs_tree_unlock(right);
3767 	free_extent_buffer(right);
3768 	return 1;
3769 }
3770 
3771 /*
3772  * push some data in the path leaf to the right, trying to free up at
3773  * least data_size bytes.
3774  *
3775  * returns 1 if the push failed because the other node didn't have enough
3776  * room, 0 if everything worked out and < 0 if there were major errors.
3777  *
3778  * this will push starting from min_slot to the end of the leaf.  It won't
3779  * push any slot lower than min_slot
3780  */
3781 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3782 			   *root, struct btrfs_path *path,
3783 			   int min_data_size, int data_size,
3784 			   int empty, u32 min_slot)
3785 {
3786 	struct extent_buffer *left = path->nodes[0];
3787 	struct extent_buffer *right;
3788 	struct extent_buffer *upper;
3789 	int slot;
3790 	int free_space;
3791 	u32 left_nritems;
3792 	int ret;
3793 
3794 	if (!path->nodes[1])
3795 		return 1;
3796 
3797 	slot = path->slots[1];
3798 	upper = path->nodes[1];
3799 	if (slot >= btrfs_header_nritems(upper) - 1)
3800 		return 1;
3801 
3802 	btrfs_assert_tree_locked(path->nodes[1]);
3803 
3804 	right = btrfs_read_node_slot(upper, slot + 1);
3805 	/*
3806 	 * slot + 1 is not valid or we fail to read the right node,
3807 	 * no big deal, just return.
3808 	 */
3809 	if (IS_ERR(right))
3810 		return 1;
3811 
3812 	btrfs_tree_lock(right);
3813 	btrfs_set_lock_blocking_write(right);
3814 
3815 	free_space = btrfs_leaf_free_space(right);
3816 	if (free_space < data_size)
3817 		goto out_unlock;
3818 
3819 	/* cow and double check */
3820 	ret = btrfs_cow_block(trans, root, right, upper,
3821 			      slot + 1, &right);
3822 	if (ret)
3823 		goto out_unlock;
3824 
3825 	free_space = btrfs_leaf_free_space(right);
3826 	if (free_space < data_size)
3827 		goto out_unlock;
3828 
3829 	left_nritems = btrfs_header_nritems(left);
3830 	if (left_nritems == 0)
3831 		goto out_unlock;
3832 
3833 	if (path->slots[0] == left_nritems && !empty) {
3834 		/* Key greater than all keys in the leaf, right neighbor has
3835 		 * enough room for it and we're not emptying our leaf to delete
3836 		 * it, therefore use right neighbor to insert the new item and
3837 		 * no need to touch/dirty our left leaf. */
3838 		btrfs_tree_unlock(left);
3839 		free_extent_buffer(left);
3840 		path->nodes[0] = right;
3841 		path->slots[0] = 0;
3842 		path->slots[1]++;
3843 		return 0;
3844 	}
3845 
3846 	return __push_leaf_right(path, min_data_size, empty,
3847 				right, free_space, left_nritems, min_slot);
3848 out_unlock:
3849 	btrfs_tree_unlock(right);
3850 	free_extent_buffer(right);
3851 	return 1;
3852 }
3853 
3854 /*
3855  * push some data in the path leaf to the left, trying to free up at
3856  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3857  *
3858  * max_slot can put a limit on how far into the leaf we'll push items.  The
3859  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3860  * items
3861  */
3862 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3863 				     int empty, struct extent_buffer *left,
3864 				     int free_space, u32 right_nritems,
3865 				     u32 max_slot)
3866 {
3867 	struct btrfs_fs_info *fs_info = left->fs_info;
3868 	struct btrfs_disk_key disk_key;
3869 	struct extent_buffer *right = path->nodes[0];
3870 	int i;
3871 	int push_space = 0;
3872 	int push_items = 0;
3873 	struct btrfs_item *item;
3874 	u32 old_left_nritems;
3875 	u32 nr;
3876 	int ret = 0;
3877 	u32 this_item_size;
3878 	u32 old_left_item_size;
3879 	struct btrfs_map_token token;
3880 
3881 	if (empty)
3882 		nr = min(right_nritems, max_slot);
3883 	else
3884 		nr = min(right_nritems - 1, max_slot);
3885 
3886 	for (i = 0; i < nr; i++) {
3887 		item = btrfs_item_nr(i);
3888 
3889 		if (!empty && push_items > 0) {
3890 			if (path->slots[0] < i)
3891 				break;
3892 			if (path->slots[0] == i) {
3893 				int space = btrfs_leaf_free_space(right);
3894 
3895 				if (space + push_space * 2 > free_space)
3896 					break;
3897 			}
3898 		}
3899 
3900 		if (path->slots[0] == i)
3901 			push_space += data_size;
3902 
3903 		this_item_size = btrfs_item_size(right, item);
3904 		if (this_item_size + sizeof(*item) + push_space > free_space)
3905 			break;
3906 
3907 		push_items++;
3908 		push_space += this_item_size + sizeof(*item);
3909 	}
3910 
3911 	if (push_items == 0) {
3912 		ret = 1;
3913 		goto out;
3914 	}
3915 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3916 
3917 	/* push data from right to left */
3918 	copy_extent_buffer(left, right,
3919 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3920 			   btrfs_item_nr_offset(0),
3921 			   push_items * sizeof(struct btrfs_item));
3922 
3923 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3924 		     btrfs_item_offset_nr(right, push_items - 1);
3925 
3926 	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3927 		     leaf_data_end(left) - push_space,
3928 		     BTRFS_LEAF_DATA_OFFSET +
3929 		     btrfs_item_offset_nr(right, push_items - 1),
3930 		     push_space);
3931 	old_left_nritems = btrfs_header_nritems(left);
3932 	BUG_ON(old_left_nritems <= 0);
3933 
3934 	btrfs_init_map_token(&token, left);
3935 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3936 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3937 		u32 ioff;
3938 
3939 		item = btrfs_item_nr(i);
3940 
3941 		ioff = btrfs_token_item_offset(left, item, &token);
3942 		btrfs_set_token_item_offset(left, item,
3943 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3944 		      &token);
3945 	}
3946 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3947 
3948 	/* fixup right node */
3949 	if (push_items > right_nritems)
3950 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3951 		       right_nritems);
3952 
3953 	if (push_items < right_nritems) {
3954 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3955 						  leaf_data_end(right);
3956 		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3957 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3958 				      BTRFS_LEAF_DATA_OFFSET +
3959 				      leaf_data_end(right), push_space);
3960 
3961 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3962 			      btrfs_item_nr_offset(push_items),
3963 			     (btrfs_header_nritems(right) - push_items) *
3964 			     sizeof(struct btrfs_item));
3965 	}
3966 
3967 	btrfs_init_map_token(&token, right);
3968 	right_nritems -= push_items;
3969 	btrfs_set_header_nritems(right, right_nritems);
3970 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3971 	for (i = 0; i < right_nritems; i++) {
3972 		item = btrfs_item_nr(i);
3973 
3974 		push_space = push_space - btrfs_token_item_size(right,
3975 								item, &token);
3976 		btrfs_set_token_item_offset(right, item, push_space, &token);
3977 	}
3978 
3979 	btrfs_mark_buffer_dirty(left);
3980 	if (right_nritems)
3981 		btrfs_mark_buffer_dirty(right);
3982 	else
3983 		btrfs_clean_tree_block(right);
3984 
3985 	btrfs_item_key(right, &disk_key, 0);
3986 	fixup_low_keys(path, &disk_key, 1);
3987 
3988 	/* then fixup the leaf pointer in the path */
3989 	if (path->slots[0] < push_items) {
3990 		path->slots[0] += old_left_nritems;
3991 		btrfs_tree_unlock(path->nodes[0]);
3992 		free_extent_buffer(path->nodes[0]);
3993 		path->nodes[0] = left;
3994 		path->slots[1] -= 1;
3995 	} else {
3996 		btrfs_tree_unlock(left);
3997 		free_extent_buffer(left);
3998 		path->slots[0] -= push_items;
3999 	}
4000 	BUG_ON(path->slots[0] < 0);
4001 	return ret;
4002 out:
4003 	btrfs_tree_unlock(left);
4004 	free_extent_buffer(left);
4005 	return ret;
4006 }
4007 
4008 /*
4009  * push some data in the path leaf to the left, trying to free up at
4010  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
4011  *
4012  * max_slot can put a limit on how far into the leaf we'll push items.  The
4013  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
4014  * items
4015  */
4016 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
4017 			  *root, struct btrfs_path *path, int min_data_size,
4018 			  int data_size, int empty, u32 max_slot)
4019 {
4020 	struct extent_buffer *right = path->nodes[0];
4021 	struct extent_buffer *left;
4022 	int slot;
4023 	int free_space;
4024 	u32 right_nritems;
4025 	int ret = 0;
4026 
4027 	slot = path->slots[1];
4028 	if (slot == 0)
4029 		return 1;
4030 	if (!path->nodes[1])
4031 		return 1;
4032 
4033 	right_nritems = btrfs_header_nritems(right);
4034 	if (right_nritems == 0)
4035 		return 1;
4036 
4037 	btrfs_assert_tree_locked(path->nodes[1]);
4038 
4039 	left = btrfs_read_node_slot(path->nodes[1], slot - 1);
4040 	/*
4041 	 * slot - 1 is not valid or we fail to read the left node,
4042 	 * no big deal, just return.
4043 	 */
4044 	if (IS_ERR(left))
4045 		return 1;
4046 
4047 	btrfs_tree_lock(left);
4048 	btrfs_set_lock_blocking_write(left);
4049 
4050 	free_space = btrfs_leaf_free_space(left);
4051 	if (free_space < data_size) {
4052 		ret = 1;
4053 		goto out;
4054 	}
4055 
4056 	/* cow and double check */
4057 	ret = btrfs_cow_block(trans, root, left,
4058 			      path->nodes[1], slot - 1, &left);
4059 	if (ret) {
4060 		/* we hit -ENOSPC, but it isn't fatal here */
4061 		if (ret == -ENOSPC)
4062 			ret = 1;
4063 		goto out;
4064 	}
4065 
4066 	free_space = btrfs_leaf_free_space(left);
4067 	if (free_space < data_size) {
4068 		ret = 1;
4069 		goto out;
4070 	}
4071 
4072 	return __push_leaf_left(path, min_data_size,
4073 			       empty, left, free_space, right_nritems,
4074 			       max_slot);
4075 out:
4076 	btrfs_tree_unlock(left);
4077 	free_extent_buffer(left);
4078 	return ret;
4079 }
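
/*
 * A hedged sketch of how callers consume the 0/1/<0 convention shared
 * by push_leaf_right() and push_leaf_left() above (split_leaf() below
 * follows this same pattern): only a negative value is a hard error,
 * 1 just means "that side had no room, try something else".
 * example_make_room() is a hypothetical helper, not part of this file.
 */
static int example_make_room(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, int data_size)
{
	int ret;

	ret = push_leaf_right(trans, root, path, data_size, data_size, 0, 0);
	if (ret <= 0)
		return ret;	/* 0: enough room was freed, < 0: error */

	/* the right neighbor was full, try the left one */
	return push_leaf_left(trans, root, path, data_size, data_size, 0,
			      (u32)-1);
}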
4080 
4081 /*
4082  * split the path's leaf in two, making sure there is at least data_size
4083  * available for the resulting leaf level of the path.
4084  */
4085 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4086 				    struct btrfs_path *path,
4087 				    struct extent_buffer *l,
4088 				    struct extent_buffer *right,
4089 				    int slot, int mid, int nritems)
4090 {
4091 	struct btrfs_fs_info *fs_info = trans->fs_info;
4092 	int data_copy_size;
4093 	int rt_data_off;
4094 	int i;
4095 	struct btrfs_disk_key disk_key;
4096 	struct btrfs_map_token token;
4097 
4098 	nritems = nritems - mid;
4099 	btrfs_set_header_nritems(right, nritems);
4100 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
4101 
4102 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4103 			   btrfs_item_nr_offset(mid),
4104 			   nritems * sizeof(struct btrfs_item));
4105 
4106 	copy_extent_buffer(right, l,
4107 		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4108 		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4109 		     leaf_data_end(l), data_copy_size);
4110 
4111 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4112 
4113 	btrfs_init_map_token(&token, right);
4114 	for (i = 0; i < nritems; i++) {
4115 		struct btrfs_item *item = btrfs_item_nr(i);
4116 		u32 ioff;
4117 
4118 		ioff = btrfs_token_item_offset(right, item, &token);
4119 		btrfs_set_token_item_offset(right, item,
4120 					    ioff + rt_data_off, &token);
4121 	}
4122 
4123 	btrfs_set_header_nritems(l, mid);
4124 	btrfs_item_key(right, &disk_key, 0);
4125 	insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
4126 
4127 	btrfs_mark_buffer_dirty(right);
4128 	btrfs_mark_buffer_dirty(l);
4129 	BUG_ON(path->slots[0] != slot);
4130 
4131 	if (mid <= slot) {
4132 		btrfs_tree_unlock(path->nodes[0]);
4133 		free_extent_buffer(path->nodes[0]);
4134 		path->nodes[0] = right;
4135 		path->slots[0] -= mid;
4136 		path->slots[1] += 1;
4137 	} else {
4138 		btrfs_tree_unlock(right);
4139 		free_extent_buffer(right);
4140 	}
4141 
4142 	BUG_ON(path->slots[0] < 0);
4143 }
4144 
4145 /*
4146  * double splits happen when we need to insert a big item in the middle
4147  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4148  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4149  *          A                 B                 C
4150  *
4151  * We avoid this by trying to push the items on either side of our target
4152  * into the adjacent leaves.  If all goes well we can avoid the double split
4153  * completely.
4154  */
4155 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4156 					  struct btrfs_root *root,
4157 					  struct btrfs_path *path,
4158 					  int data_size)
4159 {
4160 	int ret;
4161 	int progress = 0;
4162 	int slot;
4163 	u32 nritems;
4164 	int space_needed = data_size;
4165 
4166 	slot = path->slots[0];
4167 	if (slot < btrfs_header_nritems(path->nodes[0]))
4168 		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4169 
4170 	/*
4171 	 * try to push all the items after our slot into the
4172 	 * right leaf
4173 	 */
4174 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4175 	if (ret < 0)
4176 		return ret;
4177 
4178 	if (ret == 0)
4179 		progress++;
4180 
4181 	nritems = btrfs_header_nritems(path->nodes[0]);
4182 	/*
4183 	 * our goal is to get our slot at the start or end of a leaf.  If
4184 	 * we've done so, we're done
4185 	 */
4186 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4187 		return 0;
4188 
4189 	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4190 		return 0;
4191 
4192 	/* try to push all the items before our slot into the left leaf */
4193 	slot = path->slots[0];
4194 	space_needed = data_size;
4195 	if (slot > 0)
4196 		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4197 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4198 	if (ret < 0)
4199 		return ret;
4200 
4201 	if (ret == 0)
4202 		progress++;
4203 
4204 	if (progress)
4205 		return 0;
4206 	return 1;
4207 }
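
/*
 * The space_needed trimming used above, in isolation: when the target
 * slot stays inside this leaf, only the shortfall beyond the leaf's
 * current free space must be pushed to a neighbor, not the full
 * data_size.  A self-contained, purely illustrative helper.
 */
static int example_space_needed(int data_size, int free_space, int slot_stays)
{
	return slot_stays ? data_size - free_space : data_size;
}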
4208 
4209 /*
4210  * split the path's leaf in two, making sure there is at least data_size
4211  * available for the resulting leaf level of the path.
4212  *
4213  * returns 0 if all went well and < 0 on failure.
4214  */
4215 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4216 			       struct btrfs_root *root,
4217 			       const struct btrfs_key *ins_key,
4218 			       struct btrfs_path *path, int data_size,
4219 			       int extend)
4220 {
4221 	struct btrfs_disk_key disk_key;
4222 	struct extent_buffer *l;
4223 	u32 nritems;
4224 	int mid;
4225 	int slot;
4226 	struct extent_buffer *right;
4227 	struct btrfs_fs_info *fs_info = root->fs_info;
4228 	int ret = 0;
4229 	int wret;
4230 	int split;
4231 	int num_doubles = 0;
4232 	int tried_avoid_double = 0;
4233 
4234 	l = path->nodes[0];
4235 	slot = path->slots[0];
4236 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4237 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4238 		return -EOVERFLOW;
4239 
4240 	/* first try to make some room by pushing left and right */
4241 	if (data_size && path->nodes[1]) {
4242 		int space_needed = data_size;
4243 
4244 		if (slot < btrfs_header_nritems(l))
4245 			space_needed -= btrfs_leaf_free_space(l);
4246 
4247 		wret = push_leaf_right(trans, root, path, space_needed,
4248 				       space_needed, 0, 0);
4249 		if (wret < 0)
4250 			return wret;
4251 		if (wret) {
4252 			space_needed = data_size;
4253 			if (slot > 0)
4254 				space_needed -= btrfs_leaf_free_space(l);
4255 			wret = push_leaf_left(trans, root, path, space_needed,
4256 					      space_needed, 0, (u32)-1);
4257 			if (wret < 0)
4258 				return wret;
4259 		}
4260 		l = path->nodes[0];
4261 
4262 		/* did the pushes work? */
4263 		if (btrfs_leaf_free_space(l) >= data_size)
4264 			return 0;
4265 	}
4266 
4267 	if (!path->nodes[1]) {
4268 		ret = insert_new_root(trans, root, path, 1);
4269 		if (ret)
4270 			return ret;
4271 	}
4272 again:
4273 	split = 1;
4274 	l = path->nodes[0];
4275 	slot = path->slots[0];
4276 	nritems = btrfs_header_nritems(l);
4277 	mid = (nritems + 1) / 2;
4278 
4279 	if (mid <= slot) {
4280 		if (nritems == 1 ||
4281 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4282 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4283 			if (slot >= nritems) {
4284 				split = 0;
4285 			} else {
4286 				mid = slot;
4287 				if (mid != nritems &&
4288 				    leaf_space_used(l, mid, nritems - mid) +
4289 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4290 					if (data_size && !tried_avoid_double)
4291 						goto push_for_double;
4292 					split = 2;
4293 				}
4294 			}
4295 		}
4296 	} else {
4297 		if (leaf_space_used(l, 0, mid) + data_size >
4298 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4299 			if (!extend && data_size && slot == 0) {
4300 				split = 0;
4301 			} else if ((extend || !data_size) && slot == 0) {
4302 				mid = 1;
4303 			} else {
4304 				mid = slot;
4305 				if (mid != nritems &&
4306 				    leaf_space_used(l, mid, nritems - mid) +
4307 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4308 					if (data_size && !tried_avoid_double)
4309 						goto push_for_double;
4310 					split = 2;
4311 				}
4312 			}
4313 		}
4314 	}
4315 
4316 	if (split == 0)
4317 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4318 	else
4319 		btrfs_item_key(l, &disk_key, mid);
4320 
4321 	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
4322 					     l->start, 0);
4323 	if (IS_ERR(right))
4324 		return PTR_ERR(right);
4325 
4326 	root_add_used(root, fs_info->nodesize);
4327 
4328 	if (split == 0) {
4329 		if (mid <= slot) {
4330 			btrfs_set_header_nritems(right, 0);
4331 			insert_ptr(trans, path, &disk_key,
4332 				   right->start, path->slots[1] + 1, 1);
4333 			btrfs_tree_unlock(path->nodes[0]);
4334 			free_extent_buffer(path->nodes[0]);
4335 			path->nodes[0] = right;
4336 			path->slots[0] = 0;
4337 			path->slots[1] += 1;
4338 		} else {
4339 			btrfs_set_header_nritems(right, 0);
4340 			insert_ptr(trans, path, &disk_key,
4341 				   right->start, path->slots[1], 1);
4342 			btrfs_tree_unlock(path->nodes[0]);
4343 			free_extent_buffer(path->nodes[0]);
4344 			path->nodes[0] = right;
4345 			path->slots[0] = 0;
4346 			if (path->slots[1] == 0)
4347 				fixup_low_keys(path, &disk_key, 1);
4348 		}
4349 		/*
4350 		 * We create a new leaf 'right' for the required ins_len and
4351 		 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4352 		 * ins_len bytes of content into 'right'.
4353 		 */
4354 		return ret;
4355 	}
4356 
4357 	copy_for_split(trans, path, l, right, slot, mid, nritems);
4358 
4359 	if (split == 2) {
4360 		BUG_ON(num_doubles != 0);
4361 		num_doubles++;
4362 		goto again;
4363 	}
4364 
4365 	return 0;
4366 
4367 push_for_double:
4368 	push_for_double_split(trans, root, path, data_size);
4369 	tried_avoid_double = 1;
4370 	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4371 		return 0;
4372 	goto again;
4373 }
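
/*
 * The initial cut point chosen by split_leaf() above, as a minimal
 * sketch: the leaf is first halved by item count, not by bytes, and
 * only afterwards does the space accounting move the cut to the
 * insertion slot or fall back to a double split.  With nritems == 7
 * this yields mid == 4, i.e. items [4, 7) move to the new right leaf.
 */
static u32 example_split_point(u32 nritems)
{
	return (nritems + 1) / 2;
}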
4374 
4375 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4376 					 struct btrfs_root *root,
4377 					 struct btrfs_path *path, int ins_len)
4378 {
4379 	struct btrfs_key key;
4380 	struct extent_buffer *leaf;
4381 	struct btrfs_file_extent_item *fi;
4382 	u64 extent_len = 0;
4383 	u32 item_size;
4384 	int ret;
4385 
4386 	leaf = path->nodes[0];
4387 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4388 
4389 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4390 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4391 
4392 	if (btrfs_leaf_free_space(leaf) >= ins_len)
4393 		return 0;
4394 
4395 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4396 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4397 		fi = btrfs_item_ptr(leaf, path->slots[0],
4398 				    struct btrfs_file_extent_item);
4399 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4400 	}
4401 	btrfs_release_path(path);
4402 
4403 	path->keep_locks = 1;
4404 	path->search_for_split = 1;
4405 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4406 	path->search_for_split = 0;
4407 	if (ret > 0)
4408 		ret = -EAGAIN;
4409 	if (ret < 0)
4410 		goto err;
4411 
4412 	ret = -EAGAIN;
4413 	leaf = path->nodes[0];
4414 	/* if our item isn't there, return now */
4415 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4416 		goto err;
4417 
4418 	/* the leaf has changed, it now has room.  return now */
4419 	if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
4420 		goto err;
4421 
4422 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4423 		fi = btrfs_item_ptr(leaf, path->slots[0],
4424 				    struct btrfs_file_extent_item);
4425 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4426 			goto err;
4427 	}
4428 
4429 	btrfs_set_path_blocking(path);
4430 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4431 	if (ret)
4432 		goto err;
4433 
4434 	path->keep_locks = 0;
4435 	btrfs_unlock_up_safe(path, 1);
4436 	return 0;
4437 err:
4438 	path->keep_locks = 0;
4439 	return ret;
4440 }
4441 
4442 static noinline int split_item(struct btrfs_path *path,
4443 			       const struct btrfs_key *new_key,
4444 			       unsigned long split_offset)
4445 {
4446 	struct extent_buffer *leaf;
4447 	struct btrfs_item *item;
4448 	struct btrfs_item *new_item;
4449 	int slot;
4450 	char *buf;
4451 	u32 nritems;
4452 	u32 item_size;
4453 	u32 orig_offset;
4454 	struct btrfs_disk_key disk_key;
4455 
4456 	leaf = path->nodes[0];
4457 	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
4458 
4459 	btrfs_set_path_blocking(path);
4460 
4461 	item = btrfs_item_nr(path->slots[0]);
4462 	orig_offset = btrfs_item_offset(leaf, item);
4463 	item_size = btrfs_item_size(leaf, item);
4464 
4465 	buf = kmalloc(item_size, GFP_NOFS);
4466 	if (!buf)
4467 		return -ENOMEM;
4468 
4469 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4470 			    path->slots[0]), item_size);
4471 
4472 	slot = path->slots[0] + 1;
4473 	nritems = btrfs_header_nritems(leaf);
4474 	if (slot != nritems) {
4475 		/* shift the items */
4476 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4477 				btrfs_item_nr_offset(slot),
4478 				(nritems - slot) * sizeof(struct btrfs_item));
4479 	}
4480 
4481 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4482 	btrfs_set_item_key(leaf, &disk_key, slot);
4483 
4484 	new_item = btrfs_item_nr(slot);
4485 
4486 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4487 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4488 
4489 	btrfs_set_item_offset(leaf, item,
4490 			      orig_offset + item_size - split_offset);
4491 	btrfs_set_item_size(leaf, item, split_offset);
4492 
4493 	btrfs_set_header_nritems(leaf, nritems + 1);
4494 
4495 	/* write the data for the start of the original item */
4496 	write_extent_buffer(leaf, buf,
4497 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4498 			    split_offset);
4499 
4500 	/* write the data for the new item */
4501 	write_extent_buffer(leaf, buf + split_offset,
4502 			    btrfs_item_ptr_offset(leaf, slot),
4503 			    item_size - split_offset);
4504 	btrfs_mark_buffer_dirty(leaf);
4505 
4506 	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4507 	kfree(buf);
4508 	return 0;
4509 }
4510 
4511 /*
4512  * This function splits a single item into two items,
4513  * giving 'new_key' to the new item and splitting the
4514  * old one at split_offset (from the start of the item).
4515  *
4516  * The path may be released by this operation.  After
4517  * the split, the path is pointing to the old item.  The
4518  * new item is going to be in the same node as the old one.
4519  *
4520  * Note, the item being split must be small enough to live alone on
4521  * a tree block with room for one extra struct btrfs_item
4522  *
4523  * This allows us to split the item in place, keeping a lock on the
4524  * leaf the entire time.
4525  */
4526 int btrfs_split_item(struct btrfs_trans_handle *trans,
4527 		     struct btrfs_root *root,
4528 		     struct btrfs_path *path,
4529 		     const struct btrfs_key *new_key,
4530 		     unsigned long split_offset)
4531 {
4532 	int ret;
4533 	ret = setup_leaf_for_split(trans, root, path,
4534 				   sizeof(struct btrfs_item));
4535 	if (ret)
4536 		return ret;
4537 
4538 	ret = split_item(path, new_key, split_offset);
4539 	return ret;
4540 }
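
/*
 * Hedged usage sketch for btrfs_split_item().  The (hypothetical)
 * caller has already searched to the item it wants to cut in two; note
 * that setup_leaf_for_split() only accepts EXTENT_DATA and EXTENT_CSUM
 * keys.  Deriving the second half's key by bumping the offset is just
 * an example.
 */
static int example_split_in_half(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	struct btrfs_key new_key;
	u32 size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset += size / 2;	/* second half starts here */

	/* afterwards the path still points at the front half */
	return btrfs_split_item(trans, root, path, &new_key, size / 2);
}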
4541 
4542 /*
4543  * This function duplicates an item, giving 'new_key' to the new item.
4544  * It guarantees both items live in the same tree leaf and the new item
4545  * is contiguous with the original item.
4546  *
4547  * This allows us to split a file extent in place, keeping a lock on the
4548  * leaf the entire time.
4549  */
4550 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4551 			 struct btrfs_root *root,
4552 			 struct btrfs_path *path,
4553 			 const struct btrfs_key *new_key)
4554 {
4555 	struct extent_buffer *leaf;
4556 	int ret;
4557 	u32 item_size;
4558 
4559 	leaf = path->nodes[0];
4560 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4561 	ret = setup_leaf_for_split(trans, root, path,
4562 				   item_size + sizeof(struct btrfs_item));
4563 	if (ret)
4564 		return ret;
4565 
4566 	path->slots[0]++;
4567 	setup_items_for_insert(root, path, new_key, &item_size,
4568 			       item_size, item_size +
4569 			       sizeof(struct btrfs_item), 1);
4570 	leaf = path->nodes[0];
4571 	memcpy_extent_buffer(leaf,
4572 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4573 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4574 			     item_size);
4575 	return 0;
4576 }
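
/*
 * Hedged sketch of the pattern the comment above alludes to: duplicate
 * a file extent item under a new key, after which both copies still
 * describe the whole extent until the caller trims them (for instance
 * with btrfs_truncate_item()).  'split_offset' is a hypothetical file
 * offset inside the extent.
 */
static int example_dup_extent_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, u64 split_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = split_offset;	/* key for the second half */

	/* on success path->slots[0] points at the new copy */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}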
4577 
4578 /*
4579  * make the item pointed to by the path smaller.  new_size indicates
4580  * how small to make it, and from_end tells us if we just chop bytes
4581  * off the end of the item or if we shift the item to chop bytes off
4582  * the front.
4583  */
4584 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
4585 {
4586 	int slot;
4587 	struct extent_buffer *leaf;
4588 	struct btrfs_item *item;
4589 	u32 nritems;
4590 	unsigned int data_end;
4591 	unsigned int old_data_start;
4592 	unsigned int old_size;
4593 	unsigned int size_diff;
4594 	int i;
4595 	struct btrfs_map_token token;
4596 
4597 	leaf = path->nodes[0];
4598 	slot = path->slots[0];
4599 
4600 	old_size = btrfs_item_size_nr(leaf, slot);
4601 	if (old_size == new_size)
4602 		return;
4603 
4604 	nritems = btrfs_header_nritems(leaf);
4605 	data_end = leaf_data_end(leaf);
4606 
4607 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4608 
4609 	size_diff = old_size - new_size;
4610 
4611 	BUG_ON(slot < 0);
4612 	BUG_ON(slot >= nritems);
4613 
4614 	/*
4615 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4616 	 */
4617 	/* first correct the data pointers */
4618 	btrfs_init_map_token(&token, leaf);
4619 	for (i = slot; i < nritems; i++) {
4620 		u32 ioff;
4621 		item = btrfs_item_nr(i);
4622 
4623 		ioff = btrfs_token_item_offset(leaf, item, &token);
4624 		btrfs_set_token_item_offset(leaf, item,
4625 					    ioff + size_diff, &token);
4626 	}
4627 
4628 	/* shift the data */
4629 	if (from_end) {
4630 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4631 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4632 			      data_end, old_data_start + new_size - data_end);
4633 	} else {
4634 		struct btrfs_disk_key disk_key;
4635 		u64 offset;
4636 
4637 		btrfs_item_key(leaf, &disk_key, slot);
4638 
4639 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4640 			unsigned long ptr;
4641 			struct btrfs_file_extent_item *fi;
4642 
4643 			fi = btrfs_item_ptr(leaf, slot,
4644 					    struct btrfs_file_extent_item);
4645 			fi = (struct btrfs_file_extent_item *)(
4646 			     (unsigned long)fi - size_diff);
4647 
4648 			if (btrfs_file_extent_type(leaf, fi) ==
4649 			    BTRFS_FILE_EXTENT_INLINE) {
4650 				ptr = btrfs_item_ptr_offset(leaf, slot);
4651 				memmove_extent_buffer(leaf, ptr,
4652 				      (unsigned long)fi,
4653 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4654 			}
4655 		}
4656 
4657 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4658 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4659 			      data_end, old_data_start - data_end);
4660 
4661 		offset = btrfs_disk_key_offset(&disk_key);
4662 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4663 		btrfs_set_item_key(leaf, &disk_key, slot);
4664 		if (slot == 0)
4665 			fixup_low_keys(path, &disk_key, 1);
4666 	}
4667 
4668 	item = btrfs_item_nr(slot);
4669 	btrfs_set_item_size(leaf, item, new_size);
4670 	btrfs_mark_buffer_dirty(leaf);
4671 
4672 	if (btrfs_leaf_free_space(leaf) < 0) {
4673 		btrfs_print_leaf(leaf);
4674 		BUG();
4675 	}
4676 }
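
/*
 * Minimal usage sketch for btrfs_truncate_item(): drop trailing bytes
 * from the item the path points at.  The caller is assumed to hold the
 * leaf locked and already COWed; shrinking cannot fail, hence no return
 * value.  'drop_bytes' is hypothetical.
 */
static void example_shrink_item(struct btrfs_path *path, u32 drop_bytes)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	/* from_end == 1 keeps the front of the item and chops the tail */
	btrfs_truncate_item(path, old_size - drop_bytes, 1);
}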
4677 
4678 /*
4679  * make the item pointed to by the path bigger, data_size is the added size.
4680  */
4681 void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
4682 {
4683 	int slot;
4684 	struct extent_buffer *leaf;
4685 	struct btrfs_item *item;
4686 	u32 nritems;
4687 	unsigned int data_end;
4688 	unsigned int old_data;
4689 	unsigned int old_size;
4690 	int i;
4691 	struct btrfs_map_token token;
4692 
4693 	leaf = path->nodes[0];
4694 
4695 	nritems = btrfs_header_nritems(leaf);
4696 	data_end = leaf_data_end(leaf);
4697 
4698 	if (btrfs_leaf_free_space(leaf) < data_size) {
4699 		btrfs_print_leaf(leaf);
4700 		BUG();
4701 	}
4702 	slot = path->slots[0];
4703 	old_data = btrfs_item_end_nr(leaf, slot);
4704 
4705 	BUG_ON(slot < 0);
4706 	if (slot >= nritems) {
4707 		btrfs_print_leaf(leaf);
4708 		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4709 			   slot, nritems);
4710 		BUG();
4711 	}
4712 
4713 	/*
4714 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4715 	 */
4716 	/* first correct the data pointers */
4717 	btrfs_init_map_token(&token, leaf);
4718 	for (i = slot; i < nritems; i++) {
4719 		u32 ioff;
4720 		item = btrfs_item_nr(i);
4721 
4722 		ioff = btrfs_token_item_offset(leaf, item, &token);
4723 		btrfs_set_token_item_offset(leaf, item,
4724 					    ioff - data_size, &token);
4725 	}
4726 
4727 	/* shift the data */
4728 	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4729 		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4730 		      data_end, old_data - data_end);
4731 
4732 	data_end = old_data;
4733 	old_size = btrfs_item_size_nr(leaf, slot);
4734 	item = btrfs_item_nr(slot);
4735 	btrfs_set_item_size(leaf, item, old_size + data_size);
4736 	btrfs_mark_buffer_dirty(leaf);
4737 
4738 	if (btrfs_leaf_free_space(leaf) < 0) {
4739 		btrfs_print_leaf(leaf);
4740 		BUG();
4741 	}
4742 }
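
/*
 * Companion sketch for btrfs_extend_item(): grow the current item and
 * fill in the new tail.  The caller is assumed to have verified the
 * leaf has room (the helper BUGs otherwise) and to hold it locked;
 * 'src' is a hypothetical buffer of at least 'grow' bytes.
 */
static void example_grow_item(struct btrfs_path *path, const void *src,
			      u32 grow)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(path, grow);

	/* the item data moved down by 'grow'; the new bytes are the tail */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, src, ptr + old_size, grow);
	btrfs_mark_buffer_dirty(leaf);
}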
4743 
4744 /*
4745  * this is a helper for btrfs_insert_empty_items, the main goal here is
4746  * to save stack depth by doing the bulk of the work in a function
4747  * that doesn't call btrfs_search_slot
4748  */
4749 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4750 			    const struct btrfs_key *cpu_key, u32 *data_size,
4751 			    u32 total_data, u32 total_size, int nr)
4752 {
4753 	struct btrfs_fs_info *fs_info = root->fs_info;
4754 	struct btrfs_item *item;
4755 	int i;
4756 	u32 nritems;
4757 	unsigned int data_end;
4758 	struct btrfs_disk_key disk_key;
4759 	struct extent_buffer *leaf;
4760 	int slot;
4761 	struct btrfs_map_token token;
4762 
4763 	if (path->slots[0] == 0) {
4764 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4765 		fixup_low_keys(path, &disk_key, 1);
4766 	}
4767 	btrfs_unlock_up_safe(path, 1);
4768 
4769 	leaf = path->nodes[0];
4770 	slot = path->slots[0];
4771 
4772 	nritems = btrfs_header_nritems(leaf);
4773 	data_end = leaf_data_end(leaf);
4774 
4775 	if (btrfs_leaf_free_space(leaf) < total_size) {
4776 		btrfs_print_leaf(leaf);
4777 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4778 			   total_size, btrfs_leaf_free_space(leaf));
4779 		BUG();
4780 	}
4781 
4782 	btrfs_init_map_token(&token, leaf);
4783 	if (slot != nritems) {
4784 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4785 
4786 		if (old_data < data_end) {
4787 			btrfs_print_leaf(leaf);
4788 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4789 				   slot, old_data, data_end);
4790 			BUG();
4791 		}
4792 		/*
4793 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4794 		 */
4795 		/* first correct the data pointers */
4796 		for (i = slot; i < nritems; i++) {
4797 			u32 ioff;
4798 
4799 			item = btrfs_item_nr(i);
4800 			ioff = btrfs_token_item_offset(leaf, item, &token);
4801 			btrfs_set_token_item_offset(leaf, item,
4802 						    ioff - total_data, &token);
4803 		}
4804 		/* shift the items */
4805 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4806 			      btrfs_item_nr_offset(slot),
4807 			      (nritems - slot) * sizeof(struct btrfs_item));
4808 
4809 		/* shift the data */
4810 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4811 			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4812 			      data_end, old_data - data_end);
4813 		data_end = old_data;
4814 	}
4815 
4816 	/* setup the item for the new data */
4817 	for (i = 0; i < nr; i++) {
4818 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4819 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4820 		item = btrfs_item_nr(slot + i);
4821 		btrfs_set_token_item_offset(leaf, item,
4822 					    data_end - data_size[i], &token);
4823 		data_end -= data_size[i];
4824 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4825 	}
4826 
4827 	btrfs_set_header_nritems(leaf, nritems + nr);
4828 	btrfs_mark_buffer_dirty(leaf);
4829 
4830 	if (btrfs_leaf_free_space(leaf) < 0) {
4831 		btrfs_print_leaf(leaf);
4832 		BUG();
4833 	}
4834 }
4835 
4836 /*
4837  * Given a key and some data, insert items into the tree.
4838  * This does all the path init required, making room in the tree if needed.
4839  */
4840 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4841 			    struct btrfs_root *root,
4842 			    struct btrfs_path *path,
4843 			    const struct btrfs_key *cpu_key, u32 *data_size,
4844 			    int nr)
4845 {
4846 	int ret = 0;
4847 	int slot;
4848 	int i;
4849 	u32 total_size = 0;
4850 	u32 total_data = 0;
4851 
4852 	for (i = 0; i < nr; i++)
4853 		total_data += data_size[i];
4854 
4855 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4856 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4857 	if (ret == 0)
4858 		return -EEXIST;
4859 	if (ret < 0)
4860 		return ret;
4861 
4862 	slot = path->slots[0];
4863 	BUG_ON(slot < 0);
4864 
4865 	setup_items_for_insert(root, path, cpu_key, data_size,
4866 			       total_data, total_size, nr);
4867 	return 0;
4868 }
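
/*
 * Hedged sketch of batched insertion with btrfs_insert_empty_items():
 * both items are created in a single tree search and land in
 * consecutive slots, with payload space reserved but not yet written.
 * 'keys' stands for two hypothetical keys in tree order that sort next
 * to each other.
 */
static int example_insert_two(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path,
			      const struct btrfs_key *keys)
{
	u32 sizes[2] = { 16, 32 };	/* payload bytes for each item */

	/* on success path->slots[0] points at the first new item */
	return btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
}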
4869 
4870 /*
4871  * Given a key and some data, insert an item into the tree.
4872  * This does all the path init required, making room in the tree if needed.
4873  */
4874 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4875 		      const struct btrfs_key *cpu_key, void *data,
4876 		      u32 data_size)
4877 {
4878 	int ret = 0;
4879 	struct btrfs_path *path;
4880 	struct extent_buffer *leaf;
4881 	unsigned long ptr;
4882 
4883 	path = btrfs_alloc_path();
4884 	if (!path)
4885 		return -ENOMEM;
4886 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4887 	if (!ret) {
4888 		leaf = path->nodes[0];
4889 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4890 		write_extent_buffer(leaf, data, ptr, data_size);
4891 		btrfs_mark_buffer_dirty(leaf);
4892 	}
4893 	btrfs_free_path(path);
4894 	return ret;
4895 }
4896 
4897 /*
4898  * delete the pointer from a given node.
4899  *
4900  * the tree should have been previously balanced so the deletion does not
4901  * empty a node.
4902  */
4903 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4904 		    int level, int slot)
4905 {
4906 	struct extent_buffer *parent = path->nodes[level];
4907 	u32 nritems;
4908 	int ret;
4909 
4910 	nritems = btrfs_header_nritems(parent);
4911 	if (slot != nritems - 1) {
4912 		if (level) {
4913 			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4914 					nritems - slot - 1);
4915 			BUG_ON(ret < 0);
4916 		}
4917 		memmove_extent_buffer(parent,
4918 			      btrfs_node_key_ptr_offset(slot),
4919 			      btrfs_node_key_ptr_offset(slot + 1),
4920 			      sizeof(struct btrfs_key_ptr) *
4921 			      (nritems - slot - 1));
4922 	} else if (level) {
4923 		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4924 				GFP_NOFS);
4925 		BUG_ON(ret < 0);
4926 	}
4927 
4928 	nritems--;
4929 	btrfs_set_header_nritems(parent, nritems);
4930 	if (nritems == 0 && parent == root->node) {
4931 		BUG_ON(btrfs_header_level(root->node) != 1);
4932 		/* just turn the root into a leaf and break */
4933 		btrfs_set_header_level(root->node, 0);
4934 	} else if (slot == 0) {
4935 		struct btrfs_disk_key disk_key;
4936 
4937 		btrfs_node_key(parent, &disk_key, 0);
4938 		fixup_low_keys(path, &disk_key, level + 1);
4939 	}
4940 	btrfs_mark_buffer_dirty(parent);
4941 }
4942 
4943 /*
4944  * a helper function to delete the leaf pointed to by path->slots[1] and
4945  * path->nodes[1].
4946  *
4947  * This deletes the pointer in path->nodes[1] and frees the leaf
4948  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4949  *
4950  * The path must have already been setup for deleting the leaf, including
4951  * all the proper balancing.  path->nodes[1] must be locked.
4952  */
4953 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4954 				    struct btrfs_root *root,
4955 				    struct btrfs_path *path,
4956 				    struct extent_buffer *leaf)
4957 {
4958 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4959 	del_ptr(root, path, 1, path->slots[1]);
4960 
4961 	/*
4962 	 * btrfs_free_extent is expensive, we want to make sure we
4963 	 * aren't holding any locks when we call it
4964 	 */
4965 	btrfs_unlock_up_safe(path, 0);
4966 
4967 	root_sub_used(root, leaf->len);
4968 
4969 	extent_buffer_get(leaf);
4970 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4971 	free_extent_buffer_stale(leaf);
4972 }
4973 /*
4974  * delete the item at the leaf level in path.  If that empties
4975  * the leaf, remove it from the tree
4976  */
4977 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4978 		    struct btrfs_path *path, int slot, int nr)
4979 {
4980 	struct btrfs_fs_info *fs_info = root->fs_info;
4981 	struct extent_buffer *leaf;
4982 	struct btrfs_item *item;
4983 	u32 last_off;
4984 	u32 dsize = 0;
4985 	int ret = 0;
4986 	int wret;
4987 	int i;
4988 	u32 nritems;
4989 
4990 	leaf = path->nodes[0];
4991 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4992 
4993 	for (i = 0; i < nr; i++)
4994 		dsize += btrfs_item_size_nr(leaf, slot + i);
4995 
4996 	nritems = btrfs_header_nritems(leaf);
4997 
4998 	if (slot + nr != nritems) {
4999 		int data_end = leaf_data_end(leaf);
5000 		struct btrfs_map_token token;
5001 
5002 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
5003 			      data_end + dsize,
5004 			      BTRFS_LEAF_DATA_OFFSET + data_end,
5005 			      last_off - data_end);
5006 
5007 		btrfs_init_map_token(&token, leaf);
5008 		for (i = slot + nr; i < nritems; i++) {
5009 			u32 ioff;
5010 
5011 			item = btrfs_item_nr(i);
5012 			ioff = btrfs_token_item_offset(leaf, item, &token);
5013 			btrfs_set_token_item_offset(leaf, item,
5014 						    ioff + dsize, &token);
5015 		}
5016 
5017 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
5018 			      btrfs_item_nr_offset(slot + nr),
5019 			      sizeof(struct btrfs_item) *
5020 			      (nritems - slot - nr));
5021 	}
5022 	btrfs_set_header_nritems(leaf, nritems - nr);
5023 	nritems -= nr;
5024 
5025 	/* delete the leaf if we've emptied it */
5026 	if (nritems == 0) {
5027 		if (leaf == root->node) {
5028 			btrfs_set_header_level(leaf, 0);
5029 		} else {
5030 			btrfs_set_path_blocking(path);
5031 			btrfs_clean_tree_block(leaf);
5032 			btrfs_del_leaf(trans, root, path, leaf);
5033 		}
5034 	} else {
5035 		int used = leaf_space_used(leaf, 0, nritems);
5036 		if (slot == 0) {
5037 			struct btrfs_disk_key disk_key;
5038 
5039 			btrfs_item_key(leaf, &disk_key, 0);
5040 			fixup_low_keys(path, &disk_key, 1);
5041 		}
5042 
5043 		/* delete the leaf if it is mostly empty */
5044 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5045 			/* push_leaf_left fixes the path.
5046 			 * make sure the path still points to our leaf
5047 			 * for possible call to del_ptr below
5048 			 */
5049 			slot = path->slots[1];
5050 			extent_buffer_get(leaf);
5051 
5052 			btrfs_set_path_blocking(path);
5053 			wret = push_leaf_left(trans, root, path, 1, 1,
5054 					      1, (u32)-1);
5055 			if (wret < 0 && wret != -ENOSPC)
5056 				ret = wret;
5057 
5058 			if (path->nodes[0] == leaf &&
5059 			    btrfs_header_nritems(leaf)) {
5060 				wret = push_leaf_right(trans, root, path, 1,
5061 						       1, 1, 0);
5062 				if (wret < 0 && wret != -ENOSPC)
5063 					ret = wret;
5064 			}
5065 
5066 			if (btrfs_header_nritems(leaf) == 0) {
5067 				path->slots[1] = slot;
5068 				btrfs_del_leaf(trans, root, path, leaf);
5069 				free_extent_buffer(leaf);
5070 				ret = 0;
5071 			} else {
5072 				/* if we're still in the path, make sure
5073 				 * we're dirty.  Otherwise, one of the
5074 				 * push_leaf functions must have already
5075 				 * dirtied this buffer
5076 				 */
5077 				if (path->nodes[0] == leaf)
5078 					btrfs_mark_buffer_dirty(leaf);
5079 				free_extent_buffer(leaf);
5080 			}
5081 		} else {
5082 			btrfs_mark_buffer_dirty(leaf);
5083 		}
5084 	}
5085 	return ret;
5086 }
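
/*
 * Minimal sketch of the delete pattern built on btrfs_del_items(): cow
 * search to the first victim, then drop 'nr' contiguous items.  The
 * ins_len of -1 tells btrfs_search_slot() this is a deletion; the key
 * and count are hypothetical.
 */
static int example_delete_range(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				const struct btrfs_key *first_key, int nr)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, first_key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
	else if (ret > 0)
		ret = -ENOENT;	/* first_key wasn't in the tree */

	btrfs_free_path(path);
	return ret;
}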
5087 
5088 /*
5089  * search the tree again to find a leaf with lesser keys
5090  * returns 0 if it found something or 1 if there are no lesser leaves.
5091  * returns < 0 on io errors.
5092  *
5093  * This may release the path, and so you may lose any locks held at the
5094  * time you call it.
5095  */
5096 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5097 {
5098 	struct btrfs_key key;
5099 	struct btrfs_disk_key found_key;
5100 	int ret;
5101 
5102 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5103 
5104 	if (key.offset > 0) {
5105 		key.offset--;
5106 	} else if (key.type > 0) {
5107 		key.type--;
5108 		key.offset = (u64)-1;
5109 	} else if (key.objectid > 0) {
5110 		key.objectid--;
5111 		key.type = (u8)-1;
5112 		key.offset = (u64)-1;
5113 	} else {
5114 		return 1;
5115 	}
5116 
5117 	btrfs_release_path(path);
5118 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5119 	if (ret < 0)
5120 		return ret;
5121 	btrfs_item_key(path->nodes[0], &found_key, 0);
5122 	ret = comp_keys(&found_key, &key);
5123 	/*
5124 	 * We might have had an item with the previous key in the tree right
5125 	 * before we released our path. And after we released our path, that
5126 	 * item might have been pushed to the first slot (0) of the leaf we
5127 	 * were holding due to a tree balance. Alternatively, an item with the
5128 	 * previous key can exist as the only element of a leaf (big fat item).
5129 	 * Therefore account for these 2 cases, so that our callers (like
5130 	 * btrfs_previous_item) don't miss an existing item with a key matching
5131 	 * the previous key we computed above.
5132 	 */
5133 	if (ret <= 0)
5134 		return 0;
5135 	return 1;
5136 }
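
/*
 * Hedged sketch: walk leaves backwards from the current path position
 * with btrfs_prev_leaf() above, the mirror image of the forward walk
 * built on btrfs_next_leaf() further below.  The per-leaf work is left
 * to the (hypothetical) caller.
 */
static int example_walk_back(struct btrfs_root *root, struct btrfs_path *path)
{
	int ret;

	while (1) {
		/* ... inspect the items of path->nodes[0] here ... */

		ret = btrfs_prev_leaf(root, path);
		if (ret)	/* 1: no lesser leaf, < 0: io error */
			return ret;
	}
}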
5137 
5138 /*
5139  * A helper function to walk down the tree starting at min_key, and looking
5140  * for nodes or leaves that have a minimum transaction id.
5141  * This is used by the btree defrag code and tree logging.
5142  *
5143  * This does not cow, but it does stuff the starting key it finds back
5144  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5145  * key and get a writable path.
5146  *
5147  * This honors path->lowest_level to prevent descent past a given level
5148  * of the tree.
5149  *
5150  * min_trans indicates the oldest transaction that you are interested
5151  * in walking through.  Any nodes or leaves older than min_trans are
5152  * skipped over (without reading them).
5153  *
5154  * returns zero if something useful was found, < 0 on error and 1 if there
5155  * was nothing in the tree that matched the search criteria.
5156  */
5157 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5158 			 struct btrfs_path *path,
5159 			 u64 min_trans)
5160 {
5161 	struct extent_buffer *cur;
5162 	struct btrfs_key found_key;
5163 	int slot;
5164 	int sret;
5165 	u32 nritems;
5166 	int level;
5167 	int ret = 1;
5168 	int keep_locks = path->keep_locks;
5169 
5170 	path->keep_locks = 1;
5171 again:
5172 	cur = btrfs_read_lock_root_node(root);
5173 	level = btrfs_header_level(cur);
5174 	WARN_ON(path->nodes[level]);
5175 	path->nodes[level] = cur;
5176 	path->locks[level] = BTRFS_READ_LOCK;
5177 
5178 	if (btrfs_header_generation(cur) < min_trans) {
5179 		ret = 1;
5180 		goto out;
5181 	}
5182 	while (1) {
5183 		nritems = btrfs_header_nritems(cur);
5184 		level = btrfs_header_level(cur);
5185 		sret = btrfs_bin_search(cur, min_key, level, &slot);
5186 		if (sret < 0) {
5187 			ret = sret;
5188 			goto out;
5189 		}
5190 
5191 		/* at the lowest level, we're done, setup the path and exit */
5192 		if (level == path->lowest_level) {
5193 			if (slot >= nritems)
5194 				goto find_next_key;
5195 			ret = 0;
5196 			path->slots[level] = slot;
5197 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5198 			goto out;
5199 		}
5200 		if (sret && slot > 0)
5201 			slot--;
5202 		/*
5203 		 * check this node pointer against the min_trans parameter.
5204 		 * If it is too old, skip to the next one.
5205 		 */
5206 		while (slot < nritems) {
5207 			u64 gen;
5208 
5209 			gen = btrfs_node_ptr_generation(cur, slot);
5210 			if (gen < min_trans) {
5211 				slot++;
5212 				continue;
5213 			}
5214 			break;
5215 		}
5216 find_next_key:
5217 		/*
5218 		 * we didn't find a candidate key in this node, walk forward
5219 		 * and find another one
5220 		 */
5221 		if (slot >= nritems) {
5222 			path->slots[level] = slot;
5223 			btrfs_set_path_blocking(path);
5224 			sret = btrfs_find_next_key(root, path, min_key, level,
5225 						  min_trans);
5226 			if (sret == 0) {
5227 				btrfs_release_path(path);
5228 				goto again;
5229 			} else {
5230 				goto out;
5231 			}
5232 		}
5233 		/* save our key for returning back */
5234 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5235 		path->slots[level] = slot;
5236 		if (level == path->lowest_level) {
5237 			ret = 0;
5238 			goto out;
5239 		}
5240 		btrfs_set_path_blocking(path);
5241 		cur = btrfs_read_node_slot(cur, slot);
5242 		if (IS_ERR(cur)) {
5243 			ret = PTR_ERR(cur);
5244 			goto out;
5245 		}
5246 
5247 		btrfs_tree_read_lock(cur);
5248 
5249 		path->locks[level - 1] = BTRFS_READ_LOCK;
5250 		path->nodes[level - 1] = cur;
5251 		unlock_up(path, level, 1, 0, NULL);
5252 	}
5253 out:
5254 	path->keep_locks = keep_locks;
5255 	if (ret == 0) {
5256 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5257 		btrfs_set_path_blocking(path);
5258 		memcpy(min_key, &found_key, sizeof(found_key));
5259 	}
5260 	return ret;
5261 }
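
/*
 * Hedged sketch of a btrfs_search_forward() scan in the spirit of the
 * tree-log and defrag users mentioned above: visit every slot whose
 * ancestors are at least as new as 'min_trans'.  Since the found key is
 * stuffed back into min_key, bumping the offset resumes strictly after
 * it (a real user would also guard that increment against overflow).
 */
static int example_scan_newer(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 };	/* start before everything */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {	/* 1: nothing newer left, < 0: error */
			if (ret > 0)
				ret = 0;
			break;
		}

		/* ... process the item at path->slots[0] here ... */

		btrfs_release_path(path);
		min_key.offset++;
	}
	btrfs_free_path(path);
	return ret;
}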
5262 
5263 /*
5264  * this is similar to btrfs_next_leaf, but does not try to preserve
5265  * and fixup the path.  It looks for and returns the next key in the
5266  * tree based on the current path and the min_trans parameters.
5267  *
5268  * 0 is returned if another key is found, < 0 if there are any errors
5269  * and 1 is returned if there are no higher keys in the tree
5270  *
5271  * path->keep_locks should be set to 1 on the search made before
5272  * calling this function.
5273  */
5274 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5275 			struct btrfs_key *key, int level, u64 min_trans)
5276 {
5277 	int slot;
5278 	struct extent_buffer *c;
5279 
5280 	WARN_ON(!path->keep_locks && !path->skip_locking);
5281 	while (level < BTRFS_MAX_LEVEL) {
5282 		if (!path->nodes[level])
5283 			return 1;
5284 
5285 		slot = path->slots[level] + 1;
5286 		c = path->nodes[level];
5287 next:
5288 		if (slot >= btrfs_header_nritems(c)) {
5289 			int ret;
5290 			int orig_lowest;
5291 			struct btrfs_key cur_key;
5292 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5293 			    !path->nodes[level + 1])
5294 				return 1;
5295 
5296 			if (path->locks[level + 1] || path->skip_locking) {
5297 				level++;
5298 				continue;
5299 			}
5300 
5301 			slot = btrfs_header_nritems(c) - 1;
5302 			if (level == 0)
5303 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5304 			else
5305 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5306 
5307 			orig_lowest = path->lowest_level;
5308 			btrfs_release_path(path);
5309 			path->lowest_level = level;
5310 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5311 						0, 0);
5312 			path->lowest_level = orig_lowest;
5313 			if (ret < 0)
5314 				return ret;
5315 
5316 			c = path->nodes[level];
5317 			slot = path->slots[level];
5318 			if (ret == 0)
5319 				slot++;
5320 			goto next;
5321 		}
5322 
5323 		if (level == 0)
5324 			btrfs_item_key_to_cpu(c, key, slot);
5325 		else {
5326 			u64 gen = btrfs_node_ptr_generation(c, slot);
5327 
5328 			if (gen < min_trans) {
5329 				slot++;
5330 				goto next;
5331 			}
5332 			btrfs_node_key_to_cpu(c, key, slot);
5333 		}
5334 		return 0;
5335 	}
5336 	return 1;
5337 }
5338 
5339 /*
5340  * search the tree again to find a leaf with greater keys
5341  * returns 0 if it found something or 1 if there are no greater leaves.
5342  * returns < 0 on io errors.
5343  */
5344 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5345 {
5346 	return btrfs_next_old_leaf(root, path, 0);
5347 }
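
/*
 * The canonical forward iteration built on btrfs_next_leaf(): search to
 * a starting key, then visit every following item.  A hedged sketch;
 * the starting key and the per-item work are the (hypothetical)
 * caller's business.
 */
static int example_iterate_from(struct btrfs_root *root,
				const struct btrfs_key *start)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);

		/* ... act on 'found' here ... */

		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}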
5348 
5349 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5350 			u64 time_seq)
5351 {
5352 	int slot;
5353 	int level;
5354 	struct extent_buffer *c;
5355 	struct extent_buffer *next;
5356 	struct btrfs_key key;
5357 	u32 nritems;
5358 	int ret;
5359 	int old_spinning = path->leave_spinning;
5360 	int next_rw_lock = 0;
5361 
5362 	nritems = btrfs_header_nritems(path->nodes[0]);
5363 	if (nritems == 0)
5364 		return 1;
5365 
5366 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5367 again:
5368 	level = 1;
5369 	next = NULL;
5370 	next_rw_lock = 0;
5371 	btrfs_release_path(path);
5372 
5373 	path->keep_locks = 1;
5374 	path->leave_spinning = 1;
5375 
5376 	if (time_seq)
5377 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5378 	else
5379 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5380 	path->keep_locks = 0;
5381 
5382 	if (ret < 0)
5383 		return ret;
5384 
5385 	nritems = btrfs_header_nritems(path->nodes[0]);
5386 	/*
5387 	 * by releasing the path above we dropped all our locks.  A balance
5388 	 * could have added more items next to the key that used to be
5389 	 * at the very end of the block.  So, check again here and
5390 	 * advance the path if there are now more items available.
5391 	 */
5392 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5393 		if (ret == 0)
5394 			path->slots[0]++;
5395 		ret = 0;
5396 		goto done;
5397 	}
5398 	/*
5399 	 * So the above check misses one case:
5400 	 * - after releasing the path above, someone has removed the item that
5401 	 *   used to be at the very end of the block, and balance between leaves
5402 	 *   gets another one with bigger key.offset to replace it.
5403 	 *
5404 	 * This one should be returned as well, or we can get leaf corruption
5405 	 * later (esp. in __btrfs_drop_extents()).
5406 	 *
5407 	 * And a bit more explanation about this check,
5408 	 * with ret > 0, the key isn't found, the path points to the slot
5409 	 * where it should be inserted, so the path->slots[0] item must be the
5410 	 * bigger one.
5411 	 */
5412 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5413 		ret = 0;
5414 		goto done;
5415 	}
5416 
5417 	while (level < BTRFS_MAX_LEVEL) {
5418 		if (!path->nodes[level]) {
5419 			ret = 1;
5420 			goto done;
5421 		}
5422 
5423 		slot = path->slots[level] + 1;
5424 		c = path->nodes[level];
5425 		if (slot >= btrfs_header_nritems(c)) {
5426 			level++;
5427 			if (level == BTRFS_MAX_LEVEL) {
5428 				ret = 1;
5429 				goto done;
5430 			}
5431 			continue;
5432 		}
5433 
5434 		if (next) {
5435 			btrfs_tree_unlock_rw(next, next_rw_lock);
5436 			free_extent_buffer(next);
5437 		}
5438 
5439 		next = c;
5440 		next_rw_lock = path->locks[level];
5441 		ret = read_block_for_search(root, path, &next, level,
5442 					    slot, &key);
5443 		if (ret == -EAGAIN)
5444 			goto again;
5445 
5446 		if (ret < 0) {
5447 			btrfs_release_path(path);
5448 			goto done;
5449 		}
5450 
5451 		if (!path->skip_locking) {
5452 			ret = btrfs_try_tree_read_lock(next);
5453 			if (!ret && time_seq) {
5454 				/*
5455 				 * If we don't get the lock, we may be racing
5456 				 * with push_leaf_left, holding that lock while
5457 				 * itself waiting for the leaf we've currently
5458 				 * locked. To solve this situation, we give up
5459 				 * on our lock and cycle.
5460 				 */
5461 				free_extent_buffer(next);
5462 				btrfs_release_path(path);
5463 				cond_resched();
5464 				goto again;
5465 			}
5466 			if (!ret) {
5467 				btrfs_set_path_blocking(path);
5468 				btrfs_tree_read_lock(next);
5469 			}
5470 			next_rw_lock = BTRFS_READ_LOCK;
5471 		}
5472 		break;
5473 	}
5474 	path->slots[level] = slot;
5475 	while (1) {
5476 		level--;
5477 		c = path->nodes[level];
5478 		if (path->locks[level])
5479 			btrfs_tree_unlock_rw(c, path->locks[level]);
5480 
5481 		free_extent_buffer(c);
5482 		path->nodes[level] = next;
5483 		path->slots[level] = 0;
5484 		if (!path->skip_locking)
5485 			path->locks[level] = next_rw_lock;
5486 		if (!level)
5487 			break;
5488 
5489 		ret = read_block_for_search(root, path, &next, level,
5490 					    0, &key);
5491 		if (ret == -EAGAIN)
5492 			goto again;
5493 
5494 		if (ret < 0) {
5495 			btrfs_release_path(path);
5496 			goto done;
5497 		}
5498 
5499 		if (!path->skip_locking) {
5500 			ret = btrfs_try_tree_read_lock(next);
5501 			if (!ret) {
5502 				btrfs_set_path_blocking(path);
5503 				btrfs_tree_read_lock(next);
5504 			}
5505 			next_rw_lock = BTRFS_READ_LOCK;
5506 		}
5507 	}
5508 	ret = 0;
5509 done:
5510 	unlock_up(path, 0, 1, 0, NULL);
5511 	path->leave_spinning = old_spinning;
5512 	if (!old_spinning)
5513 		btrfs_set_path_blocking(path);
5514 
5515 	return ret;
5516 }
5517 
5518 /*
5519  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5520  * searching until it gets past min_objectid or finds an item of 'type'
5521  *
5522  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5523  */
5524 int btrfs_previous_item(struct btrfs_root *root,
5525 			struct btrfs_path *path, u64 min_objectid,
5526 			int type)
5527 {
5528 	struct btrfs_key found_key;
5529 	struct extent_buffer *leaf;
5530 	u32 nritems;
5531 	int ret;
5532 
5533 	while (1) {
5534 		if (path->slots[0] == 0) {
5535 			btrfs_set_path_blocking(path);
5536 			ret = btrfs_prev_leaf(root, path);
5537 			if (ret != 0)
5538 				return ret;
5539 		} else {
5540 			path->slots[0]--;
5541 		}
5542 		leaf = path->nodes[0];
5543 		nritems = btrfs_header_nritems(leaf);
5544 		if (nritems == 0)
5545 			return 1;
5546 		if (path->slots[0] == nritems)
5547 			path->slots[0]--;
5548 
5549 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5550 		if (found_key.objectid < min_objectid)
5551 			break;
5552 		if (found_key.type == type)
5553 			return 0;
5554 		if (found_key.objectid == min_objectid &&
5555 		    found_key.type < type)
5556 			break;
5557 	}
5558 	return 1;
5559 }
5560 
5561 /*
5562  * search in extent tree to find a previous Metadata/Data extent item with
5563  * min objectid.
5564  *
5565  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5566  */
5567 int btrfs_previous_extent_item(struct btrfs_root *root,
5568 			struct btrfs_path *path, u64 min_objectid)
5569 {
5570 	struct btrfs_key found_key;
5571 	struct extent_buffer *leaf;
5572 	u32 nritems;
5573 	int ret;
5574 
5575 	while (1) {
5576 		if (path->slots[0] == 0) {
5577 			btrfs_set_path_blocking(path);
5578 			ret = btrfs_prev_leaf(root, path);
5579 			if (ret != 0)
5580 				return ret;
5581 		} else {
5582 			path->slots[0]--;
5583 		}
5584 		leaf = path->nodes[0];
5585 		nritems = btrfs_header_nritems(leaf);
5586 		if (nritems == 0)
5587 			return 1;
5588 		if (path->slots[0] == nritems)
5589 			path->slots[0]--;
5590 
5591 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5592 		if (found_key.objectid < min_objectid)
5593 			break;
5594 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5595 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5596 			return 0;
5597 		if (found_key.objectid == min_objectid &&
5598 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5599 			break;
5600 	}
5601 	return 1;
5602 }
5603