1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
9 #include <linux/mm.h>
10 #include "ctree.h"
11 #include "disk-io.h"
12 #include "transaction.h"
13 #include "print-tree.h"
14 #include "locking.h"
15 #include "volumes.h"
16 #include "qgroup.h"
17 
18 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
19 		      *root, struct btrfs_path *path, int level);
20 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
21 		      const struct btrfs_key *ins_key, struct btrfs_path *path,
22 		      int data_size, int extend);
23 static int push_node_left(struct btrfs_trans_handle *trans,
24 			  struct extent_buffer *dst,
25 			  struct extent_buffer *src, int empty);
26 static int balance_node_right(struct btrfs_trans_handle *trans,
27 			      struct extent_buffer *dst_buf,
28 			      struct extent_buffer *src_buf);
29 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
30 		    int level, int slot);
31 
32 static const struct btrfs_csums {
33 	u16		size;
34 	const char	*name;
35 	const char	*driver;
36 } btrfs_csums[] = {
37 	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
38 	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
39 	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
40 	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
41 				     .driver = "blake2b-256" },
42 };
43 
44 int btrfs_super_csum_size(const struct btrfs_super_block *s)
45 {
46 	u16 t = btrfs_super_csum_type(s);
47 	/*
48 	 * csum type is validated at mount time
49 	 */
50 	return btrfs_csums[t].size;
51 }
52 
53 const char *btrfs_super_csum_name(u16 csum_type)
54 {
55 	/* csum type is validated at mount time */
56 	return btrfs_csums[csum_type].name;
57 }
58 
59 /*
60  * Return driver name if defined, otherwise the name that's also a valid driver
61  * name
62  */
63 const char *btrfs_super_csum_driver(u16 csum_type)
64 {
65 	/* csum type is validated at mount time */
66 	return btrfs_csums[csum_type].driver ?:
67 		btrfs_csums[csum_type].name;
68 }
69 
70 size_t __attribute_const__ btrfs_get_num_csums(void)
71 {
72 	return ARRAY_SIZE(btrfs_csums);
73 }
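
/*
 * Illustrative sketch, not part of this file: the driver string returned by
 * btrfs_super_csum_driver() is meant for the crypto API, roughly the way the
 * checksum hash is set up at mount time:
 *
 *	struct crypto_shash *tfm;
 *
 *	tfm = crypto_alloc_shash(btrfs_super_csum_driver(csum_type), 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * For BTRFS_CSUM_TYPE_BLAKE2 this resolves to "blake2b-256"; for all other
 * types the checksum name doubles as the driver name.
 */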
74 
75 struct btrfs_path *btrfs_alloc_path(void)
76 {
77 	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
78 }
79 
80 /* this also releases the path */
81 void btrfs_free_path(struct btrfs_path *p)
82 {
83 	if (!p)
84 		return;
85 	btrfs_release_path(p);
86 	kmem_cache_free(btrfs_path_cachep, p);
87 }
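
/*
 * Typical lifecycle of a path, as a hedged sketch (error handling and the
 * surrounding key setup elided):
 *
 *	struct btrfs_path *path;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	... use path->nodes[0] and path->slots[0] ...
 *	btrfs_free_path(path);
 *
 * btrfs_release_path() alone is enough when the same path object is reused
 * for another search.
 */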
88 
89 /*
90  * path release drops references on the extent buffers in the path
91  * and it drops any locks held by this path
92  *
93  * It is safe to call this on paths that have no locks or extent buffers held.
94  */
95 noinline void btrfs_release_path(struct btrfs_path *p)
96 {
97 	int i;
98 
99 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
100 		p->slots[i] = 0;
101 		if (!p->nodes[i])
102 			continue;
103 		if (p->locks[i]) {
104 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
105 			p->locks[i] = 0;
106 		}
107 		free_extent_buffer(p->nodes[i]);
108 		p->nodes[i] = NULL;
109 	}
110 }
111 
112 /*
113  * safely gets a reference on the root node of a tree.  A lock
114  * is not taken, so a concurrent writer may put a different node
115  * at the root of the tree.  See btrfs_lock_root_node for the
116  * looping required.
117  *
118  * The extent buffer returned by this has a reference taken, so
119  * it won't disappear.  It may stop being the root of the tree
120  * at any time because there are no locks held.
121  */
122 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
123 {
124 	struct extent_buffer *eb;
125 
126 	while (1) {
127 		rcu_read_lock();
128 		eb = rcu_dereference(root->node);
129 
130 		/*
131 		 * RCU really hurts here: we could free up the root node because
132 		 * it was COWed but we may not get the new root node yet, so do
133 		 * the inc_not_zero dance; if it doesn't work, then
134 		 * synchronize_rcu() and try again.
135 		 */
136 		if (atomic_inc_not_zero(&eb->refs)) {
137 			rcu_read_unlock();
138 			break;
139 		}
140 		rcu_read_unlock();
141 		synchronize_rcu();
142 	}
143 	return eb;
144 }
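
/*
 * The matching writer side lives in __btrfs_cow_block() and balance_level(),
 * which publish a new root with rcu_assign_pointer(root->node, cow) and drop
 * their reference on the old buffer afterwards.  The atomic_inc_not_zero()
 * above is what makes racing with that drop safe: if the old root's refcount
 * already reached zero we retry after synchronize_rcu() instead of
 * resurrecting a freed buffer.
 */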
145 
146 /* loop around taking references on and locking the root node of the
147  * tree until you end up with a lock on the root.  A locked buffer
148  * is returned, with a reference held.
149  */
150 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
151 {
152 	struct extent_buffer *eb;
153 
154 	while (1) {
155 		eb = btrfs_root_node(root);
156 		btrfs_tree_lock(eb);
157 		if (eb == root->node)
158 			break;
159 		btrfs_tree_unlock(eb);
160 		free_extent_buffer(eb);
161 	}
162 	return eb;
163 }
164 
165 /* loop around taking references on and read locking the root node of the
166  * tree until you end up with a read lock on the root.  A read locked buffer
167  * is returned, with a reference held.
168  */
169 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
170 {
171 	struct extent_buffer *eb;
172 
173 	while (1) {
174 		eb = btrfs_root_node(root);
175 		btrfs_tree_read_lock(eb);
176 		if (eb == root->node)
177 			break;
178 		btrfs_tree_read_unlock(eb);
179 		free_extent_buffer(eb);
180 	}
181 	return eb;
182 }
183 
184 /* cow-only roots (everything that is not a reference counted cow subvolume)
185  * just get put onto a simple dirty list.  transaction.c walks this list to
186  * make sure they get properly updated on disk.
187  */
188 static void add_root_to_dirty_list(struct btrfs_root *root)
189 {
190 	struct btrfs_fs_info *fs_info = root->fs_info;
191 
192 	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
193 	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
194 		return;
195 
196 	spin_lock(&fs_info->trans_lock);
197 	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
198 		/* Want the extent tree to be the last on the list */
199 		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
200 			list_move_tail(&root->dirty_list,
201 				       &fs_info->dirty_cowonly_roots);
202 		else
203 			list_move(&root->dirty_list,
204 				  &fs_info->dirty_cowonly_roots);
205 	}
206 	spin_unlock(&fs_info->trans_lock);
207 }
208 
209 /*
210  * used by snapshot creation to make a copy of a root for a tree with
211  * a given objectid.  The buffer with the new root node is returned in
212  * cow_ret, and this function returns zero on success or a negative error code.
213  */
214 int btrfs_copy_root(struct btrfs_trans_handle *trans,
215 		      struct btrfs_root *root,
216 		      struct extent_buffer *buf,
217 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
218 {
219 	struct btrfs_fs_info *fs_info = root->fs_info;
220 	struct extent_buffer *cow;
221 	int ret = 0;
222 	int level;
223 	struct btrfs_disk_key disk_key;
224 
225 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
226 		trans->transid != fs_info->running_transaction->transid);
227 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
228 		trans->transid != root->last_trans);
229 
230 	level = btrfs_header_level(buf);
231 	if (level == 0)
232 		btrfs_item_key(buf, &disk_key, 0);
233 	else
234 		btrfs_node_key(buf, &disk_key, 0);
235 
236 	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
237 			&disk_key, level, buf->start, 0);
238 	if (IS_ERR(cow))
239 		return PTR_ERR(cow);
240 
241 	copy_extent_buffer_full(cow, buf);
242 	btrfs_set_header_bytenr(cow, cow->start);
243 	btrfs_set_header_generation(cow, trans->transid);
244 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
245 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
246 				     BTRFS_HEADER_FLAG_RELOC);
247 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
248 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
249 	else
250 		btrfs_set_header_owner(cow, new_root_objectid);
251 
252 	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
253 
254 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
255 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
256 		ret = btrfs_inc_ref(trans, root, cow, 1);
257 	else
258 		ret = btrfs_inc_ref(trans, root, cow, 0);
259 
260 	if (ret)
261 		return ret;
262 
263 	btrfs_mark_buffer_dirty(cow);
264 	*cow_ret = cow;
265 	return 0;
266 }
267 
268 enum mod_log_op {
269 	MOD_LOG_KEY_REPLACE,
270 	MOD_LOG_KEY_ADD,
271 	MOD_LOG_KEY_REMOVE,
272 	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
273 	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
274 	MOD_LOG_MOVE_KEYS,
275 	MOD_LOG_ROOT_REPLACE,
276 };
277 
278 struct tree_mod_root {
279 	u64 logical;
280 	u8 level;
281 };
282 
283 struct tree_mod_elem {
284 	struct rb_node node;
285 	u64 logical;
286 	u64 seq;
287 	enum mod_log_op op;
288 
289 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
290 	int slot;
291 
292 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_ROOT_REPLACE */
293 	u64 generation;
294 
295 	/* these are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
296 	struct btrfs_disk_key key;
297 	u64 blockptr;
298 
299 	/* this is used for op == MOD_LOG_MOVE_KEYS */
300 	struct {
301 		int dst_slot;
302 		int nr_items;
303 	} move;
304 
305 	/* this is used for op == MOD_LOG_ROOT_REPLACE */
306 	struct tree_mod_root old_root;
307 };
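
/*
 * Which fields of a tree_mod_elem are meaningful depends on op.  For
 * example, logging the removal of slot 2 of a node at logical address 4096
 * records roughly:
 *
 *	tm->logical = 4096;
 *	tm->op = MOD_LOG_KEY_REMOVE;
 *	tm->slot = 2;
 *	tm->key / tm->blockptr / tm->generation = old contents of slot 2;
 *
 * which is exactly what __tree_mod_log_rewind() needs to restore the slot
 * when walking the log backwards.
 */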
308 
309 /*
310  * Pull a new tree mod seq number for our operation.
311  */
312 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
313 {
314 	return atomic64_inc_return(&fs_info->tree_mod_seq);
315 }
316 
317 /*
318  * This adds a new blocker to the tree mod log's blocker list if the @elem
319  * passed does not already have a sequence number set. So when a caller expects
320  * to record tree modifications, it should ensure to set elem->seq to zero
321  * before calling btrfs_get_tree_mod_seq.
322  * Returns a fresh, unused tree log modification sequence number, even if no new
323  * blocker was added.
324  */
325 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
326 			   struct seq_list *elem)
327 {
328 	write_lock(&fs_info->tree_mod_log_lock);
329 	spin_lock(&fs_info->tree_mod_seq_lock);
330 	if (!elem->seq) {
331 		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
332 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
333 	}
334 	spin_unlock(&fs_info->tree_mod_seq_lock);
335 	write_unlock(&fs_info->tree_mod_log_lock);
336 
337 	return elem->seq;
338 }
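
/*
 * Hedged usage sketch: callers like the backref walker block tree mod log
 * pruning for the duration of their walk with a stack-allocated element,
 * roughly:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... time-travel reads, passing elem.seq as time_seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */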
339 
340 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
341 			    struct seq_list *elem)
342 {
343 	struct rb_root *tm_root;
344 	struct rb_node *node;
345 	struct rb_node *next;
346 	struct seq_list *cur_elem;
347 	struct tree_mod_elem *tm;
348 	u64 min_seq = (u64)-1;
349 	u64 seq_putting = elem->seq;
350 
351 	if (!seq_putting)
352 		return;
353 
354 	spin_lock(&fs_info->tree_mod_seq_lock);
355 	list_del(&elem->list);
356 	elem->seq = 0;
357 
358 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
359 		if (cur_elem->seq < min_seq) {
360 			if (seq_putting > cur_elem->seq) {
361 				/*
362 				 * blocker with lower sequence number exists, we
363 				 * cannot remove anything from the log
364 				 */
365 				spin_unlock(&fs_info->tree_mod_seq_lock);
366 				return;
367 			}
368 			min_seq = cur_elem->seq;
369 		}
370 	}
371 	spin_unlock(&fs_info->tree_mod_seq_lock);
372 
373 	/*
374 	 * anything that's lower than the lowest existing (read: blocked)
375 	 * sequence number can be removed from the tree.
376 	 */
377 	write_lock(&fs_info->tree_mod_log_lock);
378 	tm_root = &fs_info->tree_mod_log;
379 	for (node = rb_first(tm_root); node; node = next) {
380 		next = rb_next(node);
381 		tm = rb_entry(node, struct tree_mod_elem, node);
382 		if (tm->seq > min_seq)
383 			continue;
384 		rb_erase(node, tm_root);
385 		kfree(tm);
386 	}
387 	write_unlock(&fs_info->tree_mod_log_lock);
388 }
389 
390 /*
391  * key order of the log:
392  *       node/leaf start address -> sequence
393  *
394  * The 'start address' is the logical address of the *new* root node
395  * for root replace operations, or the logical address of the affected
396  * block for all other operations.
397  */
398 static noinline int
399 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
400 {
401 	struct rb_root *tm_root;
402 	struct rb_node **new;
403 	struct rb_node *parent = NULL;
404 	struct tree_mod_elem *cur;
405 
406 	lockdep_assert_held_write(&fs_info->tree_mod_log_lock);
407 
408 	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
409 
410 	tm_root = &fs_info->tree_mod_log;
411 	new = &tm_root->rb_node;
412 	while (*new) {
413 		cur = rb_entry(*new, struct tree_mod_elem, node);
414 		parent = *new;
415 		if (cur->logical < tm->logical)
416 			new = &((*new)->rb_left);
417 		else if (cur->logical > tm->logical)
418 			new = &((*new)->rb_right);
419 		else if (cur->seq < tm->seq)
420 			new = &((*new)->rb_left);
421 		else if (cur->seq > tm->seq)
422 			new = &((*new)->rb_right);
423 		else
424 			return -EEXIST;
425 	}
426 
427 	rb_link_node(&tm->node, parent, new);
428 	rb_insert_color(&tm->node, tm_root);
429 	return 0;
430 }
431 
432 /*
433  * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
434  * returns zero with the tree_mod_log_lock acquired. The caller must hold
435  * the lock until all tree mod log insertions are recorded in the rb tree and
436  * then write unlock fs_info::tree_mod_log_lock.
437  */
438 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
439 				    struct extent_buffer *eb)
{
440 	smp_mb();
441 	if (list_empty(&fs_info->tree_mod_seq_list))
442 		return 1;
443 	if (eb && btrfs_header_level(eb) == 0)
444 		return 1;
445 
446 	write_lock(&fs_info->tree_mod_log_lock);
447 	if (list_empty(&fs_info->tree_mod_seq_list)) {
448 		write_unlock(&fs_info->tree_mod_log_lock);
449 		return 1;
450 	}
451 
452 	return 0;
453 }
454 
455 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
456 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
457 				    struct extent_buffer *eb)
458 {
459 	smp_mb();
460 	if (list_empty(&fs_info->tree_mod_seq_list))
461 		return 0;
462 	if (eb && btrfs_header_level(eb) == 0)
463 		return 0;
464 
465 	return 1;
466 }
467 
468 static struct tree_mod_elem *
469 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
470 		    enum mod_log_op op, gfp_t flags)
471 {
472 	struct tree_mod_elem *tm;
473 
474 	tm = kzalloc(sizeof(*tm), flags);
475 	if (!tm)
476 		return NULL;
477 
478 	tm->logical = eb->start;
479 	if (op != MOD_LOG_KEY_ADD) {
480 		btrfs_node_key(eb, &tm->key, slot);
481 		tm->blockptr = btrfs_node_blockptr(eb, slot);
482 	}
483 	tm->op = op;
484 	tm->slot = slot;
485 	tm->generation = btrfs_node_ptr_generation(eb, slot);
486 	RB_CLEAR_NODE(&tm->node);
487 
488 	return tm;
489 }
490 
491 static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
492 		enum mod_log_op op, gfp_t flags)
493 {
494 	struct tree_mod_elem *tm;
495 	int ret;
496 
497 	if (!tree_mod_need_log(eb->fs_info, eb))
498 		return 0;
499 
500 	tm = alloc_tree_mod_elem(eb, slot, op, flags);
501 	if (!tm)
502 		return -ENOMEM;
503 
504 	if (tree_mod_dont_log(eb->fs_info, eb)) {
505 		kfree(tm);
506 		return 0;
507 	}
508 
509 	ret = __tree_mod_log_insert(eb->fs_info, tm);
510 	write_unlock(&eb->fs_info->tree_mod_log_lock);
511 	if (ret)
512 		kfree(tm);
513 
514 	return ret;
515 }
516 
517 static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
518 		int dst_slot, int src_slot, int nr_items)
519 {
520 	struct tree_mod_elem *tm = NULL;
521 	struct tree_mod_elem **tm_list = NULL;
522 	int ret = 0;
523 	int i;
524 	int locked = 0;
525 
526 	if (!tree_mod_need_log(eb->fs_info, eb))
527 		return 0;
528 
529 	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
530 	if (!tm_list)
531 		return -ENOMEM;
532 
533 	tm = kzalloc(sizeof(*tm), GFP_NOFS);
534 	if (!tm) {
535 		ret = -ENOMEM;
536 		goto free_tms;
537 	}
538 
539 	tm->logical = eb->start;
540 	tm->slot = src_slot;
541 	tm->move.dst_slot = dst_slot;
542 	tm->move.nr_items = nr_items;
543 	tm->op = MOD_LOG_MOVE_KEYS;
544 
545 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
546 		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
547 		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
548 		if (!tm_list[i]) {
549 			ret = -ENOMEM;
550 			goto free_tms;
551 		}
552 	}
553 
554 	if (tree_mod_dont_log(eb->fs_info, eb))
555 		goto free_tms;
556 	locked = 1;
557 
558 	/*
559 	 * When we overwrite something during the move, we log these removals.
560 	 * This can only happen when we move towards the beginning of the
561 	 * buffer, i.e. dst_slot < src_slot.
562 	 */
563 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
564 		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
565 		if (ret)
566 			goto free_tms;
567 	}
568 
569 	ret = __tree_mod_log_insert(eb->fs_info, tm);
570 	if (ret)
571 		goto free_tms;
572 	write_unlock(&eb->fs_info->tree_mod_log_lock);
573 	kfree(tm_list);
574 
575 	return 0;
576 free_tms:
577 	for (i = 0; i < nr_items; i++) {
578 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
579 			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
580 		kfree(tm_list[i]);
581 	}
582 	if (locked)
583 		write_unlock(&eb->fs_info->tree_mod_log_lock);
584 	kfree(tm_list);
585 	kfree(tm);
586 
587 	return ret;
588 }
589 
590 static inline int
591 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
592 		       struct tree_mod_elem **tm_list,
593 		       int nritems)
594 {
595 	int i, j;
596 	int ret;
597 
598 	for (i = nritems - 1; i >= 0; i--) {
599 		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
600 		if (ret) {
601 			for (j = nritems - 1; j > i; j--)
602 				rb_erase(&tm_list[j]->node,
603 					 &fs_info->tree_mod_log);
604 			return ret;
605 		}
606 	}
607 
608 	return 0;
609 }
610 
611 static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
612 			 struct extent_buffer *new_root, int log_removal)
613 {
614 	struct btrfs_fs_info *fs_info = old_root->fs_info;
615 	struct tree_mod_elem *tm = NULL;
616 	struct tree_mod_elem **tm_list = NULL;
617 	int nritems = 0;
618 	int ret = 0;
619 	int i;
620 
621 	if (!tree_mod_need_log(fs_info, NULL))
622 		return 0;
623 
624 	if (log_removal && btrfs_header_level(old_root) > 0) {
625 		nritems = btrfs_header_nritems(old_root);
626 		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
627 				  GFP_NOFS);
628 		if (!tm_list) {
629 			ret = -ENOMEM;
630 			goto free_tms;
631 		}
632 		for (i = 0; i < nritems; i++) {
633 			tm_list[i] = alloc_tree_mod_elem(old_root, i,
634 			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
635 			if (!tm_list[i]) {
636 				ret = -ENOMEM;
637 				goto free_tms;
638 			}
639 		}
640 	}
641 
642 	tm = kzalloc(sizeof(*tm), GFP_NOFS);
643 	if (!tm) {
644 		ret = -ENOMEM;
645 		goto free_tms;
646 	}
647 
648 	tm->logical = new_root->start;
649 	tm->old_root.logical = old_root->start;
650 	tm->old_root.level = btrfs_header_level(old_root);
651 	tm->generation = btrfs_header_generation(old_root);
652 	tm->op = MOD_LOG_ROOT_REPLACE;
653 
654 	if (tree_mod_dont_log(fs_info, NULL))
655 		goto free_tms;
656 
657 	if (tm_list)
658 		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
659 	if (!ret)
660 		ret = __tree_mod_log_insert(fs_info, tm);
661 
662 	write_unlock(&fs_info->tree_mod_log_lock);
663 	if (ret)
664 		goto free_tms;
665 	kfree(tm_list);
666 
667 	return ret;
668 
669 free_tms:
670 	if (tm_list) {
671 		for (i = 0; i < nritems; i++)
672 			kfree(tm_list[i]);
673 		kfree(tm_list);
674 	}
675 	kfree(tm);
676 
677 	return ret;
678 }
679 
680 static struct tree_mod_elem *
681 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
682 		      int smallest)
683 {
684 	struct rb_root *tm_root;
685 	struct rb_node *node;
686 	struct tree_mod_elem *cur = NULL;
687 	struct tree_mod_elem *found = NULL;
688 
689 	read_lock(&fs_info->tree_mod_log_lock);
690 	tm_root = &fs_info->tree_mod_log;
691 	node = tm_root->rb_node;
692 	while (node) {
693 		cur = rb_entry(node, struct tree_mod_elem, node);
694 		if (cur->logical < start) {
695 			node = node->rb_left;
696 		} else if (cur->logical > start) {
697 			node = node->rb_right;
698 		} else if (cur->seq < min_seq) {
699 			node = node->rb_left;
700 		} else if (!smallest) {
701 			/* we want the node with the highest seq */
702 			if (found)
703 				BUG_ON(found->seq > cur->seq);
704 			found = cur;
705 			node = node->rb_left;
706 		} else if (cur->seq > min_seq) {
707 			/* we want the node with the smallest seq */
708 			if (found)
709 				BUG_ON(found->seq < cur->seq);
710 			found = cur;
711 			node = node->rb_right;
712 		} else {
713 			found = cur;
714 			break;
715 		}
716 	}
717 	read_unlock(&fs_info->tree_mod_log_lock);
718 
719 	return found;
720 }
721 
722 /*
723  * this returns the element from the log with the smallest time sequence
724  * value that's in the log (the oldest log item). any element with a time
725  * sequence lower than min_seq will be ignored.
726  */
727 static struct tree_mod_elem *
728 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
729 			   u64 min_seq)
730 {
731 	return __tree_mod_log_search(fs_info, start, min_seq, 1);
732 }
733 
734 /*
735  * this returns the element from the log with the largest time sequence
736  * value that's in the log (the most recent log item). any element with
737  * a time sequence lower than min_seq will be ignored.
738  */
739 static struct tree_mod_elem *
740 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
741 {
742 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
743 }
744 
745 static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst,
746 		     struct extent_buffer *src, unsigned long dst_offset,
747 		     unsigned long src_offset, int nr_items)
748 {
749 	struct btrfs_fs_info *fs_info = dst->fs_info;
750 	int ret = 0;
751 	struct tree_mod_elem **tm_list = NULL;
752 	struct tree_mod_elem **tm_list_add, **tm_list_rem;
753 	int i;
754 	int locked = 0;
755 
756 	if (!tree_mod_need_log(fs_info, NULL))
757 		return 0;
758 
759 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
760 		return 0;
761 
762 	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
763 			  GFP_NOFS);
764 	if (!tm_list)
765 		return -ENOMEM;
766 
767 	tm_list_add = tm_list;
768 	tm_list_rem = tm_list + nr_items;
769 	for (i = 0; i < nr_items; i++) {
770 		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
771 		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
772 		if (!tm_list_rem[i]) {
773 			ret = -ENOMEM;
774 			goto free_tms;
775 		}
776 
777 		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
778 		    MOD_LOG_KEY_ADD, GFP_NOFS);
779 		if (!tm_list_add[i]) {
780 			ret = -ENOMEM;
781 			goto free_tms;
782 		}
783 	}
784 
785 	if (tree_mod_dont_log(fs_info, NULL))
786 		goto free_tms;
787 	locked = 1;
788 
789 	for (i = 0; i < nr_items; i++) {
790 		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
791 		if (ret)
792 			goto free_tms;
793 		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
794 		if (ret)
795 			goto free_tms;
796 	}
797 
798 	write_unlock(&fs_info->tree_mod_log_lock);
799 	kfree(tm_list);
800 
801 	return 0;
802 
803 free_tms:
804 	for (i = 0; i < nr_items * 2; i++) {
805 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
806 			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
807 		kfree(tm_list[i]);
808 	}
809 	if (locked)
810 		write_unlock(&fs_info->tree_mod_log_lock);
811 	kfree(tm_list);
812 
813 	return ret;
814 }
815 
816 static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
817 {
818 	struct tree_mod_elem **tm_list = NULL;
819 	int nritems = 0;
820 	int i;
821 	int ret = 0;
822 
823 	if (btrfs_header_level(eb) == 0)
824 		return 0;
825 
826 	if (!tree_mod_need_log(eb->fs_info, NULL))
827 		return 0;
828 
829 	nritems = btrfs_header_nritems(eb);
830 	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
831 	if (!tm_list)
832 		return -ENOMEM;
833 
834 	for (i = 0; i < nritems; i++) {
835 		tm_list[i] = alloc_tree_mod_elem(eb, i,
836 		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
837 		if (!tm_list[i]) {
838 			ret = -ENOMEM;
839 			goto free_tms;
840 		}
841 	}
842 
843 	if (tree_mod_dont_log(eb->fs_info, eb))
844 		goto free_tms;
845 
846 	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
847 	write_unlock(&eb->fs_info->tree_mod_log_lock);
848 	if (ret)
849 		goto free_tms;
850 	kfree(tm_list);
851 
852 	return 0;
853 
854 free_tms:
855 	for (i = 0; i < nritems; i++)
856 		kfree(tm_list[i]);
857 	kfree(tm_list);
858 
859 	return ret;
860 }
861 
862 /*
863  * check if the tree block can be shared by multiple trees
864  */
865 int btrfs_block_can_be_shared(struct btrfs_root *root,
866 			      struct extent_buffer *buf)
867 {
868 	/*
869 	 * Tree blocks not in reference counted trees and tree roots
870 	 * are never shared. If a block was allocated after the last
871 	 * snapshot and the block was not allocated by tree relocation,
872 	 * we know the block is not shared.
873 	 */
874 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
875 	    buf != root->node && buf != root->commit_root &&
876 	    (btrfs_header_generation(buf) <=
877 	     btrfs_root_last_snapshot(&root->root_item) ||
878 	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
879 		return 1;
880 
881 	return 0;
882 }
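
/*
 * Concrete example: in a subvolume whose last snapshot was taken at
 * generation 100, a block with header generation 90 predates the snapshot
 * and may still be referenced by it, so this returns 1.  A block COWed at
 * generation 110 can only be referenced by the source tree and returns 0,
 * unless it carries the RELOC flag.
 */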
883 
884 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
885 				       struct btrfs_root *root,
886 				       struct extent_buffer *buf,
887 				       struct extent_buffer *cow,
888 				       int *last_ref)
889 {
890 	struct btrfs_fs_info *fs_info = root->fs_info;
891 	u64 refs;
892 	u64 owner;
893 	u64 flags;
894 	u64 new_flags = 0;
895 	int ret;
896 
897 	/*
898 	 * Backrefs update rules:
899 	 *
900 	 * Always use full backrefs for extent pointers in tree block
901 	 * allocated by tree relocation.
902 	 *
903 	 * If a shared tree block is no longer referenced by its owner
904 	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
905 	 * use full backrefs for extent pointers in tree block.
906 	 *
907 	 * If a tree block is being relocated
908 	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
909 	 * use full backrefs for extent pointers in tree block.
910 	 * The reason for this is that some operations (such as drop tree)
911 	 * are only allowed for blocks that use full backrefs.
912 	 */
913 
914 	if (btrfs_block_can_be_shared(root, buf)) {
915 		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
916 					       btrfs_header_level(buf), 1,
917 					       &refs, &flags);
918 		if (ret)
919 			return ret;
920 		if (refs == 0) {
921 			ret = -EROFS;
922 			btrfs_handle_fs_error(fs_info, ret, NULL);
923 			return ret;
924 		}
925 	} else {
926 		refs = 1;
927 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
928 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
929 			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
930 		else
931 			flags = 0;
932 	}
933 
934 	owner = btrfs_header_owner(buf);
935 	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
936 	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
937 
938 	if (refs > 1) {
939 		if ((owner == root->root_key.objectid ||
940 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
941 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
942 			ret = btrfs_inc_ref(trans, root, buf, 1);
943 			if (ret)
944 				return ret;
945 
946 			if (root->root_key.objectid ==
947 			    BTRFS_TREE_RELOC_OBJECTID) {
948 				ret = btrfs_dec_ref(trans, root, buf, 0);
949 				if (ret)
950 					return ret;
951 				ret = btrfs_inc_ref(trans, root, cow, 1);
952 				if (ret)
953 					return ret;
954 			}
955 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
956 		} else {
957 
958 			if (root->root_key.objectid ==
959 			    BTRFS_TREE_RELOC_OBJECTID)
960 				ret = btrfs_inc_ref(trans, root, cow, 1);
961 			else
962 				ret = btrfs_inc_ref(trans, root, cow, 0);
963 			if (ret)
964 				return ret;
965 		}
966 		if (new_flags != 0) {
967 			int level = btrfs_header_level(buf);
968 
969 			ret = btrfs_set_disk_extent_flags(trans,
970 							  buf->start,
971 							  buf->len,
972 							  new_flags, level, 0);
973 			if (ret)
974 				return ret;
975 		}
976 	} else {
977 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
978 			if (root->root_key.objectid ==
979 			    BTRFS_TREE_RELOC_OBJECTID)
980 				ret = btrfs_inc_ref(trans, root, cow, 1);
981 			else
982 				ret = btrfs_inc_ref(trans, root, cow, 0);
983 			if (ret)
984 				return ret;
985 			ret = btrfs_dec_ref(trans, root, buf, 1);
986 			if (ret)
987 				return ret;
988 		}
989 		btrfs_clean_tree_block(buf);
990 		*last_ref = 1;
991 	}
992 	return 0;
993 }
994 
995 static struct extent_buffer *alloc_tree_block_no_bg_flush(
996 					  struct btrfs_trans_handle *trans,
997 					  struct btrfs_root *root,
998 					  u64 parent_start,
999 					  const struct btrfs_disk_key *disk_key,
1000 					  int level,
1001 					  u64 hint,
1002 					  u64 empty_size)
1003 {
1004 	struct btrfs_fs_info *fs_info = root->fs_info;
1005 	struct extent_buffer *ret;
1006 
1007 	/*
1008 	 * If we are COWing a node/leaf from the extent, chunk, device or free
1009 	 * space trees, make sure that we do not finish block group creation of
1010 	 * pending block groups. We do this to avoid a deadlock.
1011 	 * COWing can result in allocation of a new chunk, and flushing pending
1012 	 * block groups (btrfs_create_pending_block_groups()) can be triggered
1013 	 * when finishing allocation of a new chunk. Creation of a pending block
1014 	 * group modifies the extent, chunk, device and free space trees,
1015 	 * therefore we could deadlock with ourselves since we are holding a
1016 	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
1017 	 * try to COW later.
1018 	 * For similar reasons, we also need to delay flushing pending block
1019 	 * groups when splitting a leaf or node, from one of those trees, since
1020 	 * we are holding a write lock on it and its parent or when inserting a
1021 	 * new root node for one of those trees.
1022 	 */
1023 	if (root == fs_info->extent_root ||
1024 	    root == fs_info->chunk_root ||
1025 	    root == fs_info->dev_root ||
1026 	    root == fs_info->free_space_root)
1027 		trans->can_flush_pending_bgs = false;
1028 
1029 	ret = btrfs_alloc_tree_block(trans, root, parent_start,
1030 				     root->root_key.objectid, disk_key, level,
1031 				     hint, empty_size);
1032 	trans->can_flush_pending_bgs = true;
1033 
1034 	return ret;
1035 }
1036 
1037 /*
1038  * does the dirty work in cow of a single block.  The parent block (if
1039  * supplied) is updated to point to the new cow copy.  The new buffer is marked
1040  * dirty and returned locked.  If you modify the block it needs to be marked
1041  * dirty again.
1042  *
1043  * search_start -- an allocation hint for the new block
1044  *
1045  * empty_size -- a hint that you plan on doing more cow.  This is the size in
1046  * bytes the allocator should try to find free next to the block it returns.
1047  * This is just a hint and may be ignored by the allocator.
1048  */
1049 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1050 			     struct btrfs_root *root,
1051 			     struct extent_buffer *buf,
1052 			     struct extent_buffer *parent, int parent_slot,
1053 			     struct extent_buffer **cow_ret,
1054 			     u64 search_start, u64 empty_size)
1055 {
1056 	struct btrfs_fs_info *fs_info = root->fs_info;
1057 	struct btrfs_disk_key disk_key;
1058 	struct extent_buffer *cow;
1059 	int level, ret;
1060 	int last_ref = 0;
1061 	int unlock_orig = 0;
1062 	u64 parent_start = 0;
1063 
1064 	if (*cow_ret == buf)
1065 		unlock_orig = 1;
1066 
1067 	btrfs_assert_tree_locked(buf);
1068 
1069 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1070 		trans->transid != fs_info->running_transaction->transid);
1071 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1072 		trans->transid != root->last_trans);
1073 
1074 	level = btrfs_header_level(buf);
1075 
1076 	if (level == 0)
1077 		btrfs_item_key(buf, &disk_key, 0);
1078 	else
1079 		btrfs_node_key(buf, &disk_key, 0);
1080 
1081 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1082 		parent_start = parent->start;
1083 
1084 	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
1085 					   level, search_start, empty_size);
1086 	if (IS_ERR(cow))
1087 		return PTR_ERR(cow);
1088 
1089 	/* cow is set to blocking by btrfs_init_new_buffer */
1090 
1091 	copy_extent_buffer_full(cow, buf);
1092 	btrfs_set_header_bytenr(cow, cow->start);
1093 	btrfs_set_header_generation(cow, trans->transid);
1094 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1095 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1096 				     BTRFS_HEADER_FLAG_RELOC);
1097 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1098 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1099 	else
1100 		btrfs_set_header_owner(cow, root->root_key.objectid);
1101 
1102 	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
1103 
1104 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1105 	if (ret) {
1106 		btrfs_abort_transaction(trans, ret);
1107 		return ret;
1108 	}
1109 
1110 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1111 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1112 		if (ret) {
1113 			btrfs_abort_transaction(trans, ret);
1114 			return ret;
1115 		}
1116 	}
1117 
1118 	if (buf == root->node) {
1119 		WARN_ON(parent && parent != buf);
1120 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1121 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1122 			parent_start = buf->start;
1123 
1124 		atomic_inc(&cow->refs);
1125 		ret = tree_mod_log_insert_root(root->node, cow, 1);
1126 		BUG_ON(ret < 0);
1127 		rcu_assign_pointer(root->node, cow);
1128 
1129 		btrfs_free_tree_block(trans, root, buf, parent_start,
1130 				      last_ref);
1131 		free_extent_buffer(buf);
1132 		add_root_to_dirty_list(root);
1133 	} else {
1134 		WARN_ON(trans->transid != btrfs_header_generation(parent));
1135 		tree_mod_log_insert_key(parent, parent_slot,
1136 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1137 		btrfs_set_node_blockptr(parent, parent_slot,
1138 					cow->start);
1139 		btrfs_set_node_ptr_generation(parent, parent_slot,
1140 					      trans->transid);
1141 		btrfs_mark_buffer_dirty(parent);
1142 		if (last_ref) {
1143 			ret = tree_mod_log_free_eb(buf);
1144 			if (ret) {
1145 				btrfs_abort_transaction(trans, ret);
1146 				return ret;
1147 			}
1148 		}
1149 		btrfs_free_tree_block(trans, root, buf, parent_start,
1150 				      last_ref);
1151 	}
1152 	if (unlock_orig)
1153 		btrfs_tree_unlock(buf);
1154 	free_extent_buffer_stale(buf);
1155 	btrfs_mark_buffer_dirty(cow);
1156 	*cow_ret = cow;
1157 	return 0;
1158 }
1159 
1160 /*
1161  * returns the logical address of the oldest predecessor of the given root.
1162  * entries older than time_seq are ignored.
1163  */
1164 static struct tree_mod_elem *__tree_mod_log_oldest_root(
1165 		struct extent_buffer *eb_root, u64 time_seq)
1166 {
1167 	struct tree_mod_elem *tm;
1168 	struct tree_mod_elem *found = NULL;
1169 	u64 root_logical = eb_root->start;
1170 	int looped = 0;
1171 
1172 	if (!time_seq)
1173 		return NULL;
1174 
1175 	/*
1176 	 * the very last operation that's logged for a root is the
1177 	 * replacement operation (if it is replaced at all). it is keyed
1178 	 * to the logical address of the *new* root, which makes it the
1179 	 * very first operation that's logged for that new root.
1180 	 */
1181 	while (1) {
1182 		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
1183 						time_seq);
1184 		if (!looped && !tm)
1185 			return NULL;
1186 		/*
1187 		 * if there are no tree operations for the oldest root, we simply
1188 		 * return it. this should only happen if that (old) root is at
1189 		 * level 0.
1190 		 */
1191 		if (!tm)
1192 			break;
1193 
1194 		/*
1195 		 * if there's an operation that's not a root replacement, we
1196 		 * found the oldest version of our root. normally, we'll find a
1197 		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1198 		 */
1199 		if (tm->op != MOD_LOG_ROOT_REPLACE)
1200 			break;
1201 
1202 		found = tm;
1203 		root_logical = tm->old_root.logical;
1204 		looped = 1;
1205 	}
1206 
1207 	/* if there's no old root to return, return what we found instead */
1208 	if (!found)
1209 		found = tm;
1210 
1211 	return found;
1212 }
1213 
1214 /*
1215  * tm is a pointer to the first operation to rewind within eb. then, all
1216  * previous operations will be rewound (until we reach something older than
1217  * time_seq).
1218  */
1219 static void
1220 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1221 		      u64 time_seq, struct tree_mod_elem *first_tm)
1222 {
1223 	u32 n;
1224 	struct rb_node *next;
1225 	struct tree_mod_elem *tm = first_tm;
1226 	unsigned long o_dst;
1227 	unsigned long o_src;
1228 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1229 
1230 	n = btrfs_header_nritems(eb);
1231 	read_lock(&fs_info->tree_mod_log_lock);
1232 	while (tm && tm->seq >= time_seq) {
1233 		/*
1234 		 * every log element records the operation that was applied
1235 		 * to the buffer. as we're going backwards, we apply the
1236 		 * opposite of each operation here.
1237 		 */
1238 		switch (tm->op) {
1239 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1240 			BUG_ON(tm->slot < n);
1241 			/* Fallthrough */
1242 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1243 		case MOD_LOG_KEY_REMOVE:
1244 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1245 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1246 			btrfs_set_node_ptr_generation(eb, tm->slot,
1247 						      tm->generation);
1248 			n++;
1249 			break;
1250 		case MOD_LOG_KEY_REPLACE:
1251 			BUG_ON(tm->slot >= n);
1252 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1253 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1254 			btrfs_set_node_ptr_generation(eb, tm->slot,
1255 						      tm->generation);
1256 			break;
1257 		case MOD_LOG_KEY_ADD:
1258 			/* if a move operation is needed it's in the log */
1259 			n--;
1260 			break;
1261 		case MOD_LOG_MOVE_KEYS:
1262 			o_dst = btrfs_node_key_ptr_offset(tm->slot);
1263 			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1264 			memmove_extent_buffer(eb, o_dst, o_src,
1265 					      tm->move.nr_items * p_size);
1266 			break;
1267 		case MOD_LOG_ROOT_REPLACE:
1268 			/*
1269 			 * this operation is special. for roots, this must be
1270 			 * handled explicitly before rewinding.
1271 			 * for non-roots, this operation may exist if the node
1272 			 * was a root: root A -> child B; then A becomes empty and
1273 			 * B is promoted to the new root. in the mod log, we'll
1274 			 * have a root-replace operation for B, a tree block
1275 			 * that is not a root. we simply ignore that operation.
1276 			 */
1277 			break;
1278 		}
1279 		next = rb_next(&tm->node);
1280 		if (!next)
1281 			break;
1282 		tm = rb_entry(next, struct tree_mod_elem, node);
1283 		if (tm->logical != first_tm->logical)
1284 			break;
1285 	}
1286 	read_unlock(&fs_info->tree_mod_log_lock);
1287 	btrfs_set_header_nritems(eb, n);
1288 }
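
/*
 * Worked example: a MOD_LOG_KEY_ADD for slot 3 logged at seq 10 is undone,
 * when rewinding to time_seq <= 10, by just shrinking nritems (n--); any
 * shifting that accompanied the insertion is a separate MOD_LOG_MOVE_KEYS
 * element.  A MOD_LOG_KEY_REMOVE logged at seq 12 is instead undone by
 * writing the saved key/blockptr/generation back into its slot and growing
 * nritems.
 */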
1289 
1290 /*
1291  * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1292  * is returned. If rewind operations happen, a fresh buffer is returned. The
1293  * returned buffer is always read-locked. If the returned buffer is not the
1294  * input buffer, the lock on the input buffer is released and the input buffer
1295  * is freed (its refcount is decremented).
1296  */
1297 static struct extent_buffer *
1298 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1299 		    struct extent_buffer *eb, u64 time_seq)
1300 {
1301 	struct extent_buffer *eb_rewin;
1302 	struct tree_mod_elem *tm;
1303 
1304 	if (!time_seq)
1305 		return eb;
1306 
1307 	if (btrfs_header_level(eb) == 0)
1308 		return eb;
1309 
1310 	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1311 	if (!tm)
1312 		return eb;
1313 
1314 	btrfs_set_path_blocking(path);
1315 	btrfs_set_lock_blocking_read(eb);
1316 
1317 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1318 		BUG_ON(tm->slot != 0);
1319 		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1320 		if (!eb_rewin) {
1321 			btrfs_tree_read_unlock_blocking(eb);
1322 			free_extent_buffer(eb);
1323 			return NULL;
1324 		}
1325 		btrfs_set_header_bytenr(eb_rewin, eb->start);
1326 		btrfs_set_header_backref_rev(eb_rewin,
1327 					     btrfs_header_backref_rev(eb));
1328 		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1329 		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1330 	} else {
1331 		eb_rewin = btrfs_clone_extent_buffer(eb);
1332 		if (!eb_rewin) {
1333 			btrfs_tree_read_unlock_blocking(eb);
1334 			free_extent_buffer(eb);
1335 			return NULL;
1336 		}
1337 	}
1338 
1339 	btrfs_tree_read_unlock_blocking(eb);
1340 	free_extent_buffer(eb);
1341 
1342 	btrfs_tree_read_lock(eb_rewin);
1343 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1344 	WARN_ON(btrfs_header_nritems(eb_rewin) >
1345 		BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1346 
1347 	return eb_rewin;
1348 }
1349 
1350 /*
1351  * get_old_root() rewinds the state of @root's root node to the given @time_seq
1352  * value. If there are no changes, the current root->node is returned. If
1353  * anything changed in between, there's a fresh buffer allocated on which the
1354  * rewind operations are done. In any case, the returned buffer is read locked.
1355  * Returns NULL on error (with no locks held).
1356  */
1357 static inline struct extent_buffer *
1358 get_old_root(struct btrfs_root *root, u64 time_seq)
1359 {
1360 	struct btrfs_fs_info *fs_info = root->fs_info;
1361 	struct tree_mod_elem *tm;
1362 	struct extent_buffer *eb = NULL;
1363 	struct extent_buffer *eb_root;
1364 	u64 eb_root_owner = 0;
1365 	struct extent_buffer *old;
1366 	struct tree_mod_root *old_root = NULL;
1367 	u64 old_generation = 0;
1368 	u64 logical;
1369 	int level;
1370 
1371 	eb_root = btrfs_read_lock_root_node(root);
1372 	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1373 	if (!tm)
1374 		return eb_root;
1375 
1376 	if (tm->op == MOD_LOG_ROOT_REPLACE) {
1377 		old_root = &tm->old_root;
1378 		old_generation = tm->generation;
1379 		logical = old_root->logical;
1380 		level = old_root->level;
1381 	} else {
1382 		logical = eb_root->start;
1383 		level = btrfs_header_level(eb_root);
1384 	}
1385 
1386 	tm = tree_mod_log_search(fs_info, logical, time_seq);
1387 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1388 		btrfs_tree_read_unlock(eb_root);
1389 		free_extent_buffer(eb_root);
1390 		old = read_tree_block(fs_info, logical, 0, level, NULL);
1391 		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1392 			if (!IS_ERR(old))
1393 				free_extent_buffer(old);
1394 			btrfs_warn(fs_info,
1395 				   "failed to read tree block %llu from get_old_root",
1396 				   logical);
1397 		} else {
1398 			eb = btrfs_clone_extent_buffer(old);
1399 			free_extent_buffer(old);
1400 		}
1401 	} else if (old_root) {
1402 		eb_root_owner = btrfs_header_owner(eb_root);
1403 		btrfs_tree_read_unlock(eb_root);
1404 		free_extent_buffer(eb_root);
1405 		eb = alloc_dummy_extent_buffer(fs_info, logical);
1406 	} else {
1407 		btrfs_set_lock_blocking_read(eb_root);
1408 		eb = btrfs_clone_extent_buffer(eb_root);
1409 		btrfs_tree_read_unlock_blocking(eb_root);
1410 		free_extent_buffer(eb_root);
1411 	}
1412 
1413 	if (!eb)
1414 		return NULL;
1415 	btrfs_tree_read_lock(eb);
1416 	if (old_root) {
1417 		btrfs_set_header_bytenr(eb, eb->start);
1418 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1419 		btrfs_set_header_owner(eb, eb_root_owner);
1420 		btrfs_set_header_level(eb, old_root->level);
1421 		btrfs_set_header_generation(eb, old_generation);
1422 	}
1423 	if (tm)
1424 		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1425 	else
1426 		WARN_ON(btrfs_header_level(eb) != 0);
1427 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1428 
1429 	return eb;
1430 }
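
/*
 * get_old_root() is the entry point for time-travel reads: a caller first
 * pins a sequence number with btrfs_get_tree_mod_seq() and later walks the
 * rewound tree, e.g. through btrfs_search_old_slot() further down in this
 * file, which starts from the buffer returned here instead of root->node.
 */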
1431 
1432 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1433 {
1434 	struct tree_mod_elem *tm;
1435 	int level;
1436 	struct extent_buffer *eb_root = btrfs_root_node(root);
1437 
1438 	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1439 	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1440 		level = tm->old_root.level;
1441 	} else {
1442 		level = btrfs_header_level(eb_root);
1443 	}
1444 	free_extent_buffer(eb_root);
1445 
1446 	return level;
1447 }
1448 
1449 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1450 				   struct btrfs_root *root,
1451 				   struct extent_buffer *buf)
1452 {
1453 	if (btrfs_is_testing(root->fs_info))
1454 		return 0;
1455 
1456 	/* Ensure we can see the FORCE_COW bit */
1457 	smp_mb__before_atomic();
1458 
1459 	/*
1460 	 * We do not need to cow a block if
1461 	 * 1) this block was created or changed in this transaction and has not
1462 	 *    been written back yet; 2) this block carries the RELOC flag only if
1463 	 *    it belongs to the TREE_RELOC tree; 3) the root is not forced COW.
1464 	 *
1465 	 * What is forced COW:
1466 	 *    when we create snapshot during committing the transaction,
1467 	 *    after we've finished copying src root, we must COW the shared
1468 	 *    block to ensure the metadata consistency.
1469 	 */
1470 	if (btrfs_header_generation(buf) == trans->transid &&
1471 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1472 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1473 	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1474 	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1475 		return 0;
1476 	return 1;
1477 }
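
/*
 * Example: a buffer with header generation 95 that is touched in
 * transaction 97 must be COWed, since the first condition fails.  The same
 * buffer, once COWed in transaction 97 and not yet written back, satisfies
 * all the conditions and may be modified in place for the rest of the
 * transaction.
 */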
1478 
1479 /*
1480  * cows a single block, see __btrfs_cow_block for the real work.
1481  * This version of it has extra checks so that a block isn't COWed more than
1482  * once per transaction, as long as it hasn't been written yet
1483  */
1484 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1485 		    struct btrfs_root *root, struct extent_buffer *buf,
1486 		    struct extent_buffer *parent, int parent_slot,
1487 		    struct extent_buffer **cow_ret)
1488 {
1489 	struct btrfs_fs_info *fs_info = root->fs_info;
1490 	u64 search_start;
1491 	int ret;
1492 
1493 	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
1494 		btrfs_err(fs_info,
1495 			"COW'ing blocks on a fs root that's being dropped");
1496 
1497 	if (trans->transaction != fs_info->running_transaction)
1498 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1499 		       trans->transid,
1500 		       fs_info->running_transaction->transid);
1501 
1502 	if (trans->transid != fs_info->generation)
1503 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1504 		       trans->transid, fs_info->generation);
1505 
1506 	if (!should_cow_block(trans, root, buf)) {
1507 		trans->dirty = true;
1508 		*cow_ret = buf;
1509 		return 0;
1510 	}
1511 
1512 	search_start = buf->start & ~((u64)SZ_1G - 1);
1513 
1514 	if (parent)
1515 		btrfs_set_lock_blocking_write(parent);
1516 	btrfs_set_lock_blocking_write(buf);
1517 
1518 	/*
1519 	 * Before CoWing this block for later modification, check if it's
1520 	 * the subtree root and do the delayed subtree trace if needed.
1521 	 *
1522 	 * Also, we don't care about the error, as it's handled internally.
1523 	 */
1524 	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
1525 	ret = __btrfs_cow_block(trans, root, buf, parent,
1526 				 parent_slot, cow_ret, search_start, 0);
1527 
1528 	trace_btrfs_cow_block(root, buf, *cow_ret);
1529 
1530 	return ret;
1531 }
1532 
1533 /*
1534  * helper function for defrag to decide if two blocks pointed to by a
1535  * node are actually close by
1536  */
1537 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1538 {
1539 	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1540 		return 1;
1541 	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1542 		return 1;
1543 	return 0;
1544 }
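
/*
 * Example with a 16KiB nodesize: blocknr = 0 and other = 40960 leave a gap
 * of 40960 - (0 + 16384) = 24576 bytes, which is below the 32KiB threshold,
 * so the blocks count as close and btrfs_realloc_node() skips relocating
 * them.
 */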
1545 
1546 /*
1547  * compare two keys in a memcmp fashion
1548  */
1549 static int comp_keys(const struct btrfs_disk_key *disk,
1550 		     const struct btrfs_key *k2)
1551 {
1552 	struct btrfs_key k1;
1553 
1554 	btrfs_disk_key_to_cpu(&k1, disk);
1555 
1556 	return btrfs_comp_cpu_keys(&k1, k2);
1557 }
1558 
1559 /*
1560  * same as comp_keys only with two btrfs_key's
1561  */
1562 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
1563 {
1564 	if (k1->objectid > k2->objectid)
1565 		return 1;
1566 	if (k1->objectid < k2->objectid)
1567 		return -1;
1568 	if (k1->type > k2->type)
1569 		return 1;
1570 	if (k1->type < k2->type)
1571 		return -1;
1572 	if (k1->offset > k2->offset)
1573 		return 1;
1574 	if (k1->offset < k2->offset)
1575 		return -1;
1576 	return 0;
1577 }
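
/*
 * Keys therefore sort by (objectid, type, offset).  For example:
 *
 *	{257, BTRFS_INODE_ITEM_KEY, 0}  < {257, BTRFS_EXTENT_DATA_KEY, 0}
 *	{257, BTRFS_EXTENT_DATA_KEY, 0} < {257, BTRFS_EXTENT_DATA_KEY, 4096}
 *
 * because BTRFS_INODE_ITEM_KEY (1) is numerically smaller than
 * BTRFS_EXTENT_DATA_KEY (108).
 */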
1578 
1579 /*
1580  * this is used by the defrag code to go through all the
1581  * leaves pointed to by a node and reallocate them so that
1582  * disk order is close to key order
1583  */
1584 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1585 		       struct btrfs_root *root, struct extent_buffer *parent,
1586 		       int start_slot, u64 *last_ret,
1587 		       struct btrfs_key *progress)
1588 {
1589 	struct btrfs_fs_info *fs_info = root->fs_info;
1590 	struct extent_buffer *cur;
1591 	u64 blocknr;
1592 	u64 gen;
1593 	u64 search_start = *last_ret;
1594 	u64 last_block = 0;
1595 	u64 other;
1596 	u32 parent_nritems;
1597 	int end_slot;
1598 	int i;
1599 	int err = 0;
1600 	int parent_level;
1601 	int uptodate;
1602 	u32 blocksize;
1603 	int progress_passed = 0;
1604 	struct btrfs_disk_key disk_key;
1605 
1606 	parent_level = btrfs_header_level(parent);
1607 
1608 	WARN_ON(trans->transaction != fs_info->running_transaction);
1609 	WARN_ON(trans->transid != fs_info->generation);
1610 
1611 	parent_nritems = btrfs_header_nritems(parent);
1612 	blocksize = fs_info->nodesize;
1613 	end_slot = parent_nritems - 1;
1614 
1615 	if (parent_nritems <= 1)
1616 		return 0;
1617 
1618 	btrfs_set_lock_blocking_write(parent);
1619 
1620 	for (i = start_slot; i <= end_slot; i++) {
1621 		struct btrfs_key first_key;
1622 		int close = 1;
1623 
1624 		btrfs_node_key(parent, &disk_key, i);
1625 		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1626 			continue;
1627 
1628 		progress_passed = 1;
1629 		blocknr = btrfs_node_blockptr(parent, i);
1630 		gen = btrfs_node_ptr_generation(parent, i);
1631 		btrfs_node_key_to_cpu(parent, &first_key, i);
1632 		if (last_block == 0)
1633 			last_block = blocknr;
1634 
1635 		if (i > 0) {
1636 			other = btrfs_node_blockptr(parent, i - 1);
1637 			close = close_blocks(blocknr, other, blocksize);
1638 		}
1639 		if (!close && i < end_slot) {
1640 			other = btrfs_node_blockptr(parent, i + 1);
1641 			close = close_blocks(blocknr, other, blocksize);
1642 		}
1643 		if (close) {
1644 			last_block = blocknr;
1645 			continue;
1646 		}
1647 
1648 		cur = find_extent_buffer(fs_info, blocknr);
1649 		if (cur)
1650 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1651 		else
1652 			uptodate = 0;
1653 		if (!cur || !uptodate) {
1654 			if (!cur) {
1655 				cur = read_tree_block(fs_info, blocknr, gen,
1656 						      parent_level - 1,
1657 						      &first_key);
1658 				if (IS_ERR(cur)) {
1659 					return PTR_ERR(cur);
1660 				} else if (!extent_buffer_uptodate(cur)) {
1661 					free_extent_buffer(cur);
1662 					return -EIO;
1663 				}
1664 			} else if (!uptodate) {
1665 				err = btrfs_read_buffer(cur, gen,
1666 						parent_level - 1, &first_key);
1667 				if (err) {
1668 					free_extent_buffer(cur);
1669 					return err;
1670 				}
1671 			}
1672 		}
1673 		if (search_start == 0)
1674 			search_start = last_block;
1675 
1676 		btrfs_tree_lock(cur);
1677 		btrfs_set_lock_blocking_write(cur);
1678 		err = __btrfs_cow_block(trans, root, cur, parent, i,
1679 					&cur, search_start,
1680 					min(16 * blocksize,
1681 					    (end_slot - i) * blocksize));
1682 		if (err) {
1683 			btrfs_tree_unlock(cur);
1684 			free_extent_buffer(cur);
1685 			break;
1686 		}
1687 		search_start = cur->start;
1688 		last_block = cur->start;
1689 		*last_ret = search_start;
1690 		btrfs_tree_unlock(cur);
1691 		free_extent_buffer(cur);
1692 	}
1693 	return err;
1694 }
1695 
1696 /*
1697  * search for key in the extent_buffer.  The items start at offset p,
1698  * and they are item_size apart.  There are 'max' items in p.
1699  *
1700  * the slot in the array is returned via slot, and it points to
1701  * the place where you would insert key if it is not found in
1702  * the array.
1703  *
1704  * slot may point to max if the key is bigger than all of the keys
1705  */
1706 static noinline int generic_bin_search(struct extent_buffer *eb,
1707 				       unsigned long p, int item_size,
1708 				       const struct btrfs_key *key,
1709 				       int max, int *slot)
1710 {
1711 	int low = 0;
1712 	int high = max;
1713 	int mid;
1714 	int ret;
1715 	struct btrfs_disk_key *tmp = NULL;
1716 	struct btrfs_disk_key unaligned;
1717 	unsigned long offset;
1718 	char *kaddr = NULL;
1719 	unsigned long map_start = 0;
1720 	unsigned long map_len = 0;
1721 	int err;
1722 
1723 	if (low > high) {
1724 		btrfs_err(eb->fs_info,
1725 		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1726 			  __func__, low, high, eb->start,
1727 			  btrfs_header_owner(eb), btrfs_header_level(eb));
1728 		return -EINVAL;
1729 	}
1730 
1731 	while (low < high) {
1732 		mid = (low + high) / 2;
1733 		offset = p + mid * item_size;
1734 
1735 		if (!kaddr || offset < map_start ||
1736 		    (offset + sizeof(struct btrfs_disk_key)) >
1737 		    map_start + map_len) {
1738 
1739 			err = map_private_extent_buffer(eb, offset,
1740 						sizeof(struct btrfs_disk_key),
1741 						&kaddr, &map_start, &map_len);
1742 
1743 			if (!err) {
1744 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1745 							map_start);
1746 			} else if (err == 1) {
1747 				read_extent_buffer(eb, &unaligned,
1748 						   offset, sizeof(unaligned));
1749 				tmp = &unaligned;
1750 			} else {
1751 				return err;
1752 			}
1753 
1754 		} else {
1755 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1756 							map_start);
1757 		}
1758 		ret = comp_keys(tmp, key);
1759 
1760 		if (ret < 0)
1761 			low = mid + 1;
1762 		else if (ret > 0)
1763 			high = mid;
1764 		else {
1765 			*slot = mid;
1766 			return 0;
1767 		}
1768 	}
1769 	*slot = low;
1770 	return 1;
1771 }
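
/*
 * Worked example: with keys {2, 4, 6} in slots 0-2 and a search key of 5,
 * the loop probes mid = 1 (key 4, too small, low becomes 2) and mid = 2
 * (key 6, too big, high becomes 2), then exits and returns 1 with
 * *slot = 2, the position where 5 would be inserted.
 */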
1772 
1773 /*
1774  * simple bin_search frontend that does the right thing for
1775  * leaves vs nodes
1776  */
1777 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1778 		     int level, int *slot)
1779 {
1780 	if (level == 0)
1781 		return generic_bin_search(eb,
1782 					  offsetof(struct btrfs_leaf, items),
1783 					  sizeof(struct btrfs_item),
1784 					  key, btrfs_header_nritems(eb),
1785 					  slot);
1786 	else
1787 		return generic_bin_search(eb,
1788 					  offsetof(struct btrfs_node, ptrs),
1789 					  sizeof(struct btrfs_key_ptr),
1790 					  key, btrfs_header_nritems(eb),
1791 					  slot);
1792 }
1793 
1794 static void root_add_used(struct btrfs_root *root, u32 size)
1795 {
1796 	spin_lock(&root->accounting_lock);
1797 	btrfs_set_root_used(&root->root_item,
1798 			    btrfs_root_used(&root->root_item) + size);
1799 	spin_unlock(&root->accounting_lock);
1800 }
1801 
1802 static void root_sub_used(struct btrfs_root *root, u32 size)
1803 {
1804 	spin_lock(&root->accounting_lock);
1805 	btrfs_set_root_used(&root->root_item,
1806 			    btrfs_root_used(&root->root_item) - size);
1807 	spin_unlock(&root->accounting_lock);
1808 }
1809 
1810 /* given a node and slot number, this reads the block it points to.  The
1811  * extent buffer is returned with a reference taken (but unlocked).
1812  */
1813 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1814 					   int slot)
1815 {
1816 	int level = btrfs_header_level(parent);
1817 	struct extent_buffer *eb;
1818 	struct btrfs_key first_key;
1819 
1820 	if (slot < 0 || slot >= btrfs_header_nritems(parent))
1821 		return ERR_PTR(-ENOENT);
1822 
1823 	BUG_ON(level == 0);
1824 
1825 	btrfs_node_key_to_cpu(parent, &first_key, slot);
1826 	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
1827 			     btrfs_node_ptr_generation(parent, slot),
1828 			     level - 1, &first_key);
1829 	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1830 		free_extent_buffer(eb);
1831 		eb = ERR_PTR(-EIO);
1832 	}
1833 
1834 	return eb;
1835 }
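
/*
 * Illustrative sketch, not part of the original file: iterating the children
 * of an internal node (level > 0) with btrfs_read_node_slot().  The helper
 * name is an assumption for demonstration only.
 */
static int __maybe_unused demo_walk_children(struct extent_buffer *node)
{
	int nritems = btrfs_header_nritems(node);
	int i;

	for (i = 0; i < nritems; i++) {
		struct extent_buffer *child = btrfs_read_node_slot(node, i);

		if (IS_ERR(child))
			return PTR_ERR(child);
		/* ... inspect the (unlocked) child here ... */
		free_extent_buffer(child);	/* drop the reference we got */
	}
	return 0;
}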
1836 
1837 /*
1838  * node level balancing, used to make sure nodes are in proper order for
1839  * item deletion.  We balance from the top down, so we have to make sure
1840  * that a deletion won't leave a node completely empty later on.
1841  */
1842 static noinline int balance_level(struct btrfs_trans_handle *trans,
1843 			 struct btrfs_root *root,
1844 			 struct btrfs_path *path, int level)
1845 {
1846 	struct btrfs_fs_info *fs_info = root->fs_info;
1847 	struct extent_buffer *right = NULL;
1848 	struct extent_buffer *mid;
1849 	struct extent_buffer *left = NULL;
1850 	struct extent_buffer *parent = NULL;
1851 	int ret = 0;
1852 	int wret;
1853 	int pslot;
1854 	int orig_slot = path->slots[level];
1855 	u64 orig_ptr;
1856 
1857 	ASSERT(level > 0);
1858 
1859 	mid = path->nodes[level];
1860 
1861 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1862 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1863 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1864 
1865 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1866 
1867 	if (level < BTRFS_MAX_LEVEL - 1) {
1868 		parent = path->nodes[level + 1];
1869 		pslot = path->slots[level + 1];
1870 	}
1871 
1872 	/*
1873 	 * deal with the case where there is only one pointer in the root
1874 	 * by promoting the node below to a root
1875 	 */
1876 	if (!parent) {
1877 		struct extent_buffer *child;
1878 
1879 		if (btrfs_header_nritems(mid) != 1)
1880 			return 0;
1881 
1882 		/* promote the child to a root */
1883 		child = btrfs_read_node_slot(mid, 0);
1884 		if (IS_ERR(child)) {
1885 			ret = PTR_ERR(child);
1886 			btrfs_handle_fs_error(fs_info, ret, NULL);
1887 			goto enospc;
1888 		}
1889 
1890 		btrfs_tree_lock(child);
1891 		btrfs_set_lock_blocking_write(child);
1892 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1893 		if (ret) {
1894 			btrfs_tree_unlock(child);
1895 			free_extent_buffer(child);
1896 			goto enospc;
1897 		}
1898 
1899 		ret = tree_mod_log_insert_root(root->node, child, 1);
1900 		BUG_ON(ret < 0);
1901 		rcu_assign_pointer(root->node, child);
1902 
1903 		add_root_to_dirty_list(root);
1904 		btrfs_tree_unlock(child);
1905 
1906 		path->locks[level] = 0;
1907 		path->nodes[level] = NULL;
1908 		btrfs_clean_tree_block(mid);
1909 		btrfs_tree_unlock(mid);
1910 		/* once for the path */
1911 		free_extent_buffer(mid);
1912 
1913 		root_sub_used(root, mid->len);
1914 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1915 		/* once for the root ptr */
1916 		free_extent_buffer_stale(mid);
1917 		return 0;
1918 	}
1919 	if (btrfs_header_nritems(mid) >
1920 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1921 		return 0;
1922 
1923 	left = btrfs_read_node_slot(parent, pslot - 1);
1924 	if (IS_ERR(left))
1925 		left = NULL;
1926 
1927 	if (left) {
1928 		btrfs_tree_lock(left);
1929 		btrfs_set_lock_blocking_write(left);
1930 		wret = btrfs_cow_block(trans, root, left,
1931 				       parent, pslot - 1, &left);
1932 		if (wret) {
1933 			ret = wret;
1934 			goto enospc;
1935 		}
1936 	}
1937 
1938 	right = btrfs_read_node_slot(parent, pslot + 1);
1939 	if (IS_ERR(right))
1940 		right = NULL;
1941 
1942 	if (right) {
1943 		btrfs_tree_lock(right);
1944 		btrfs_set_lock_blocking_write(right);
1945 		wret = btrfs_cow_block(trans, root, right,
1946 				       parent, pslot + 1, &right);
1947 		if (wret) {
1948 			ret = wret;
1949 			goto enospc;
1950 		}
1951 	}
1952 
1953 	/* first, try to make some room in the middle buffer */
1954 	if (left) {
1955 		orig_slot += btrfs_header_nritems(left);
1956 		wret = push_node_left(trans, left, mid, 1);
1957 		if (wret < 0)
1958 			ret = wret;
1959 	}
1960 
1961 	/*
1962 	 * then try to empty the rightmost buffer into the middle
1963 	 */
1964 	if (right) {
1965 		wret = push_node_left(trans, mid, right, 1);
1966 		if (wret < 0 && wret != -ENOSPC)
1967 			ret = wret;
1968 		if (btrfs_header_nritems(right) == 0) {
1969 			btrfs_clean_tree_block(right);
1970 			btrfs_tree_unlock(right);
1971 			del_ptr(root, path, level + 1, pslot + 1);
1972 			root_sub_used(root, right->len);
1973 			btrfs_free_tree_block(trans, root, right, 0, 1);
1974 			free_extent_buffer_stale(right);
1975 			right = NULL;
1976 		} else {
1977 			struct btrfs_disk_key right_key;
1978 			btrfs_node_key(right, &right_key, 0);
1979 			ret = tree_mod_log_insert_key(parent, pslot + 1,
1980 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1981 			BUG_ON(ret < 0);
1982 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1983 			btrfs_mark_buffer_dirty(parent);
1984 		}
1985 	}
1986 	if (btrfs_header_nritems(mid) == 1) {
1987 		/*
1988 		 * we're not allowed to leave a node with one item in the
1989 		 * tree during a delete.  A deletion from lower in the tree
1990 		 * could try to delete the only pointer in this node.
1991 		 * So, pull some keys from the left.
1992 		 * There has to be a left pointer at this point because
1993 		 * otherwise we would have pulled some pointers from the
1994 		 * right.
1995 		 */
1996 		if (!left) {
1997 			ret = -EROFS;
1998 			btrfs_handle_fs_error(fs_info, ret, NULL);
1999 			goto enospc;
2000 		}
2001 		wret = balance_node_right(trans, mid, left);
2002 		if (wret < 0) {
2003 			ret = wret;
2004 			goto enospc;
2005 		}
2006 		if (wret == 1) {
2007 			wret = push_node_left(trans, left, mid, 1);
2008 			if (wret < 0)
2009 				ret = wret;
2010 		}
2011 		BUG_ON(wret == 1);
2012 	}
2013 	if (btrfs_header_nritems(mid) == 0) {
2014 		btrfs_clean_tree_block(mid);
2015 		btrfs_tree_unlock(mid);
2016 		del_ptr(root, path, level + 1, pslot);
2017 		root_sub_used(root, mid->len);
2018 		btrfs_free_tree_block(trans, root, mid, 0, 1);
2019 		free_extent_buffer_stale(mid);
2020 		mid = NULL;
2021 	} else {
2022 		/* update the parent key to reflect our changes */
2023 		struct btrfs_disk_key mid_key;
2024 		btrfs_node_key(mid, &mid_key, 0);
2025 		ret = tree_mod_log_insert_key(parent, pslot,
2026 				MOD_LOG_KEY_REPLACE, GFP_NOFS);
2027 		BUG_ON(ret < 0);
2028 		btrfs_set_node_key(parent, &mid_key, pslot);
2029 		btrfs_mark_buffer_dirty(parent);
2030 	}
2031 
2032 	/* update the path */
2033 	if (left) {
2034 		if (btrfs_header_nritems(left) > orig_slot) {
2035 			atomic_inc(&left->refs);
2036 			/* left was locked after cow */
2037 			path->nodes[level] = left;
2038 			path->slots[level + 1] -= 1;
2039 			path->slots[level] = orig_slot;
2040 			if (mid) {
2041 				btrfs_tree_unlock(mid);
2042 				free_extent_buffer(mid);
2043 			}
2044 		} else {
2045 			orig_slot -= btrfs_header_nritems(left);
2046 			path->slots[level] = orig_slot;
2047 		}
2048 	}
2049 	/* double check we haven't messed things up */
2050 	if (orig_ptr !=
2051 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2052 		BUG();
2053 enospc:
2054 	if (right) {
2055 		btrfs_tree_unlock(right);
2056 		free_extent_buffer(right);
2057 	}
2058 	if (left) {
2059 		if (path->nodes[level] != left)
2060 			btrfs_tree_unlock(left);
2061 		free_extent_buffer(left);
2062 	}
2063 	return ret;
2064 }
2065 
2066 /* Node balancing for insertion.  Here we only split or push nodes around
2067  * when they are completely full.  This is also done top down, so we
2068  * have to be pessimistic.
2069  */
2070 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2071 					  struct btrfs_root *root,
2072 					  struct btrfs_path *path, int level)
2073 {
2074 	struct btrfs_fs_info *fs_info = root->fs_info;
2075 	struct extent_buffer *right = NULL;
2076 	struct extent_buffer *mid;
2077 	struct extent_buffer *left = NULL;
2078 	struct extent_buffer *parent = NULL;
2079 	int ret = 0;
2080 	int wret;
2081 	int pslot;
2082 	int orig_slot = path->slots[level];
2083 
2084 	if (level == 0)
2085 		return 1;
2086 
2087 	mid = path->nodes[level];
2088 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2089 
2090 	if (level < BTRFS_MAX_LEVEL - 1) {
2091 		parent = path->nodes[level + 1];
2092 		pslot = path->slots[level + 1];
2093 	}
2094 
2095 	if (!parent)
2096 		return 1;
2097 
2098 	left = btrfs_read_node_slot(parent, pslot - 1);
2099 	if (IS_ERR(left))
2100 		left = NULL;
2101 
2102 	/* first, try to make some room in the middle buffer */
2103 	if (left) {
2104 		u32 left_nr;
2105 
2106 		btrfs_tree_lock(left);
2107 		btrfs_set_lock_blocking_write(left);
2108 
2109 		left_nr = btrfs_header_nritems(left);
2110 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2111 			wret = 1;
2112 		} else {
2113 			ret = btrfs_cow_block(trans, root, left, parent,
2114 					      pslot - 1, &left);
2115 			if (ret)
2116 				wret = 1;
2117 			else {
2118 				wret = push_node_left(trans, left, mid, 0);
2119 			}
2120 		}
2121 		if (wret < 0)
2122 			ret = wret;
2123 		if (wret == 0) {
2124 			struct btrfs_disk_key disk_key;
2125 			orig_slot += left_nr;
2126 			btrfs_node_key(mid, &disk_key, 0);
2127 			ret = tree_mod_log_insert_key(parent, pslot,
2128 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2129 			BUG_ON(ret < 0);
2130 			btrfs_set_node_key(parent, &disk_key, pslot);
2131 			btrfs_mark_buffer_dirty(parent);
2132 			if (btrfs_header_nritems(left) > orig_slot) {
2133 				path->nodes[level] = left;
2134 				path->slots[level + 1] -= 1;
2135 				path->slots[level] = orig_slot;
2136 				btrfs_tree_unlock(mid);
2137 				free_extent_buffer(mid);
2138 			} else {
2139 				orig_slot -=
2140 					btrfs_header_nritems(left);
2141 				path->slots[level] = orig_slot;
2142 				btrfs_tree_unlock(left);
2143 				free_extent_buffer(left);
2144 			}
2145 			return 0;
2146 		}
2147 		btrfs_tree_unlock(left);
2148 		free_extent_buffer(left);
2149 	}
2150 	right = btrfs_read_node_slot(parent, pslot + 1);
2151 	if (IS_ERR(right))
2152 		right = NULL;
2153 
2154 	/*
2155 	 * then try to empty the rightmost buffer into the middle
2156 	 */
2157 	if (right) {
2158 		u32 right_nr;
2159 
2160 		btrfs_tree_lock(right);
2161 		btrfs_set_lock_blocking_write(right);
2162 
2163 		right_nr = btrfs_header_nritems(right);
2164 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2165 			wret = 1;
2166 		} else {
2167 			ret = btrfs_cow_block(trans, root, right,
2168 					      parent, pslot + 1,
2169 					      &right);
2170 			if (ret)
2171 				wret = 1;
2172 			else {
2173 				wret = balance_node_right(trans, right, mid);
2174 			}
2175 		}
2176 		if (wret < 0)
2177 			ret = wret;
2178 		if (wret == 0) {
2179 			struct btrfs_disk_key disk_key;
2180 
2181 			btrfs_node_key(right, &disk_key, 0);
2182 			ret = tree_mod_log_insert_key(parent, pslot + 1,
2183 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2184 			BUG_ON(ret < 0);
2185 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2186 			btrfs_mark_buffer_dirty(parent);
2187 
2188 			if (btrfs_header_nritems(mid) <= orig_slot) {
2189 				path->nodes[level] = right;
2190 				path->slots[level + 1] += 1;
2191 				path->slots[level] = orig_slot -
2192 					btrfs_header_nritems(mid);
2193 				btrfs_tree_unlock(mid);
2194 				free_extent_buffer(mid);
2195 			} else {
2196 				btrfs_tree_unlock(right);
2197 				free_extent_buffer(right);
2198 			}
2199 			return 0;
2200 		}
2201 		btrfs_tree_unlock(right);
2202 		free_extent_buffer(right);
2203 	}
2204 	return 1;
2205 }
2206 
2207 /*
2208  * readahead one full node of leaves, finding things that are close
2209  * to the block in 'slot', and triggering ra on them.
2210  */
2211 static void reada_for_search(struct btrfs_fs_info *fs_info,
2212 			     struct btrfs_path *path,
2213 			     int level, int slot, u64 objectid)
2214 {
2215 	struct extent_buffer *node;
2216 	struct btrfs_disk_key disk_key;
2217 	u32 nritems;
2218 	u64 search;
2219 	u64 target;
2220 	u64 nread = 0;
2221 	struct extent_buffer *eb;
2222 	u32 nr;
2223 	u32 blocksize;
2224 	u32 nscan = 0;
2225 
2226 	if (level != 1)
2227 		return;
2228 
2229 	if (!path->nodes[level])
2230 		return;
2231 
2232 	node = path->nodes[level];
2233 
2234 	search = btrfs_node_blockptr(node, slot);
2235 	blocksize = fs_info->nodesize;
2236 	eb = find_extent_buffer(fs_info, search);
2237 	if (eb) {
2238 		free_extent_buffer(eb);
2239 		return;
2240 	}
2241 
2242 	target = search;
2243 
2244 	nritems = btrfs_header_nritems(node);
2245 	nr = slot;
2246 
2247 	while (1) {
2248 		if (path->reada == READA_BACK) {
2249 			if (nr == 0)
2250 				break;
2251 			nr--;
2252 		} else if (path->reada == READA_FORWARD) {
2253 			nr++;
2254 			if (nr >= nritems)
2255 				break;
2256 		}
2257 		if (path->reada == READA_BACK && objectid) {
2258 			btrfs_node_key(node, &disk_key, nr);
2259 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2260 				break;
2261 		}
2262 		search = btrfs_node_blockptr(node, nr);
2263 		if ((search <= target && target - search <= 65536) ||
2264 		    (search > target && search - target <= 65536)) {
2265 			readahead_tree_block(fs_info, search);
2266 			nread += blocksize;
2267 		}
2268 		nscan++;
2269 		if ((nread > 65536 || nscan > 32))
2270 			break;
2271 	}
2272 }
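
/*
 * Worked example for the loop above, assuming the common 16K nodesize:
 * readahead is only issued for blocks within 64K of the target block, and
 * the loop stops once roughly 64K worth of readahead (a handful of 16K
 * blocks) has been queued or 32 slots have been inspected, whichever comes
 * first.
 */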
2273 
2274 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2275 				       struct btrfs_path *path, int level)
2276 {
2277 	int slot;
2278 	int nritems;
2279 	struct extent_buffer *parent;
2280 	struct extent_buffer *eb;
2281 	u64 gen;
2282 	u64 block1 = 0;
2283 	u64 block2 = 0;
2284 
2285 	parent = path->nodes[level + 1];
2286 	if (!parent)
2287 		return;
2288 
2289 	nritems = btrfs_header_nritems(parent);
2290 	slot = path->slots[level + 1];
2291 
2292 	if (slot > 0) {
2293 		block1 = btrfs_node_blockptr(parent, slot - 1);
2294 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2295 		eb = find_extent_buffer(fs_info, block1);
2296 		/*
2297 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we don't
2298 		 * want to return -EAGAIN here; that would loop
2299 		 * forever.
2300 		 */
2301 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2302 			block1 = 0;
2303 		free_extent_buffer(eb);
2304 	}
2305 	if (slot + 1 < nritems) {
2306 		block2 = btrfs_node_blockptr(parent, slot + 1);
2307 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2308 		eb = find_extent_buffer(fs_info, block2);
2309 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2310 			block2 = 0;
2311 		free_extent_buffer(eb);
2312 	}
2313 
2314 	if (block1)
2315 		readahead_tree_block(fs_info, block1);
2316 	if (block2)
2317 		readahead_tree_block(fs_info, block2);
2318 }
2319 
2320 
2321 /*
2322  * when we walk down the tree, it is usually safe to unlock the higher layers
2323  * in the tree.  The exceptions are when our path goes through slot 0, because
2324  * operations on the tree might require changing key pointers higher up in the
2325  * tree.
2326  *
2327  * callers might also have set path->keep_locks, which tells this code to keep
2328  * the lock if the path points to the last slot in the block.  This is part of
2329  * walking through the tree, and selecting the next slot in the higher block.
2330  *
2331  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2332  * if lowest_unlock is 1, level 0 won't be unlocked.
2333  */
2334 static noinline void unlock_up(struct btrfs_path *path, int level,
2335 			       int lowest_unlock, int min_write_lock_level,
2336 			       int *write_lock_level)
2337 {
2338 	int i;
2339 	int skip_level = level;
2340 	int no_skips = 0;
2341 	struct extent_buffer *t;
2342 
2343 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2344 		if (!path->nodes[i])
2345 			break;
2346 		if (!path->locks[i])
2347 			break;
2348 		if (!no_skips && path->slots[i] == 0) {
2349 			skip_level = i + 1;
2350 			continue;
2351 		}
2352 		if (!no_skips && path->keep_locks) {
2353 			u32 nritems;
2354 			t = path->nodes[i];
2355 			nritems = btrfs_header_nritems(t);
2356 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2357 				skip_level = i + 1;
2358 				continue;
2359 			}
2360 		}
2361 		if (skip_level < i && i >= lowest_unlock)
2362 			no_skips = 1;
2363 
2364 		t = path->nodes[i];
2365 		if (i >= lowest_unlock && i > skip_level) {
2366 			btrfs_tree_unlock_rw(t, path->locks[i]);
2367 			path->locks[i] = 0;
2368 			if (write_lock_level &&
2369 			    i > min_write_lock_level &&
2370 			    i <= *write_lock_level) {
2371 				*write_lock_level = i - 1;
2372 			}
2373 		}
2374 	}
2375 }
2376 
2377 /*
2378  * helper function for btrfs_search_slot.  The goal is to find a block
2379  * in cache without setting the path to blocking.  If we find the block
2380  * we return zero and the path is unchanged.
2381  *
2382  * If we can't find the block, we set the path blocking and do some
2383  * reada.  -EAGAIN is returned and the search must be repeated.
2384  */
2385 static int
2386 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2387 		      struct extent_buffer **eb_ret, int level, int slot,
2388 		      const struct btrfs_key *key)
2389 {
2390 	struct btrfs_fs_info *fs_info = root->fs_info;
2391 	u64 blocknr;
2392 	u64 gen;
2393 	struct extent_buffer *b = *eb_ret;
2394 	struct extent_buffer *tmp;
2395 	struct btrfs_key first_key;
2396 	int ret;
2397 	int parent_level;
2398 
2399 	blocknr = btrfs_node_blockptr(b, slot);
2400 	gen = btrfs_node_ptr_generation(b, slot);
2401 	parent_level = btrfs_header_level(b);
2402 	btrfs_node_key_to_cpu(b, &first_key, slot);
2403 
2404 	tmp = find_extent_buffer(fs_info, blocknr);
2405 	if (tmp) {
2406 		/* first we do an atomic uptodate check */
2407 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2408 			/*
2409 			 * Do extra check for first_key, eb can be stale due to
2410 			 * being cached, read from scrub, or have multiple
2411 			 * parents (shared tree blocks).
2412 			 */
2413 			if (btrfs_verify_level_key(tmp,
2414 					parent_level - 1, &first_key, gen)) {
2415 				free_extent_buffer(tmp);
2416 				return -EUCLEAN;
2417 			}
2418 			*eb_ret = tmp;
2419 			return 0;
2420 		}
2421 
2422 		/* the pages were up to date, but we failed
2423 		 * the generation number check.  Do a full
2424 		 * read for the generation number that is correct.
2425 		 * We must do this without dropping locks so
2426 		 * we can trust our generation number
2427 		 */
2428 		btrfs_set_path_blocking(p);
2429 
2430 		/* now we're allowed to do a blocking uptodate check */
2431 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2432 		if (!ret) {
2433 			*eb_ret = tmp;
2434 			return 0;
2435 		}
2436 		free_extent_buffer(tmp);
2437 		btrfs_release_path(p);
2438 		return -EIO;
2439 	}
2440 
2441 	/*
2442 	 * reduce lock contention at high levels
2443 	 * of the btree by dropping locks before
2444 	 * we read.  Don't release the lock on the current
2445 	 * level because we need to walk this node to figure
2446 	 * out which blocks to read.
2447 	 */
2448 	btrfs_unlock_up_safe(p, level + 1);
2449 	btrfs_set_path_blocking(p);
2450 
2451 	if (p->reada != READA_NONE)
2452 		reada_for_search(fs_info, p, level, slot, key->objectid);
2453 
2454 	ret = -EAGAIN;
2455 	tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2456 			      &first_key);
2457 	if (!IS_ERR(tmp)) {
2458 		/*
2459 		 * If the read above didn't mark this buffer up to date,
2460 		 * it will never end up being up to date.  Set ret to EIO now
2461 		 * and give up so that our caller doesn't loop forever
2462 		 * on our EAGAINs.
2463 		 */
2464 		if (!extent_buffer_uptodate(tmp))
2465 			ret = -EIO;
2466 		free_extent_buffer(tmp);
2467 	} else {
2468 		ret = PTR_ERR(tmp);
2469 	}
2470 
2471 	btrfs_release_path(p);
2472 	return ret;
2473 }
2474 
2475 /*
2476  * helper function for btrfs_search_slot.  This does all of the checks
2477  * for node-level blocks and does any balancing required based on
2478  * the ins_len.
2479  *
2480  * If no extra work was required, zero is returned.  If we had to
2481  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2482  * start over
2483  */
2484 static int
2485 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2486 		       struct btrfs_root *root, struct btrfs_path *p,
2487 		       struct extent_buffer *b, int level, int ins_len,
2488 		       int *write_lock_level)
2489 {
2490 	struct btrfs_fs_info *fs_info = root->fs_info;
2491 	int ret;
2492 
2493 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2494 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2495 		int sret;
2496 
2497 		if (*write_lock_level < level + 1) {
2498 			*write_lock_level = level + 1;
2499 			btrfs_release_path(p);
2500 			goto again;
2501 		}
2502 
2503 		btrfs_set_path_blocking(p);
2504 		reada_for_balance(fs_info, p, level);
2505 		sret = split_node(trans, root, p, level);
2506 
2507 		BUG_ON(sret > 0);
2508 		if (sret) {
2509 			ret = sret;
2510 			goto done;
2511 		}
2512 		b = p->nodes[level];
2513 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2514 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2515 		int sret;
2516 
2517 		if (*write_lock_level < level + 1) {
2518 			*write_lock_level = level + 1;
2519 			btrfs_release_path(p);
2520 			goto again;
2521 		}
2522 
2523 		btrfs_set_path_blocking(p);
2524 		reada_for_balance(fs_info, p, level);
2525 		sret = balance_level(trans, root, p, level);
2526 
2527 		if (sret) {
2528 			ret = sret;
2529 			goto done;
2530 		}
2531 		b = p->nodes[level];
2532 		if (!b) {
2533 			btrfs_release_path(p);
2534 			goto again;
2535 		}
2536 		BUG_ON(btrfs_header_nritems(b) == 1);
2537 	}
2538 	return 0;
2539 
2540 again:
2541 	ret = -EAGAIN;
2542 done:
2543 	return ret;
2544 }
2545 
2546 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2547 		      int level, int *prev_cmp, int *slot)
2548 {
2549 	if (*prev_cmp != 0) {
2550 		*prev_cmp = btrfs_bin_search(b, key, level, slot);
2551 		return *prev_cmp;
2552 	}
2553 
2554 	*slot = 0;
2555 
2556 	return 0;
2557 }
2558 
2559 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2560 		u64 iobjectid, u64 ioff, u8 key_type,
2561 		struct btrfs_key *found_key)
2562 {
2563 	int ret;
2564 	struct btrfs_key key;
2565 	struct extent_buffer *eb;
2566 
2567 	ASSERT(path);
2568 	ASSERT(found_key);
2569 
2570 	key.type = key_type;
2571 	key.objectid = iobjectid;
2572 	key.offset = ioff;
2573 
2574 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2575 	if (ret < 0)
2576 		return ret;
2577 
2578 	eb = path->nodes[0];
2579 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2580 		ret = btrfs_next_leaf(fs_root, path);
2581 		if (ret)
2582 			return ret;
2583 		eb = path->nodes[0];
2584 	}
2585 
2586 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2587 	if (found_key->type != key.type ||
2588 			found_key->objectid != key.objectid)
2589 		return 1;
2590 
2591 	return 0;
2592 }
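
/*
 * Illustrative sketch, not part of the original file: a minimal
 * btrfs_find_item() caller.  The helper name and the choice of
 * BTRFS_INODE_ITEM_KEY are assumptions for demonstration only.
 */
static int __maybe_unused demo_find_inode_item(struct btrfs_root *fs_root,
					       u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* 0 if a matching item was found, 1 if not, < 0 on error */
	ret = btrfs_find_item(fs_root, path, ino, 0,
			      BTRFS_INODE_ITEM_KEY, &found_key);
	btrfs_free_path(path);
	return ret;
}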
2593 
2594 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2595 							struct btrfs_path *p,
2596 							int write_lock_level)
2597 {
2598 	struct btrfs_fs_info *fs_info = root->fs_info;
2599 	struct extent_buffer *b;
2600 	int root_lock;
2601 	int level = 0;
2602 
2603 	/* We try very hard to do read locks on the root */
2604 	root_lock = BTRFS_READ_LOCK;
2605 
2606 	if (p->search_commit_root) {
2607 		/*
2608 		 * The commit roots are read only, so we always take read
2609 		 * locks, and we must hold the commit_root_sem while searching
2610 		 * them.  The only exception is send: it must not block
2611 		 * transaction commits for a long time, so we clone the commit
2612 		 * root there in order to avoid races with transaction commits
2613 		 * that create a snapshot of one of the roots used by a send
2614 		 * operation.
2615 		 */
2616 		if (p->need_commit_sem) {
2617 			down_read(&fs_info->commit_root_sem);
2618 			b = btrfs_clone_extent_buffer(root->commit_root);
2619 			up_read(&fs_info->commit_root_sem);
2620 			if (!b)
2621 				return ERR_PTR(-ENOMEM);
2622 
2623 		} else {
2624 			b = root->commit_root;
2625 			atomic_inc(&b->refs);
2626 		}
2627 		level = btrfs_header_level(b);
2628 		/*
2629 		 * Ensure that all callers have set skip_locking when
2630 		 * p->search_commit_root = 1.
2631 		 */
2632 		ASSERT(p->skip_locking == 1);
2633 
2634 		goto out;
2635 	}
2636 
2637 	if (p->skip_locking) {
2638 		b = btrfs_root_node(root);
2639 		level = btrfs_header_level(b);
2640 		goto out;
2641 	}
2642 
2643 	/*
2644 	 * If the level is set to maximum, we can skip trying to get the read
2645 	 * lock.
2646 	 */
2647 	if (write_lock_level < BTRFS_MAX_LEVEL) {
2648 		/*
2649 		 * We don't know the level of the root node until we actually
2650 		 * have it read locked
2651 		 */
2652 		b = btrfs_read_lock_root_node(root);
2653 		level = btrfs_header_level(b);
2654 		if (level > write_lock_level)
2655 			goto out;
2656 
2657 		/* Whoops, must trade for write lock */
2658 		btrfs_tree_read_unlock(b);
2659 		free_extent_buffer(b);
2660 	}
2661 
2662 	b = btrfs_lock_root_node(root);
2663 	root_lock = BTRFS_WRITE_LOCK;
2664 
2665 	/* The level might have changed, check again */
2666 	level = btrfs_header_level(b);
2667 
2668 out:
2669 	p->nodes[level] = b;
2670 	if (!p->skip_locking)
2671 		p->locks[level] = root_lock;
2672 	/*
2673 	 * Callers are responsible for dropping b's references.
2674 	 */
2675 	return b;
2676 }
2677 
2678 
2679 /*
2680  * btrfs_search_slot - look for a key in a tree and perform necessary
2681  * modifications to preserve tree invariants.
2682  *
2683  * @trans:	Handle of transaction, used when modifying the tree
2684  * @p:		Holds all btree nodes along the search path
2685  * @root:	The root node of the tree
2686  * @key:	The key we are looking for
2687  * @ins_len:	Indicates purpose of search, for inserts it is 1, for
2688  *		deletions it's -1. 0 for plain searches
2689  * @cow:	boolean indicating whether CoW operations should be performed.
2690  *		Must always be 1 when modifying the tree.
2691  *
2692  * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2693  * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2694  *
2695  * If @key is found, 0 is returned and you can find the item in the leaf level
2696  * of the path (level 0)
2697  *
2698  * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2699  * points to the slot where it should be inserted
2700  *
2701  * If an error is encountered while searching the tree a negative error number
2702  * is returned
2703  */
2704 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2705 		      const struct btrfs_key *key, struct btrfs_path *p,
2706 		      int ins_len, int cow)
2707 {
2708 	struct extent_buffer *b;
2709 	int slot;
2710 	int ret;
2711 	int err;
2712 	int level;
2713 	int lowest_unlock = 1;
2714 	/* everything at write_lock_level or lower must be write locked */
2715 	int write_lock_level = 0;
2716 	u8 lowest_level = 0;
2717 	int min_write_lock_level;
2718 	int prev_cmp;
2719 
2720 	lowest_level = p->lowest_level;
2721 	WARN_ON(lowest_level && ins_len > 0);
2722 	WARN_ON(p->nodes[0] != NULL);
2723 	BUG_ON(!cow && ins_len);
2724 
2725 	if (ins_len < 0) {
2726 		lowest_unlock = 2;
2727 
2728 		/* when we are removing items, we might have to go up to level
2729 		 * two as we update tree pointers.  Make sure we keep write
2730 		 * locks on those levels as well.
2731 		 */
2732 		write_lock_level = 2;
2733 	} else if (ins_len > 0) {
2734 		/*
2735 		 * for inserting items, make sure we have a write lock on
2736 		 * level 1 so we can update keys
2737 		 */
2738 		write_lock_level = 1;
2739 	}
2740 
2741 	if (!cow)
2742 		write_lock_level = -1;
2743 
2744 	if (cow && (p->keep_locks || p->lowest_level))
2745 		write_lock_level = BTRFS_MAX_LEVEL;
2746 
2747 	min_write_lock_level = write_lock_level;
2748 
2749 again:
2750 	prev_cmp = -1;
2751 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
2752 	if (IS_ERR(b)) {
2753 		ret = PTR_ERR(b);
2754 		goto done;
2755 	}
2756 
2757 	while (b) {
2758 		int dec = 0;
2759 
2760 		level = btrfs_header_level(b);
2761 
2762 		if (cow) {
2763 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2764 
2765 			/*
2766 			 * if we don't really need to cow this block
2767 			 * then we don't want to set the path blocking,
2768 			 * so we test it here
2769 			 */
2770 			if (!should_cow_block(trans, root, b)) {
2771 				trans->dirty = true;
2772 				goto cow_done;
2773 			}
2774 
2775 			/*
2776 			 * must have write locks on this node and the
2777 			 * parent
2778 			 */
2779 			if (level > write_lock_level ||
2780 			    (level + 1 > write_lock_level &&
2781 			    level + 1 < BTRFS_MAX_LEVEL &&
2782 			    p->nodes[level + 1])) {
2783 				write_lock_level = level + 1;
2784 				btrfs_release_path(p);
2785 				goto again;
2786 			}
2787 
2788 			btrfs_set_path_blocking(p);
2789 			if (last_level)
2790 				err = btrfs_cow_block(trans, root, b, NULL, 0,
2791 						      &b);
2792 			else
2793 				err = btrfs_cow_block(trans, root, b,
2794 						      p->nodes[level + 1],
2795 						      p->slots[level + 1], &b);
2796 			if (err) {
2797 				ret = err;
2798 				goto done;
2799 			}
2800 		}
2801 cow_done:
2802 		p->nodes[level] = b;
2803 		/*
2804 		 * Leave the path with blocking locks to avoid massive
2805 		 * lock context switching; this is done on purpose.
2806 		 */
2807 
2808 		/*
2809 		 * we have a lock on b and as long as we aren't changing
2810 		 * the tree, there is no way for the items in b to change.
2811 		 * It is safe to drop the lock on our parent before we
2812 		 * go through the expensive btree search on b.
2813 		 *
2814 		 * If we're inserting or deleting (ins_len != 0), then we might
2815 		 * be changing slot zero, which may require changing the parent.
2816 		 * So, we can't drop the lock until after we know which slot
2817 		 * we're operating on.
2818 		 */
2819 		if (!ins_len && !p->keep_locks) {
2820 			int u = level + 1;
2821 
2822 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2823 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2824 				p->locks[u] = 0;
2825 			}
2826 		}
2827 
2828 		ret = key_search(b, key, level, &prev_cmp, &slot);
2829 		if (ret < 0)
2830 			goto done;
2831 
2832 		if (level == 0) {
2833 			p->slots[level] = slot;
2834 			if (ins_len > 0 &&
2835 			    btrfs_leaf_free_space(b) < ins_len) {
2836 				if (write_lock_level < 1) {
2837 					write_lock_level = 1;
2838 					btrfs_release_path(p);
2839 					goto again;
2840 				}
2841 
2842 				btrfs_set_path_blocking(p);
2843 				err = split_leaf(trans, root, key,
2844 						 p, ins_len, ret == 0);
2845 
2846 				BUG_ON(err > 0);
2847 				if (err) {
2848 					ret = err;
2849 					goto done;
2850 				}
2851 			}
2852 			if (!p->search_for_split)
2853 				unlock_up(p, level, lowest_unlock,
2854 					  min_write_lock_level, NULL);
2855 			goto done;
2856 		}
2857 		if (ret && slot > 0) {
2858 			dec = 1;
2859 			slot--;
2860 		}
2861 		p->slots[level] = slot;
2862 		err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2863 					     &write_lock_level);
2864 		if (err == -EAGAIN)
2865 			goto again;
2866 		if (err) {
2867 			ret = err;
2868 			goto done;
2869 		}
2870 		b = p->nodes[level];
2871 		slot = p->slots[level];
2872 
2873 		/*
2874 		 * Slot 0 is special: if we change the key we have to update
2875 		 * the parent pointer which means we must have a write lock on
2876 		 * the parent
2877 		 */
2878 		if (slot == 0 && ins_len && write_lock_level < level + 1) {
2879 			write_lock_level = level + 1;
2880 			btrfs_release_path(p);
2881 			goto again;
2882 		}
2883 
2884 		unlock_up(p, level, lowest_unlock, min_write_lock_level,
2885 			  &write_lock_level);
2886 
2887 		if (level == lowest_level) {
2888 			if (dec)
2889 				p->slots[level]++;
2890 			goto done;
2891 		}
2892 
2893 		err = read_block_for_search(root, p, &b, level, slot, key);
2894 		if (err == -EAGAIN)
2895 			goto again;
2896 		if (err) {
2897 			ret = err;
2898 			goto done;
2899 		}
2900 
2901 		if (!p->skip_locking) {
2902 			level = btrfs_header_level(b);
2903 			if (level <= write_lock_level) {
2904 				if (!btrfs_try_tree_write_lock(b)) {
2905 					btrfs_set_path_blocking(p);
2906 					btrfs_tree_lock(b);
2907 				}
2908 				p->locks[level] = BTRFS_WRITE_LOCK;
2909 			} else {
2910 				if (!btrfs_tree_read_lock_atomic(b)) {
2911 					btrfs_set_path_blocking(p);
2912 					btrfs_tree_read_lock(b);
2913 				}
2914 				p->locks[level] = BTRFS_READ_LOCK;
2915 			}
2916 			p->nodes[level] = b;
2917 		}
2918 	}
2919 	ret = 1;
2920 done:
2921 	/*
2922 	 * we don't really know what they plan on doing with the path
2923 	 * from here on, so for now just mark it as blocking
2924 	 */
2925 	if (!p->leave_spinning)
2926 		btrfs_set_path_blocking(p);
2927 	if (ret < 0 && !p->skip_release_on_error)
2928 		btrfs_release_path(p);
2929 	return ret;
2930 }
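
/*
 * Illustrative sketch, not part of the original file: a plain read-only
 * lookup following the return convention documented above.  The helper
 * name is an assumption for demonstration only.
 */
static int __maybe_unused demo_lookup_item(struct btrfs_root *root,
					   const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* no transaction, ins_len == 0 and cow == 0: pure search */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_key found;

		/* exact match: the item lives at path->slots[0] in the leaf */
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
	} else if (ret > 0) {
		/* not found: path->slots[0] is where it would be inserted */
		ret = -ENOENT;
	}
	btrfs_free_path(path);
	return ret;
}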
2931 
2932 /*
2933  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2934  * current state of the tree together with the operations recorded in the tree
2935  * modification log to search for the key in a previous version of this tree, as
2936  * denoted by the time_seq parameter.
2937  *
2938  * Naturally, there is no support for insert, delete or cow operations.
2939  *
2940  * The resulting path and return value will be set up as if we called
2941  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2942  */
2943 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2944 			  struct btrfs_path *p, u64 time_seq)
2945 {
2946 	struct btrfs_fs_info *fs_info = root->fs_info;
2947 	struct extent_buffer *b;
2948 	int slot;
2949 	int ret;
2950 	int err;
2951 	int level;
2952 	int lowest_unlock = 1;
2953 	u8 lowest_level = 0;
2954 	int prev_cmp = -1;
2955 
2956 	lowest_level = p->lowest_level;
2957 	WARN_ON(p->nodes[0] != NULL);
2958 
2959 	if (p->search_commit_root) {
2960 		BUG_ON(time_seq);
2961 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2962 	}
2963 
2964 again:
2965 	b = get_old_root(root, time_seq);
2966 	if (!b) {
2967 		ret = -EIO;
2968 		goto done;
2969 	}
2970 	level = btrfs_header_level(b);
2971 	p->locks[level] = BTRFS_READ_LOCK;
2972 
2973 	while (b) {
2974 		int dec = 0;
2975 
2976 		level = btrfs_header_level(b);
2977 		p->nodes[level] = b;
2978 
2979 		/*
2980 		 * we have a lock on b and as long as we aren't changing
2981 		 * the tree, there is no way for the items in b to change.
2982 		 * It is safe to drop the lock on our parent before we
2983 		 * go through the expensive btree search on b.
2984 		 */
2985 		btrfs_unlock_up_safe(p, level + 1);
2986 
2987 		/*
2988 		 * Since we can unwind ebs we want to do a real search every
2989 		 * time.
2990 		 */
2991 		prev_cmp = -1;
2992 		ret = key_search(b, key, level, &prev_cmp, &slot);
2993 		if (ret < 0)
2994 			goto done;
2995 
2996 		if (level == 0) {
2997 			p->slots[level] = slot;
2998 			unlock_up(p, level, lowest_unlock, 0, NULL);
2999 			goto done;
3000 		}
3001 
3002 		if (ret && slot > 0) {
3003 			dec = 1;
3004 			slot--;
3005 		}
3006 		p->slots[level] = slot;
3007 		unlock_up(p, level, lowest_unlock, 0, NULL);
3008 
3009 		if (level == lowest_level) {
3010 			if (dec)
3011 				p->slots[level]++;
3012 			goto done;
3013 		}
3014 
3015 		err = read_block_for_search(root, p, &b, level, slot, key);
3016 		if (err == -EAGAIN)
3017 			goto again;
3018 		if (err) {
3019 			ret = err;
3020 			goto done;
3021 		}
3022 
3023 		level = btrfs_header_level(b);
3024 		if (!btrfs_tree_read_lock_atomic(b)) {
3025 			btrfs_set_path_blocking(p);
3026 			btrfs_tree_read_lock(b);
3027 		}
3028 		b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3029 		if (!b) {
3030 			ret = -ENOMEM;
3031 			goto done;
3032 		}
3033 		p->locks[level] = BTRFS_READ_LOCK;
3034 		p->nodes[level] = b;
3035 	}
3036 	ret = 1;
3037 done:
3038 	if (!p->leave_spinning)
3039 		btrfs_set_path_blocking(p);
3040 	if (ret < 0)
3041 		btrfs_release_path(p);
3042 
3043 	return ret;
3044 }
3045 
3046 /*
3047  * helper to use instead of search slot if no exact match is needed but
3048  * instead the next or previous item should be returned.
3049  * When find_higher is true, the next higher item is returned, the next lower
3050  * otherwise.
3051  * When return_any and find_higher are both true, and no higher item is found,
3052  * return the next lower instead.
3053  * When return_any is true and find_higher is false, and no lower item is found,
3054  * return the next higher instead.
3055  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3056  * < 0 on error
3057  */
3058 int btrfs_search_slot_for_read(struct btrfs_root *root,
3059 			       const struct btrfs_key *key,
3060 			       struct btrfs_path *p, int find_higher,
3061 			       int return_any)
3062 {
3063 	int ret;
3064 	struct extent_buffer *leaf;
3065 
3066 again:
3067 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3068 	if (ret <= 0)
3069 		return ret;
3070 	/*
3071 	 * a return value of 1 means the path is at the position where the
3072 	 * item should be inserted. Normally this is the next bigger item,
3073 	 * but in case the previous item is the last in a leaf, path points
3074 	 * to the first free slot in the previous leaf, i.e. at an invalid
3075 	 * item.
3076 	 */
3077 	leaf = p->nodes[0];
3078 
3079 	if (find_higher) {
3080 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3081 			ret = btrfs_next_leaf(root, p);
3082 			if (ret <= 0)
3083 				return ret;
3084 			if (!return_any)
3085 				return 1;
3086 			/*
3087 			 * no higher item found, return the next
3088 			 * lower instead
3089 			 */
3090 			return_any = 0;
3091 			find_higher = 0;
3092 			btrfs_release_path(p);
3093 			goto again;
3094 		}
3095 	} else {
3096 		if (p->slots[0] == 0) {
3097 			ret = btrfs_prev_leaf(root, p);
3098 			if (ret < 0)
3099 				return ret;
3100 			if (!ret) {
3101 				leaf = p->nodes[0];
3102 				if (p->slots[0] == btrfs_header_nritems(leaf))
3103 					p->slots[0]--;
3104 				return 0;
3105 			}
3106 			if (!return_any)
3107 				return 1;
3108 			/*
3109 			 * no lower item found, return the next
3110 			 * higher instead
3111 			 */
3112 			return_any = 0;
3113 			find_higher = 1;
3114 			btrfs_release_path(p);
3115 			goto again;
3116 		} else {
3117 			--p->slots[0];
3118 		}
3119 	}
3120 	return 0;
3121 }
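
/*
 * Illustrative sketch, not part of the original file: positioning on the
 * nearest item at or after a key with btrfs_search_slot_for_read().  The
 * helper name is an assumption for demonstration only.
 */
static int __maybe_unused demo_first_item_from(struct btrfs_root *root,
					       u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	/* find_higher = 1, return_any = 0: next higher item or nothing */
	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	btrfs_free_path(path);
	return ret;
}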
3122 
3123 /*
3124  * adjust the pointers going up the tree, starting at level
3125  * making sure the right key of each node points to 'key'.
3126  * This is used after shifting pointers to the left, so it stops
3127  * fixing up pointers when a given leaf/node is not in slot 0 of the
3128  * higher levels
3129  *
3130  */
3131 static void fixup_low_keys(struct btrfs_path *path,
3132 			   struct btrfs_disk_key *key, int level)
3133 {
3134 	int i;
3135 	struct extent_buffer *t;
3136 	int ret;
3137 
3138 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3139 		int tslot = path->slots[i];
3140 
3141 		if (!path->nodes[i])
3142 			break;
3143 		t = path->nodes[i];
3144 		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3145 				GFP_ATOMIC);
3146 		BUG_ON(ret < 0);
3147 		btrfs_set_node_key(t, key, tslot);
3148 		btrfs_mark_buffer_dirty(path->nodes[i]);
3149 		if (tslot != 0)
3150 			break;
3151 	}
3152 }
3153 
3154 /*
3155  * update item key.
3156  *
3157  * This function isn't completely safe. It's the caller's responsibility
3158  * to ensure that the new key won't break the key ordering.
3159  */
3160 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3161 			     struct btrfs_path *path,
3162 			     const struct btrfs_key *new_key)
3163 {
3164 	struct btrfs_disk_key disk_key;
3165 	struct extent_buffer *eb;
3166 	int slot;
3167 
3168 	eb = path->nodes[0];
3169 	slot = path->slots[0];
3170 	if (slot > 0) {
3171 		btrfs_item_key(eb, &disk_key, slot - 1);
3172 		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
3173 			btrfs_crit(fs_info,
3174 		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3175 				   slot, btrfs_disk_key_objectid(&disk_key),
3176 				   btrfs_disk_key_type(&disk_key),
3177 				   btrfs_disk_key_offset(&disk_key),
3178 				   new_key->objectid, new_key->type,
3179 				   new_key->offset);
3180 			btrfs_print_leaf(eb);
3181 			BUG();
3182 		}
3183 	}
3184 	if (slot < btrfs_header_nritems(eb) - 1) {
3185 		btrfs_item_key(eb, &disk_key, slot + 1);
3186 		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
3187 			btrfs_crit(fs_info,
3188 		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3189 				   slot, btrfs_disk_key_objectid(&disk_key),
3190 				   btrfs_disk_key_type(&disk_key),
3191 				   btrfs_disk_key_offset(&disk_key),
3192 				   new_key->objectid, new_key->type,
3193 				   new_key->offset);
3194 			btrfs_print_leaf(eb);
3195 			BUG();
3196 		}
3197 	}
3198 
3199 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3200 	btrfs_set_item_key(eb, &disk_key, slot);
3201 	btrfs_mark_buffer_dirty(eb);
3202 	if (slot == 0)
3203 		fixup_low_keys(path, &disk_key, 1);
3204 }
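
/*
 * A typical btrfs_set_item_key_safe() use (an illustrative assumption, e.g.
 * when the front of a file extent item is trimmed): the item's data stays
 * in place and only the key offset is raised; the checks above guarantee
 * the new key still sorts strictly between its neighbors.
 */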
3205 
3206 /*
3207  * try to push data from one node into the next node left in the
3208  * tree.
3209  *
3210  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3211  * error, and > 0 if there was no room in the left hand block.
3212  */
3213 static int push_node_left(struct btrfs_trans_handle *trans,
3214 			  struct extent_buffer *dst,
3215 			  struct extent_buffer *src, int empty)
3216 {
3217 	struct btrfs_fs_info *fs_info = trans->fs_info;
3218 	int push_items = 0;
3219 	int src_nritems;
3220 	int dst_nritems;
3221 	int ret = 0;
3222 
3223 	src_nritems = btrfs_header_nritems(src);
3224 	dst_nritems = btrfs_header_nritems(dst);
3225 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3226 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3227 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3228 
3229 	if (!empty && src_nritems <= 8)
3230 		return 1;
3231 
3232 	if (push_items <= 0)
3233 		return 1;
3234 
3235 	if (empty) {
3236 		push_items = min(src_nritems, push_items);
3237 		if (push_items < src_nritems) {
3238 			/* leave at least 8 pointers in the node if
3239 			 * we aren't going to empty it
3240 			 */
3241 			if (src_nritems - push_items < 8) {
3242 				if (push_items <= 8)
3243 					return 1;
3244 				push_items -= 8;
3245 			}
3246 		}
3247 	} else
3248 		push_items = min(src_nritems - 8, push_items);
3249 
3250 	ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
3251 	if (ret) {
3252 		btrfs_abort_transaction(trans, ret);
3253 		return ret;
3254 	}
3255 	copy_extent_buffer(dst, src,
3256 			   btrfs_node_key_ptr_offset(dst_nritems),
3257 			   btrfs_node_key_ptr_offset(0),
3258 			   push_items * sizeof(struct btrfs_key_ptr));
3259 
3260 	if (push_items < src_nritems) {
3261 		/*
3262 		 * Don't call tree_mod_log_insert_move here, key removal was
3263 		 * already fully logged by tree_mod_log_eb_copy above.
3264 		 */
3265 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3266 				      btrfs_node_key_ptr_offset(push_items),
3267 				      (src_nritems - push_items) *
3268 				      sizeof(struct btrfs_key_ptr));
3269 	}
3270 	btrfs_set_header_nritems(src, src_nritems - push_items);
3271 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3272 	btrfs_mark_buffer_dirty(src);
3273 	btrfs_mark_buffer_dirty(dst);
3274 
3275 	return ret;
3276 }
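
/*
 * Worked example for the clamping above: with empty == 0 and
 * src_nritems == 10, push_items becomes min(10 - 8, room in dst), i.e. at
 * most 2, so the required 8 pointers always stay behind in src.
 */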
3277 
3278 /*
3279  * try to push data from one node into the next node right in the
3280  * tree.
3281  *
3282  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3283  * error, and > 0 if there was no room in the right hand block.
3284  *
3285  * this will only push up to 1/2 the contents of the left node over
3286  */
3287 static int balance_node_right(struct btrfs_trans_handle *trans,
3288 			      struct extent_buffer *dst,
3289 			      struct extent_buffer *src)
3290 {
3291 	struct btrfs_fs_info *fs_info = trans->fs_info;
3292 	int push_items = 0;
3293 	int max_push;
3294 	int src_nritems;
3295 	int dst_nritems;
3296 	int ret = 0;
3297 
3298 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3299 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3300 
3301 	src_nritems = btrfs_header_nritems(src);
3302 	dst_nritems = btrfs_header_nritems(dst);
3303 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3304 	if (push_items <= 0)
3305 		return 1;
3306 
3307 	if (src_nritems < 4)
3308 		return 1;
3309 
3310 	max_push = src_nritems / 2 + 1;
3311 	/* don't try to empty the node */
3312 	if (max_push >= src_nritems)
3313 		return 1;
3314 
3315 	if (max_push < push_items)
3316 		push_items = max_push;
3317 
3318 	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3319 	BUG_ON(ret < 0);
3320 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3321 				      btrfs_node_key_ptr_offset(0),
3322 				      (dst_nritems) *
3323 				      sizeof(struct btrfs_key_ptr));
3324 
3325 	ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
3326 				   push_items);
3327 	if (ret) {
3328 		btrfs_abort_transaction(trans, ret);
3329 		return ret;
3330 	}
3331 	copy_extent_buffer(dst, src,
3332 			   btrfs_node_key_ptr_offset(0),
3333 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3334 			   push_items * sizeof(struct btrfs_key_ptr));
3335 
3336 	btrfs_set_header_nritems(src, src_nritems - push_items);
3337 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3338 
3339 	btrfs_mark_buffer_dirty(src);
3340 	btrfs_mark_buffer_dirty(dst);
3341 
3342 	return ret;
3343 }
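
/*
 * Worked example for the cap above: with src_nritems == 9, max_push is
 * 9 / 2 + 1 == 5, so at most five pointers (the upper half) move right and
 * the source node is never emptied.
 */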
3344 
3345 /*
3346  * helper function to insert a new root level in the tree.
3347  * A new node is allocated, and a single item is inserted to
3348  * point to the existing root
3349  *
3350  * returns zero on success or < 0 on failure.
3351  */
3352 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3353 			   struct btrfs_root *root,
3354 			   struct btrfs_path *path, int level)
3355 {
3356 	struct btrfs_fs_info *fs_info = root->fs_info;
3357 	u64 lower_gen;
3358 	struct extent_buffer *lower;
3359 	struct extent_buffer *c;
3360 	struct extent_buffer *old;
3361 	struct btrfs_disk_key lower_key;
3362 	int ret;
3363 
3364 	BUG_ON(path->nodes[level]);
3365 	BUG_ON(path->nodes[level-1] != root->node);
3366 
3367 	lower = path->nodes[level-1];
3368 	if (level == 1)
3369 		btrfs_item_key(lower, &lower_key, 0);
3370 	else
3371 		btrfs_node_key(lower, &lower_key, 0);
3372 
3373 	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
3374 					 root->node->start, 0);
3375 	if (IS_ERR(c))
3376 		return PTR_ERR(c);
3377 
3378 	root_add_used(root, fs_info->nodesize);
3379 
3380 	btrfs_set_header_nritems(c, 1);
3381 	btrfs_set_node_key(c, &lower_key, 0);
3382 	btrfs_set_node_blockptr(c, 0, lower->start);
3383 	lower_gen = btrfs_header_generation(lower);
3384 	WARN_ON(lower_gen != trans->transid);
3385 
3386 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3387 
3388 	btrfs_mark_buffer_dirty(c);
3389 
3390 	old = root->node;
3391 	ret = tree_mod_log_insert_root(root->node, c, 0);
3392 	BUG_ON(ret < 0);
3393 	rcu_assign_pointer(root->node, c);
3394 
3395 	/* the super has an extra ref to root->node */
3396 	free_extent_buffer(old);
3397 
3398 	add_root_to_dirty_list(root);
3399 	atomic_inc(&c->refs);
3400 	path->nodes[level] = c;
3401 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3402 	path->slots[level] = 0;
3403 	return 0;
3404 }
3405 
3406 /*
3407  * worker function to insert a single pointer in a node.
3408  * the node should have enough room for the pointer already
3409  *
3410  * slot and level indicate where you want the key to go, and
3411  * bytenr is the block the key points to.
3412  */
3413 static void insert_ptr(struct btrfs_trans_handle *trans,
3414 		       struct btrfs_path *path,
3415 		       struct btrfs_disk_key *key, u64 bytenr,
3416 		       int slot, int level)
3417 {
3418 	struct extent_buffer *lower;
3419 	int nritems;
3420 	int ret;
3421 
3422 	BUG_ON(!path->nodes[level]);
3423 	btrfs_assert_tree_locked(path->nodes[level]);
3424 	lower = path->nodes[level];
3425 	nritems = btrfs_header_nritems(lower);
3426 	BUG_ON(slot > nritems);
3427 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
3428 	if (slot != nritems) {
3429 		if (level) {
3430 			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3431 					nritems - slot);
3432 			BUG_ON(ret < 0);
3433 		}
3434 		memmove_extent_buffer(lower,
3435 			      btrfs_node_key_ptr_offset(slot + 1),
3436 			      btrfs_node_key_ptr_offset(slot),
3437 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3438 	}
3439 	if (level) {
3440 		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3441 				GFP_NOFS);
3442 		BUG_ON(ret < 0);
3443 	}
3444 	btrfs_set_node_key(lower, key, slot);
3445 	btrfs_set_node_blockptr(lower, slot, bytenr);
3446 	WARN_ON(trans->transid == 0);
3447 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3448 	btrfs_set_header_nritems(lower, nritems + 1);
3449 	btrfs_mark_buffer_dirty(lower);
3450 }
3451 
3452 /*
3453  * split the node at the specified level in path in two.
3454  * The path is corrected to point to the appropriate node after the split
3455  *
3456  * Before splitting this tries to make some room in the node by pushing
3457  * left and right, if either one works, it returns right away.
3458  *
3459  * returns 0 on success and < 0 on failure
3460  */
3461 static noinline int split_node(struct btrfs_trans_handle *trans,
3462 			       struct btrfs_root *root,
3463 			       struct btrfs_path *path, int level)
3464 {
3465 	struct btrfs_fs_info *fs_info = root->fs_info;
3466 	struct extent_buffer *c;
3467 	struct extent_buffer *split;
3468 	struct btrfs_disk_key disk_key;
3469 	int mid;
3470 	int ret;
3471 	u32 c_nritems;
3472 
3473 	c = path->nodes[level];
3474 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3475 	if (c == root->node) {
3476 		/*
3477 		 * trying to split the root, let's make a new one
3478 		 *
3479 		 * tree mod log: We don't log removal of the old root in
3480 		 * insert_new_root, because that root buffer will be kept as a
3481 		 * normal node. We are going to log removal of half of the
3482 		 * elements below with tree_mod_log_eb_copy. We're holding a
3483 		 * tree lock on the buffer, which is why we cannot race with
3484 		 * other tree_mod_log users.
3485 		 */
3486 		ret = insert_new_root(trans, root, path, level + 1);
3487 		if (ret)
3488 			return ret;
3489 	} else {
3490 		ret = push_nodes_for_insert(trans, root, path, level);
3491 		c = path->nodes[level];
3492 		if (!ret && btrfs_header_nritems(c) <
3493 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3494 			return 0;
3495 		if (ret < 0)
3496 			return ret;
3497 	}
3498 
3499 	c_nritems = btrfs_header_nritems(c);
3500 	mid = (c_nritems + 1) / 2;
3501 	btrfs_node_key(c, &disk_key, mid);
3502 
3503 	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
3504 					     c->start, 0);
3505 	if (IS_ERR(split))
3506 		return PTR_ERR(split);
3507 
3508 	root_add_used(root, fs_info->nodesize);
3509 	ASSERT(btrfs_header_level(c) == level);
3510 
3511 	ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
3512 	if (ret) {
3513 		btrfs_abort_transaction(trans, ret);
3514 		return ret;
3515 	}
3516 	copy_extent_buffer(split, c,
3517 			   btrfs_node_key_ptr_offset(0),
3518 			   btrfs_node_key_ptr_offset(mid),
3519 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3520 	btrfs_set_header_nritems(split, c_nritems - mid);
3521 	btrfs_set_header_nritems(c, mid);
3522 	ret = 0;
3523 
3524 	btrfs_mark_buffer_dirty(c);
3525 	btrfs_mark_buffer_dirty(split);
3526 
3527 	insert_ptr(trans, path, &disk_key, split->start,
3528 		   path->slots[level + 1] + 1, level + 1);
3529 
3530 	if (path->slots[level] >= mid) {
3531 		path->slots[level] -= mid;
3532 		btrfs_tree_unlock(c);
3533 		free_extent_buffer(c);
3534 		path->nodes[level] = split;
3535 		path->slots[level + 1] += 1;
3536 	} else {
3537 		btrfs_tree_unlock(split);
3538 		free_extent_buffer(split);
3539 	}
3540 	return ret;
3541 }
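
/*
 * Worked example for the split above: with c_nritems == 99, mid is
 * (99 + 1) / 2 == 50, so slots [50, 98] move to the new 'split' block and
 * the original node keeps slots [0, 49].
 */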
3542 
3543 /*
3544  * how many bytes are required to store the items in a leaf.  start
3545  * and nr indicate which items in the leaf to check.  This totals up the
3546  * space used both by the item structs and the item data
3547  */
3548 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3549 {
3550 	struct btrfs_item *start_item;
3551 	struct btrfs_item *end_item;
3552 	struct btrfs_map_token token;
3553 	int data_len;
3554 	int nritems = btrfs_header_nritems(l);
3555 	int end = min(nritems, start + nr) - 1;
3556 
3557 	if (!nr)
3558 		return 0;
3559 	btrfs_init_map_token(&token, l);
3560 	start_item = btrfs_item_nr(start);
3561 	end_item = btrfs_item_nr(end);
3562 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3563 		btrfs_token_item_size(l, start_item, &token);
3564 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3565 	data_len += sizeof(struct btrfs_item) * nr;
3566 	WARN_ON(data_len < 0);
3567 	return data_len;
3568 }
3569 
3570 /*
3571  * The space between the end of the leaf items and
3572  * the start of the leaf data.  IOW, how much room
3573  * the leaf has left for both items and data
3574  */
3575 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
3576 {
3577 	struct btrfs_fs_info *fs_info = leaf->fs_info;
3578 	int nritems = btrfs_header_nritems(leaf);
3579 	int ret;
3580 
3581 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3582 	if (ret < 0) {
3583 		btrfs_crit(fs_info,
3584 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3585 			   ret,
3586 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3587 			   leaf_space_used(leaf, 0, nritems), nritems);
3588 	}
3589 	return ret;
3590 }
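
/*
 * Worked example (assuming the common 16K nodesize): BTRFS_LEAF_DATA_SIZE
 * is the nodesize minus the block header, and every item costs
 * sizeof(struct btrfs_item) (25 bytes) for the item struct plus the size of
 * its data.  So inserting a 100 byte item needs at least 125 bytes of free
 * space as reported by btrfs_leaf_free_space().
 */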
3591 
3592 /*
3593  * min slot controls the lowest index we're willing to push to the
3594  * right.  We'll push up to and including min_slot, but no lower
3595  */
3596 static noinline int __push_leaf_right(struct btrfs_path *path,
3597 				      int data_size, int empty,
3598 				      struct extent_buffer *right,
3599 				      int free_space, u32 left_nritems,
3600 				      u32 min_slot)
3601 {
3602 	struct btrfs_fs_info *fs_info = right->fs_info;
3603 	struct extent_buffer *left = path->nodes[0];
3604 	struct extent_buffer *upper = path->nodes[1];
3605 	struct btrfs_map_token token;
3606 	struct btrfs_disk_key disk_key;
3607 	int slot;
3608 	u32 i;
3609 	int push_space = 0;
3610 	int push_items = 0;
3611 	struct btrfs_item *item;
3612 	u32 nr;
3613 	u32 right_nritems;
3614 	u32 data_end;
3615 	u32 this_item_size;
3616 
3617 	if (empty)
3618 		nr = 0;
3619 	else
3620 		nr = max_t(u32, 1, min_slot);
3621 
3622 	if (path->slots[0] >= left_nritems)
3623 		push_space += data_size;
3624 
3625 	slot = path->slots[1];
3626 	i = left_nritems - 1;
3627 	while (i >= nr) {
3628 		item = btrfs_item_nr(i);
3629 
3630 		if (!empty && push_items > 0) {
3631 			if (path->slots[0] > i)
3632 				break;
3633 			if (path->slots[0] == i) {
3634 				int space = btrfs_leaf_free_space(left);
3635 
3636 				if (space + push_space * 2 > free_space)
3637 					break;
3638 			}
3639 		}
3640 
3641 		if (path->slots[0] == i)
3642 			push_space += data_size;
3643 
3644 		this_item_size = btrfs_item_size(left, item);
3645 		if (this_item_size + sizeof(*item) + push_space > free_space)
3646 			break;
3647 
3648 		push_items++;
3649 		push_space += this_item_size + sizeof(*item);
3650 		if (i == 0)
3651 			break;
3652 		i--;
3653 	}
3654 
3655 	if (push_items == 0)
3656 		goto out_unlock;
3657 
3658 	WARN_ON(!empty && push_items == left_nritems);
3659 
3660 	/* push left to right */
3661 	right_nritems = btrfs_header_nritems(right);
3662 
3663 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3664 	push_space -= leaf_data_end(left);
3665 
3666 	/* make room in the right data area */
3667 	data_end = leaf_data_end(right);
3668 	memmove_extent_buffer(right,
3669 			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3670 			      BTRFS_LEAF_DATA_OFFSET + data_end,
3671 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3672 
3673 	/* copy from the left data area */
3674 	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3675 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3676 		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
3677 		     push_space);
3678 
3679 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3680 			      btrfs_item_nr_offset(0),
3681 			      right_nritems * sizeof(struct btrfs_item));
3682 
3683 	/* copy the items from left to right */
3684 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3685 		   btrfs_item_nr_offset(left_nritems - push_items),
3686 		   push_items * sizeof(struct btrfs_item));
3687 
3688 	/* update the item pointers */
3689 	btrfs_init_map_token(&token, right);
3690 	right_nritems += push_items;
3691 	btrfs_set_header_nritems(right, right_nritems);
3692 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3693 	for (i = 0; i < right_nritems; i++) {
3694 		item = btrfs_item_nr(i);
3695 		push_space -= btrfs_token_item_size(right, item, &token);
3696 		btrfs_set_token_item_offset(right, item, push_space, &token);
3697 	}
3698 
3699 	left_nritems -= push_items;
3700 	btrfs_set_header_nritems(left, left_nritems);
3701 
3702 	if (left_nritems)
3703 		btrfs_mark_buffer_dirty(left);
3704 	else
3705 		btrfs_clean_tree_block(left);
3706 
3707 	btrfs_mark_buffer_dirty(right);
3708 
3709 	btrfs_item_key(right, &disk_key, 0);
3710 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3711 	btrfs_mark_buffer_dirty(upper);
3712 
3713 	/* then fixup the leaf pointer in the path */
3714 	if (path->slots[0] >= left_nritems) {
3715 		path->slots[0] -= left_nritems;
3716 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3717 			btrfs_clean_tree_block(path->nodes[0]);
3718 		btrfs_tree_unlock(path->nodes[0]);
3719 		free_extent_buffer(path->nodes[0]);
3720 		path->nodes[0] = right;
3721 		path->slots[1] += 1;
3722 	} else {
3723 		btrfs_tree_unlock(right);
3724 		free_extent_buffer(right);
3725 	}
3726 	return 0;
3727 
3728 out_unlock:
3729 	btrfs_tree_unlock(right);
3730 	free_extent_buffer(right);
3731 	return 1;
3732 }
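
/*
 * Layout sketch for the push above (illustration only).  Item headers grow
 * from the front of a leaf while item data is packed at the back, so moving
 * the tail items of 'left' prepends headers at the front of 'right' and
 * slots their data into a hole opened just before right's existing data:
 *
 *   before:  left  [ hdr0 hdr1 hdr2 | free      | data2 data1 data0 ]
 *            right [ hdrA hdrB      | free      | dataB dataA       ]
 *
 *   after pushing item 2:
 *            left  [ hdr0 hdr1      | free            | data1 data0 ]
 *            right [ hdr2 hdrA hdrB | free | data2 dataB dataA      ]
 *
 * which is why every item offset in 'right' is rewritten by the token loop
 * near the end of the function.
 */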
3733 
3734 /*
3735  * push some data in the path leaf to the right, trying to free up at
3736  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3737  *
3738  * returns 1 if the push failed because the other node didn't have enough
3739  * room, 0 if everything worked out and < 0 if there were major errors.
3740  *
3741  * this will push starting from min_slot to the end of the leaf.  It won't
3742  * push any slot lower than min_slot
3743  */
3744 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3745 			   *root, struct btrfs_path *path,
3746 			   int min_data_size, int data_size,
3747 			   int empty, u32 min_slot)
3748 {
3749 	struct extent_buffer *left = path->nodes[0];
3750 	struct extent_buffer *right;
3751 	struct extent_buffer *upper;
3752 	int slot;
3753 	int free_space;
3754 	u32 left_nritems;
3755 	int ret;
3756 
3757 	if (!path->nodes[1])
3758 		return 1;
3759 
3760 	slot = path->slots[1];
3761 	upper = path->nodes[1];
3762 	if (slot >= btrfs_header_nritems(upper) - 1)
3763 		return 1;
3764 
3765 	btrfs_assert_tree_locked(path->nodes[1]);
3766 
3767 	right = btrfs_read_node_slot(upper, slot + 1);
3768 	/*
3769 	 * If slot + 1 is not valid or we fail to read the right node,
3770 	 * it's no big deal, just return.
3771 	 */
3772 	if (IS_ERR(right))
3773 		return 1;
3774 
3775 	btrfs_tree_lock(right);
3776 	btrfs_set_lock_blocking_write(right);
3777 
3778 	free_space = btrfs_leaf_free_space(right);
3779 	if (free_space < data_size)
3780 		goto out_unlock;
3781 
3782 	/* cow and double check */
3783 	ret = btrfs_cow_block(trans, root, right, upper,
3784 			      slot + 1, &right);
3785 	if (ret)
3786 		goto out_unlock;
3787 
3788 	free_space = btrfs_leaf_free_space(right);
3789 	if (free_space < data_size)
3790 		goto out_unlock;
3791 
3792 	left_nritems = btrfs_header_nritems(left);
3793 	if (left_nritems == 0)
3794 		goto out_unlock;
3795 
3796 	if (path->slots[0] == left_nritems && !empty) {
3797 		/* Key greater than all keys in the leaf, right neighbor has
3798 		 * enough room for it and we're not emptying our leaf to delete
3799 		 * it, therefore use right neighbor to insert the new item and
3800 		 * no need to touch/dirty our left leaf. */
3801 		btrfs_tree_unlock(left);
3802 		free_extent_buffer(left);
3803 		path->nodes[0] = right;
3804 		path->slots[0] = 0;
3805 		path->slots[1]++;
3806 		return 0;
3807 	}
3808 
3809 	return __push_leaf_right(path, min_data_size, empty,
3810 				right, free_space, left_nritems, min_slot);
3811 out_unlock:
3812 	btrfs_tree_unlock(right);
3813 	free_extent_buffer(right);
3814 	return 1;
3815 }
3816 
3817 /*
3818  * push some data in the path leaf to the left, trying to free up at
3819  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3820  *
3821  * max_slot can put a limit on how far into the leaf we'll push items.  The
3822  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
3823  * items
3824  */
3825 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3826 				     int empty, struct extent_buffer *left,
3827 				     int free_space, u32 right_nritems,
3828 				     u32 max_slot)
3829 {
3830 	struct btrfs_fs_info *fs_info = left->fs_info;
3831 	struct btrfs_disk_key disk_key;
3832 	struct extent_buffer *right = path->nodes[0];
3833 	int i;
3834 	int push_space = 0;
3835 	int push_items = 0;
3836 	struct btrfs_item *item;
3837 	u32 old_left_nritems;
3838 	u32 nr;
3839 	int ret = 0;
3840 	u32 this_item_size;
3841 	u32 old_left_item_size;
3842 	struct btrfs_map_token token;
3843 
3844 	if (empty)
3845 		nr = min(right_nritems, max_slot);
3846 	else
3847 		nr = min(right_nritems - 1, max_slot);
3848 
3849 	for (i = 0; i < nr; i++) {
3850 		item = btrfs_item_nr(i);
3851 
3852 		if (!empty && push_items > 0) {
3853 			if (path->slots[0] < i)
3854 				break;
3855 			if (path->slots[0] == i) {
3856 				int space = btrfs_leaf_free_space(right);
3857 
3858 				if (space + push_space * 2 > free_space)
3859 					break;
3860 			}
3861 		}
3862 
3863 		if (path->slots[0] == i)
3864 			push_space += data_size;
3865 
3866 		this_item_size = btrfs_item_size(right, item);
3867 		if (this_item_size + sizeof(*item) + push_space > free_space)
3868 			break;
3869 
3870 		push_items++;
3871 		push_space += this_item_size + sizeof(*item);
3872 	}
3873 
3874 	if (push_items == 0) {
3875 		ret = 1;
3876 		goto out;
3877 	}
3878 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3879 
3880 	/* push data from right to left */
3881 	copy_extent_buffer(left, right,
3882 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3883 			   btrfs_item_nr_offset(0),
3884 			   push_items * sizeof(struct btrfs_item));
3885 
3886 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3887 		     btrfs_item_offset_nr(right, push_items - 1);
3888 
3889 	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3890 		     leaf_data_end(left) - push_space,
3891 		     BTRFS_LEAF_DATA_OFFSET +
3892 		     btrfs_item_offset_nr(right, push_items - 1),
3893 		     push_space);
3894 	old_left_nritems = btrfs_header_nritems(left);
3895 	BUG_ON(old_left_nritems <= 0);
3896 
3897 	btrfs_init_map_token(&token, left);
3898 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3899 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3900 		u32 ioff;
3901 
3902 		item = btrfs_item_nr(i);
3903 
3904 		ioff = btrfs_token_item_offset(left, item, &token);
3905 		btrfs_set_token_item_offset(left, item,
3906 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3907 		      &token);
3908 	}
3909 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3910 
3911 	/* fixup right node */
3912 	if (push_items > right_nritems)
3913 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3914 		       right_nritems);
3915 
3916 	if (push_items < right_nritems) {
3917 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3918 						  leaf_data_end(right);
3919 		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3920 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3921 				      BTRFS_LEAF_DATA_OFFSET +
3922 				      leaf_data_end(right), push_space);
3923 
3924 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3925 			      btrfs_item_nr_offset(push_items),
3926 			     (btrfs_header_nritems(right) - push_items) *
3927 			     sizeof(struct btrfs_item));
3928 	}
3929 
3930 	btrfs_init_map_token(&token, right);
3931 	right_nritems -= push_items;
3932 	btrfs_set_header_nritems(right, right_nritems);
3933 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3934 	for (i = 0; i < right_nritems; i++) {
3935 		item = btrfs_item_nr(i);
3936 
3937 		push_space = push_space - btrfs_token_item_size(right,
3938 								item, &token);
3939 		btrfs_set_token_item_offset(right, item, push_space, &token);
3940 	}
3941 
3942 	btrfs_mark_buffer_dirty(left);
3943 	if (right_nritems)
3944 		btrfs_mark_buffer_dirty(right);
3945 	else
3946 		btrfs_clean_tree_block(right);
3947 
3948 	btrfs_item_key(right, &disk_key, 0);
3949 	fixup_low_keys(path, &disk_key, 1);
3950 
3951 	/* then fixup the leaf pointer in the path */
3952 	if (path->slots[0] < push_items) {
3953 		path->slots[0] += old_left_nritems;
3954 		btrfs_tree_unlock(path->nodes[0]);
3955 		free_extent_buffer(path->nodes[0]);
3956 		path->nodes[0] = left;
3957 		path->slots[1] -= 1;
3958 	} else {
3959 		btrfs_tree_unlock(left);
3960 		free_extent_buffer(left);
3961 		path->slots[0] -= push_items;
3962 	}
3963 	BUG_ON(path->slots[0] < 0);
3964 	return ret;
3965 out:
3966 	btrfs_tree_unlock(left);
3967 	free_extent_buffer(left);
3968 	return ret;
3969 }
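
/*
 * Offset arithmetic for the rebase loop above, with hypothetical numbers:
 * if BTRFS_LEAF_DATA_SIZE() is 16283 and the last item of the old left
 * leaf started at offset 16000 (old_left_item_size), an item whose data
 * sat at offset 16233 in 'right' lands at
 * 16233 - (16283 - 16000) = 15950 in 'left': every incoming offset is
 * shifted down by the amount of data the left leaf already held.
 */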
3970 
3971 /*
3972  * push some data in the path leaf to the left, trying to free up at
3973  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3974  *
3975  * max_slot can put a limit on how far into the leaf we'll push items.  The
3976  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3977  * items
3978  */
3979 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3980 			  *root, struct btrfs_path *path, int min_data_size,
3981 			  int data_size, int empty, u32 max_slot)
3982 {
3983 	struct extent_buffer *right = path->nodes[0];
3984 	struct extent_buffer *left;
3985 	int slot;
3986 	int free_space;
3987 	u32 right_nritems;
3988 	int ret = 0;
3989 
3990 	slot = path->slots[1];
3991 	if (slot == 0)
3992 		return 1;
3993 	if (!path->nodes[1])
3994 		return 1;
3995 
3996 	right_nritems = btrfs_header_nritems(right);
3997 	if (right_nritems == 0)
3998 		return 1;
3999 
4000 	btrfs_assert_tree_locked(path->nodes[1]);
4001 
4002 	left = btrfs_read_node_slot(path->nodes[1], slot - 1);
4003 	/*
4004 	 * If slot - 1 is not valid or we fail to read the left node,
4005 	 * it's no big deal, just return.
4006 	 */
4007 	if (IS_ERR(left))
4008 		return 1;
4009 
4010 	btrfs_tree_lock(left);
4011 	btrfs_set_lock_blocking_write(left);
4012 
4013 	free_space = btrfs_leaf_free_space(left);
4014 	if (free_space < data_size) {
4015 		ret = 1;
4016 		goto out;
4017 	}
4018 
4019 	/* cow and double check */
4020 	ret = btrfs_cow_block(trans, root, left,
4021 			      path->nodes[1], slot - 1, &left);
4022 	if (ret) {
4023 		/* we hit -ENOSPC, but it isn't fatal here */
4024 		if (ret == -ENOSPC)
4025 			ret = 1;
4026 		goto out;
4027 	}
4028 
4029 	free_space = btrfs_leaf_free_space(left);
4030 	if (free_space < data_size) {
4031 		ret = 1;
4032 		goto out;
4033 	}
4034 
4035 	return __push_leaf_left(path, min_data_size,
4036 			       empty, left, free_space, right_nritems,
4037 			       max_slot);
4038 out:
4039 	btrfs_tree_unlock(left);
4040 	free_extent_buffer(left);
4041 	return ret;
4042 }
4043 
4044 /*
4045  * helper for split_leaf(): copy items from 'mid' onwards out of leaf 'l'
4046  * into the new leaf 'right', and link 'right' into the parent node.
4047  */
4048 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4049 				    struct btrfs_path *path,
4050 				    struct extent_buffer *l,
4051 				    struct extent_buffer *right,
4052 				    int slot, int mid, int nritems)
4053 {
4054 	struct btrfs_fs_info *fs_info = trans->fs_info;
4055 	int data_copy_size;
4056 	int rt_data_off;
4057 	int i;
4058 	struct btrfs_disk_key disk_key;
4059 	struct btrfs_map_token token;
4060 
4061 	nritems = nritems - mid;
4062 	btrfs_set_header_nritems(right, nritems);
4063 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
4064 
4065 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4066 			   btrfs_item_nr_offset(mid),
4067 			   nritems * sizeof(struct btrfs_item));
4068 
4069 	copy_extent_buffer(right, l,
4070 		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4071 		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4072 		     leaf_data_end(l), data_copy_size);
4073 
4074 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4075 
4076 	btrfs_init_map_token(&token, right);
4077 	for (i = 0; i < nritems; i++) {
4078 		struct btrfs_item *item = btrfs_item_nr(i);
4079 		u32 ioff;
4080 
4081 		ioff = btrfs_token_item_offset(right, item, &token);
4082 		btrfs_set_token_item_offset(right, item,
4083 					    ioff + rt_data_off, &token);
4084 	}
4085 
4086 	btrfs_set_header_nritems(l, mid);
4087 	btrfs_item_key(right, &disk_key, 0);
4088 	insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
4089 
4090 	btrfs_mark_buffer_dirty(right);
4091 	btrfs_mark_buffer_dirty(l);
4092 	BUG_ON(path->slots[0] != slot);
4093 
4094 	if (mid <= slot) {
4095 		btrfs_tree_unlock(path->nodes[0]);
4096 		free_extent_buffer(path->nodes[0]);
4097 		path->nodes[0] = right;
4098 		path->slots[0] -= mid;
4099 		path->slots[1] += 1;
4100 	} else {
4101 		btrfs_tree_unlock(right);
4102 		free_extent_buffer(right);
4103 	}
4104 
4105 	BUG_ON(path->slots[0] < 0);
4106 }
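
/*
 * Example of the rt_data_off rebase above (hypothetical numbers): if the
 * items moved to 'right' occupied offsets [15000, 15800) of a 16283-byte
 * data area in 'l', then btrfs_item_end_nr(l, mid) is 15800 and
 * rt_data_off = 16283 - 15800 = 483; adding 483 to every copied offset
 * slides the whole block flush against the end of right's data area.
 */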
4107 
4108 /*
4109  * double splits happen when we need to insert a big item in the middle
4110  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4111  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4112  *          A                 B                 C
4113  *
4114  * We avoid this by trying to push the items on either side of our target
4115  * into the adjacent leaves.  If all goes well we can avoid the double split
4116  * completely.
4117  */
4118 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4119 					  struct btrfs_root *root,
4120 					  struct btrfs_path *path,
4121 					  int data_size)
4122 {
4123 	int ret;
4124 	int progress = 0;
4125 	int slot;
4126 	u32 nritems;
4127 	int space_needed = data_size;
4128 
4129 	slot = path->slots[0];
4130 	if (slot < btrfs_header_nritems(path->nodes[0]))
4131 		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4132 
4133 	/*
4134 	 * try to push all the items after our slot into the
4135 	 * right leaf
4136 	 */
4137 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4138 	if (ret < 0)
4139 		return ret;
4140 
4141 	if (ret == 0)
4142 		progress++;
4143 
4144 	nritems = btrfs_header_nritems(path->nodes[0]);
4145 	/*
4146 	 * our goal is to get our slot at the start or end of a leaf.  If
4147 	 * we've done so we're done
4148 	 */
4149 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4150 		return 0;
4151 
4152 	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4153 		return 0;
4154 
4155 	/* try to push all the items before our slot into the left leaf */
4156 	slot = path->slots[0];
4157 	space_needed = data_size;
4158 	if (slot > 0)
4159 		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4160 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4161 	if (ret < 0)
4162 		return ret;
4163 
4164 	if (ret == 0)
4165 		progress++;
4166 
4167 	if (progress)
4168 		return 0;
4169 	return 1;
4170 }
4171 
4172 /*
4173  * split the path's leaf in two, making sure there is at least data_size
4174  * available for the resulting leaf level of the path.
4175  *
4176  * returns 0 if all went well and < 0 on failure.
4177  */
4178 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4179 			       struct btrfs_root *root,
4180 			       const struct btrfs_key *ins_key,
4181 			       struct btrfs_path *path, int data_size,
4182 			       int extend)
4183 {
4184 	struct btrfs_disk_key disk_key;
4185 	struct extent_buffer *l;
4186 	u32 nritems;
4187 	int mid;
4188 	int slot;
4189 	struct extent_buffer *right;
4190 	struct btrfs_fs_info *fs_info = root->fs_info;
4191 	int ret = 0;
4192 	int wret;
4193 	int split;
4194 	int num_doubles = 0;
4195 	int tried_avoid_double = 0;
4196 
4197 	l = path->nodes[0];
4198 	slot = path->slots[0];
4199 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4200 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4201 		return -EOVERFLOW;
4202 
4203 	/* first try to make some room by pushing left and right */
4204 	if (data_size && path->nodes[1]) {
4205 		int space_needed = data_size;
4206 
4207 		if (slot < btrfs_header_nritems(l))
4208 			space_needed -= btrfs_leaf_free_space(l);
4209 
4210 		wret = push_leaf_right(trans, root, path, space_needed,
4211 				       space_needed, 0, 0);
4212 		if (wret < 0)
4213 			return wret;
4214 		if (wret) {
4215 			space_needed = data_size;
4216 			if (slot > 0)
4217 				space_needed -= btrfs_leaf_free_space(l);
4218 			wret = push_leaf_left(trans, root, path, space_needed,
4219 					      space_needed, 0, (u32)-1);
4220 			if (wret < 0)
4221 				return wret;
4222 		}
4223 		l = path->nodes[0];
4224 
4225 		/* did the pushes work? */
4226 		if (btrfs_leaf_free_space(l) >= data_size)
4227 			return 0;
4228 	}
4229 
4230 	if (!path->nodes[1]) {
4231 		ret = insert_new_root(trans, root, path, 1);
4232 		if (ret)
4233 			return ret;
4234 	}
4235 again:
4236 	split = 1;
4237 	l = path->nodes[0];
4238 	slot = path->slots[0];
4239 	nritems = btrfs_header_nritems(l);
4240 	mid = (nritems + 1) / 2;
4241 
4242 	if (mid <= slot) {
4243 		if (nritems == 1 ||
4244 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4245 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4246 			if (slot >= nritems) {
4247 				split = 0;
4248 			} else {
4249 				mid = slot;
4250 				if (mid != nritems &&
4251 				    leaf_space_used(l, mid, nritems - mid) +
4252 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4253 					if (data_size && !tried_avoid_double)
4254 						goto push_for_double;
4255 					split = 2;
4256 				}
4257 			}
4258 		}
4259 	} else {
4260 		if (leaf_space_used(l, 0, mid) + data_size >
4261 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4262 			if (!extend && data_size && slot == 0) {
4263 				split = 0;
4264 			} else if ((extend || !data_size) && slot == 0) {
4265 				mid = 1;
4266 			} else {
4267 				mid = slot;
4268 				if (mid != nritems &&
4269 				    leaf_space_used(l, mid, nritems - mid) +
4270 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4271 					if (data_size && !tried_avoid_double)
4272 						goto push_for_double;
4273 					split = 2;
4274 				}
4275 			}
4276 		}
4277 	}
4278 
4279 	if (split == 0)
4280 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4281 	else
4282 		btrfs_item_key(l, &disk_key, mid);
4283 
4284 	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
4285 					     l->start, 0);
4286 	if (IS_ERR(right))
4287 		return PTR_ERR(right);
4288 
4289 	root_add_used(root, fs_info->nodesize);
4290 
4291 	if (split == 0) {
4292 		if (mid <= slot) {
4293 			btrfs_set_header_nritems(right, 0);
4294 			insert_ptr(trans, path, &disk_key,
4295 				   right->start, path->slots[1] + 1, 1);
4296 			btrfs_tree_unlock(path->nodes[0]);
4297 			free_extent_buffer(path->nodes[0]);
4298 			path->nodes[0] = right;
4299 			path->slots[0] = 0;
4300 			path->slots[1] += 1;
4301 		} else {
4302 			btrfs_set_header_nritems(right, 0);
4303 			insert_ptr(trans, path, &disk_key,
4304 				   right->start, path->slots[1], 1);
4305 			btrfs_tree_unlock(path->nodes[0]);
4306 			free_extent_buffer(path->nodes[0]);
4307 			path->nodes[0] = right;
4308 			path->slots[0] = 0;
4309 			if (path->slots[1] == 0)
4310 				fixup_low_keys(path, &disk_key, 1);
4311 		}
4312 		/*
4313 		 * We create a new leaf 'right' for the required ins_len and
4314 		 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4315 		 * ins_len bytes of content to 'right'.
4316 		 */
4317 		return ret;
4318 	}
4319 
4320 	copy_for_split(trans, path, l, right, slot, mid, nritems);
4321 
4322 	if (split == 2) {
4323 		BUG_ON(num_doubles != 0);
4324 		num_doubles++;
4325 		goto again;
4326 	}
4327 
4328 	return 0;
4329 
4330 push_for_double:
4331 	push_for_double_split(trans, root, path, data_size);
4332 	tried_avoid_double = 1;
4333 	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4334 		return 0;
4335 	goto again;
4336 }
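
/*
 * Summary of the split-point decisions above (a restatement, not new
 * behaviour):
 *
 *   split == 1: ordinary split at 'mid', both leaves keep items
 *   split == 0: the new item would sit alone at one edge, so an empty
 *               leaf is linked in and the caller inserts into it
 *   split == 2: the region around the target slot is too large for one
 *               split; after push_for_double_split() fails to free enough
 *               space, the leaf is split a second time
 */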
4337 
4338 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4339 					 struct btrfs_root *root,
4340 					 struct btrfs_path *path, int ins_len)
4341 {
4342 	struct btrfs_key key;
4343 	struct extent_buffer *leaf;
4344 	struct btrfs_file_extent_item *fi;
4345 	u64 extent_len = 0;
4346 	u32 item_size;
4347 	int ret;
4348 
4349 	leaf = path->nodes[0];
4350 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4351 
4352 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4353 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4354 
4355 	if (btrfs_leaf_free_space(leaf) >= ins_len)
4356 		return 0;
4357 
4358 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4359 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4360 		fi = btrfs_item_ptr(leaf, path->slots[0],
4361 				    struct btrfs_file_extent_item);
4362 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4363 	}
4364 	btrfs_release_path(path);
4365 
4366 	path->keep_locks = 1;
4367 	path->search_for_split = 1;
4368 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4369 	path->search_for_split = 0;
4370 	if (ret > 0)
4371 		ret = -EAGAIN;
4372 	if (ret < 0)
4373 		goto err;
4374 
4375 	ret = -EAGAIN;
4376 	leaf = path->nodes[0];
4377 	/* if our item isn't there, return now */
4378 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4379 		goto err;
4380 
4381 	/* the leaf has changed, it now has room.  return now */
4382 	if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
4383 		goto err;
4384 
4385 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4386 		fi = btrfs_item_ptr(leaf, path->slots[0],
4387 				    struct btrfs_file_extent_item);
4388 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4389 			goto err;
4390 	}
4391 
4392 	btrfs_set_path_blocking(path);
4393 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4394 	if (ret)
4395 		goto err;
4396 
4397 	path->keep_locks = 0;
4398 	btrfs_unlock_up_safe(path, 1);
4399 	return 0;
4400 err:
4401 	path->keep_locks = 0;
4402 	return ret;
4403 }
4404 
4405 static noinline int split_item(struct btrfs_path *path,
4406 			       const struct btrfs_key *new_key,
4407 			       unsigned long split_offset)
4408 {
4409 	struct extent_buffer *leaf;
4410 	struct btrfs_item *item;
4411 	struct btrfs_item *new_item;
4412 	int slot;
4413 	char *buf;
4414 	u32 nritems;
4415 	u32 item_size;
4416 	u32 orig_offset;
4417 	struct btrfs_disk_key disk_key;
4418 
4419 	leaf = path->nodes[0];
4420 	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
4421 
4422 	btrfs_set_path_blocking(path);
4423 
4424 	item = btrfs_item_nr(path->slots[0]);
4425 	orig_offset = btrfs_item_offset(leaf, item);
4426 	item_size = btrfs_item_size(leaf, item);
4427 
4428 	buf = kmalloc(item_size, GFP_NOFS);
4429 	if (!buf)
4430 		return -ENOMEM;
4431 
4432 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4433 			    path->slots[0]), item_size);
4434 
4435 	slot = path->slots[0] + 1;
4436 	nritems = btrfs_header_nritems(leaf);
4437 	if (slot != nritems) {
4438 		/* shift the items */
4439 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4440 				btrfs_item_nr_offset(slot),
4441 				(nritems - slot) * sizeof(struct btrfs_item));
4442 	}
4443 
4444 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4445 	btrfs_set_item_key(leaf, &disk_key, slot);
4446 
4447 	new_item = btrfs_item_nr(slot);
4448 
4449 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4450 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4451 
4452 	btrfs_set_item_offset(leaf, item,
4453 			      orig_offset + item_size - split_offset);
4454 	btrfs_set_item_size(leaf, item, split_offset);
4455 
4456 	btrfs_set_header_nritems(leaf, nritems + 1);
4457 
4458 	/* write the data for the start of the original item */
4459 	write_extent_buffer(leaf, buf,
4460 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4461 			    split_offset);
4462 
4463 	/* write the data for the new item */
4464 	write_extent_buffer(leaf, buf + split_offset,
4465 			    btrfs_item_ptr_offset(leaf, slot),
4466 			    item_size - split_offset);
4467 	btrfs_mark_buffer_dirty(leaf);
4468 
4469 	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4470 	kfree(buf);
4471 	return 0;
4472 }
4473 
4474 /*
4475  * This function splits a single item into two items,
4476  * giving 'new_key' to the new item and splitting the
4477  * old one at split_offset (from the start of the item).
4478  *
4479  * The path may be released by this operation.  After
4480  * the split, the path is pointing to the old item.  The
4481  * new item is going to be in the same node as the old one.
4482  *
4483  * Note, the item being split must be small enough to live alone on
4484  * a tree block with room for one extra struct btrfs_item
4485  *
4486  * This allows us to split the item in place, keeping a lock on the
4487  * leaf the entire time.
4488  */
4489 int btrfs_split_item(struct btrfs_trans_handle *trans,
4490 		     struct btrfs_root *root,
4491 		     struct btrfs_path *path,
4492 		     const struct btrfs_key *new_key,
4493 		     unsigned long split_offset)
4494 {
4495 	int ret;
4496 	ret = setup_leaf_for_split(trans, root, path,
4497 				   sizeof(struct btrfs_item));
4498 	if (ret)
4499 		return ret;
4500 
4501 	ret = split_item(path, new_key, split_offset);
4502 	return ret;
4503 }
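
/*
 * Minimal usage sketch (illustration only: example_split_csum(), 'cut' and
 * 'covered' are hypothetical).  The path must already point at an
 * EXTENT_DATA or EXTENT_CSUM item, setup_leaf_for_split() insists on that,
 * and the item must be larger than 'cut' bytes.
 */
static int __maybe_unused example_split_csum(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     u32 cut, u64 covered)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	/* key of the tail half; 'covered' is the disk range the head maps */
	new_key.offset += covered;

	/* the original item keeps bytes [0, cut), the new item the rest */
	return btrfs_split_item(trans, root, path, &new_key, cut);
}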
4504 
4505 /*
4506  * This function duplicates an item, giving 'new_key' to the new item.
4507  * It guarantees both items live in the same tree leaf and the new item
4508  * is contiguous with the original item.
4509  *
4510  * This allows us to split a file extent in place, keeping a lock on the
4511  * leaf the entire time.
4512  */
4513 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4514 			 struct btrfs_root *root,
4515 			 struct btrfs_path *path,
4516 			 const struct btrfs_key *new_key)
4517 {
4518 	struct extent_buffer *leaf;
4519 	int ret;
4520 	u32 item_size;
4521 
4522 	leaf = path->nodes[0];
4523 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4524 	ret = setup_leaf_for_split(trans, root, path,
4525 				   item_size + sizeof(struct btrfs_item));
4526 	if (ret)
4527 		return ret;
4528 
4529 	path->slots[0]++;
4530 	setup_items_for_insert(root, path, new_key, &item_size,
4531 			       item_size, item_size +
4532 			       sizeof(struct btrfs_item), 1);
4533 	leaf = path->nodes[0];
4534 	memcpy_extent_buffer(leaf,
4535 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4536 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4537 			     item_size);
4538 	return 0;
4539 }
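
/*
 * Note for callers (illustration of the path state, not new behaviour):
 * on success the path points at the new copy, because path->slots[0] was
 * advanced before the insert above.  Code that splits a file extent
 * typically rewrites the fields of the copy and then steps back one slot
 * to trim the original.
 */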
4540 
4541 /*
4542  * make the item pointed to by the path smaller.  new_size indicates
4543  * how small to make it, and from_end tells us if we just chop bytes
4544  * off the end of the item or if we shift the item to chop bytes off
4545  * the front.
4546  */
4547 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
4548 {
4549 	int slot;
4550 	struct extent_buffer *leaf;
4551 	struct btrfs_item *item;
4552 	u32 nritems;
4553 	unsigned int data_end;
4554 	unsigned int old_data_start;
4555 	unsigned int old_size;
4556 	unsigned int size_diff;
4557 	int i;
4558 	struct btrfs_map_token token;
4559 
4560 	leaf = path->nodes[0];
4561 	slot = path->slots[0];
4562 
4563 	old_size = btrfs_item_size_nr(leaf, slot);
4564 	if (old_size == new_size)
4565 		return;
4566 
4567 	nritems = btrfs_header_nritems(leaf);
4568 	data_end = leaf_data_end(leaf);
4569 
4570 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4571 
4572 	size_diff = old_size - new_size;
4573 
4574 	BUG_ON(slot < 0);
4575 	BUG_ON(slot >= nritems);
4576 
4577 	/*
4578 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4579 	 */
4580 	/* first correct the data pointers */
4581 	btrfs_init_map_token(&token, leaf);
4582 	for (i = slot; i < nritems; i++) {
4583 		u32 ioff;
4584 		item = btrfs_item_nr(i);
4585 
4586 		ioff = btrfs_token_item_offset(leaf, item, &token);
4587 		btrfs_set_token_item_offset(leaf, item,
4588 					    ioff + size_diff, &token);
4589 	}
4590 
4591 	/* shift the data */
4592 	if (from_end) {
4593 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4594 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4595 			      data_end, old_data_start + new_size - data_end);
4596 	} else {
4597 		struct btrfs_disk_key disk_key;
4598 		u64 offset;
4599 
4600 		btrfs_item_key(leaf, &disk_key, slot);
4601 
4602 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4603 			unsigned long ptr;
4604 			struct btrfs_file_extent_item *fi;
4605 
4606 			fi = btrfs_item_ptr(leaf, slot,
4607 					    struct btrfs_file_extent_item);
4608 			fi = (struct btrfs_file_extent_item *)(
4609 			     (unsigned long)fi - size_diff);
4610 
4611 			if (btrfs_file_extent_type(leaf, fi) ==
4612 			    BTRFS_FILE_EXTENT_INLINE) {
4613 				ptr = btrfs_item_ptr_offset(leaf, slot);
4614 				memmove_extent_buffer(leaf, ptr,
4615 				      (unsigned long)fi,
4616 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4617 			}
4618 		}
4619 
4620 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4621 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4622 			      data_end, old_data_start - data_end);
4623 
4624 		offset = btrfs_disk_key_offset(&disk_key);
4625 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4626 		btrfs_set_item_key(leaf, &disk_key, slot);
4627 		if (slot == 0)
4628 			fixup_low_keys(path, &disk_key, 1);
4629 	}
4630 
4631 	item = btrfs_item_nr(slot);
4632 	btrfs_set_item_size(leaf, item, new_size);
4633 	btrfs_mark_buffer_dirty(leaf);
4634 
4635 	if (btrfs_leaf_free_space(leaf) < 0) {
4636 		btrfs_print_leaf(leaf);
4637 		BUG();
4638 	}
4639 }
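
/*
 * Worked example for the from_end == 0 case above (hypothetical numbers):
 * shrinking a 100-byte item spanning data offsets [15900, 16000) to 60
 * bytes keeps its tail [15940, 16000); the data packed below it slides up
 * by size_diff = 40, the item offset becomes 15940, and for an EXTENT_DATA
 * key the key offset also grows by 40 so the item still describes the same
 * file bytes.
 */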
4640 
4641 /*
4642  * make the item pointed to by the path bigger, data_size is the added size.
4643  */
4644 void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
4645 {
4646 	int slot;
4647 	struct extent_buffer *leaf;
4648 	struct btrfs_item *item;
4649 	u32 nritems;
4650 	unsigned int data_end;
4651 	unsigned int old_data;
4652 	unsigned int old_size;
4653 	int i;
4654 	struct btrfs_map_token token;
4655 
4656 	leaf = path->nodes[0];
4657 
4658 	nritems = btrfs_header_nritems(leaf);
4659 	data_end = leaf_data_end(leaf);
4660 
4661 	if (btrfs_leaf_free_space(leaf) < data_size) {
4662 		btrfs_print_leaf(leaf);
4663 		BUG();
4664 	}
4665 	slot = path->slots[0];
4666 	old_data = btrfs_item_end_nr(leaf, slot);
4667 
4668 	BUG_ON(slot < 0);
4669 	if (slot >= nritems) {
4670 		btrfs_print_leaf(leaf);
4671 		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4672 			   slot, nritems);
4673 		BUG();
4674 	}
4675 
4676 	/*
4677 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4678 	 */
4679 	/* first correct the data pointers */
4680 	btrfs_init_map_token(&token, leaf);
4681 	for (i = slot; i < nritems; i++) {
4682 		u32 ioff;
4683 		item = btrfs_item_nr(i);
4684 
4685 		ioff = btrfs_token_item_offset(leaf, item, &token);
4686 		btrfs_set_token_item_offset(leaf, item,
4687 					    ioff - data_size, &token);
4688 	}
4689 
4690 	/* shift the data */
4691 	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4692 		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4693 		      data_end, old_data - data_end);
4694 
4695 	data_end = old_data;
4696 	old_size = btrfs_item_size_nr(leaf, slot);
4697 	item = btrfs_item_nr(slot);
4698 	btrfs_set_item_size(leaf, item, old_size + data_size);
4699 	btrfs_mark_buffer_dirty(leaf);
4700 
4701 	if (btrfs_leaf_free_space(leaf) < 0) {
4702 		btrfs_print_leaf(leaf);
4703 		BUG();
4704 	}
4705 }
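
/*
 * btrfs_extend_item() mirrors the truncate above: growing an item by 40
 * bytes (hypothetical number) slides everything between data_end and the
 * end of the item down by 40 and decreases the affected item offsets by
 * 40; the new, uninitialized bytes appear at the tail of the item.
 */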
4706 
4707 /*
4708  * this is a helper for btrfs_insert_empty_items, the main goal here is
4709  * to save stack depth by doing the bulk of the work in a function
4710  * that doesn't call btrfs_search_slot
4711  */
4712 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4713 			    const struct btrfs_key *cpu_key, u32 *data_size,
4714 			    u32 total_data, u32 total_size, int nr)
4715 {
4716 	struct btrfs_fs_info *fs_info = root->fs_info;
4717 	struct btrfs_item *item;
4718 	int i;
4719 	u32 nritems;
4720 	unsigned int data_end;
4721 	struct btrfs_disk_key disk_key;
4722 	struct extent_buffer *leaf;
4723 	int slot;
4724 	struct btrfs_map_token token;
4725 
4726 	if (path->slots[0] == 0) {
4727 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4728 		fixup_low_keys(path, &disk_key, 1);
4729 	}
4730 	btrfs_unlock_up_safe(path, 1);
4731 
4732 	leaf = path->nodes[0];
4733 	slot = path->slots[0];
4734 
4735 	nritems = btrfs_header_nritems(leaf);
4736 	data_end = leaf_data_end(leaf);
4737 
4738 	if (btrfs_leaf_free_space(leaf) < total_size) {
4739 		btrfs_print_leaf(leaf);
4740 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4741 			   total_size, btrfs_leaf_free_space(leaf));
4742 		BUG();
4743 	}
4744 
4745 	btrfs_init_map_token(&token, leaf);
4746 	if (slot != nritems) {
4747 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4748 
4749 		if (old_data < data_end) {
4750 			btrfs_print_leaf(leaf);
4751 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4752 				   slot, old_data, data_end);
4753 			BUG();
4754 		}
4755 		/*
4756 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4757 		 */
4758 		/* first correct the data pointers */
4759 		for (i = slot; i < nritems; i++) {
4760 			u32 ioff;
4761 
4762 			item = btrfs_item_nr(i);
4763 			ioff = btrfs_token_item_offset(leaf, item, &token);
4764 			btrfs_set_token_item_offset(leaf, item,
4765 						    ioff - total_data, &token);
4766 		}
4767 		/* shift the items */
4768 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4769 			      btrfs_item_nr_offset(slot),
4770 			      (nritems - slot) * sizeof(struct btrfs_item));
4771 
4772 		/* shift the data */
4773 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4774 			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4775 			      data_end, old_data - data_end);
4776 		data_end = old_data;
4777 	}
4778 
4779 	/* setup the item for the new data */
4780 	for (i = 0; i < nr; i++) {
4781 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4782 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4783 		item = btrfs_item_nr(slot + i);
4784 		btrfs_set_token_item_offset(leaf, item,
4785 					    data_end - data_size[i], &token);
4786 		data_end -= data_size[i];
4787 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4788 	}
4789 
4790 	btrfs_set_header_nritems(leaf, nritems + nr);
4791 	btrfs_mark_buffer_dirty(leaf);
4792 
4793 	if (btrfs_leaf_free_space(leaf) < 0) {
4794 		btrfs_print_leaf(leaf);
4795 		BUG();
4796 	}
4797 }
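
/*
 * Example of the offset bookkeeping above (hypothetical numbers): inserting
 * nr = 2 items of 30 and 20 bytes with data_end at 15000 places the first
 * item at 15000 - 30 = 14970 and the second at 14970 - 20 = 14950, after
 * the earlier loop has already moved every pre-existing offset at or past
 * the insertion slot down by total_data = 50.
 */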
4798 
4799 /*
4800  * Given a key and some data, insert items into the tree.
4801  * This does all the path init required, making room in the tree if needed.
4802  */
4803 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4804 			    struct btrfs_root *root,
4805 			    struct btrfs_path *path,
4806 			    const struct btrfs_key *cpu_key, u32 *data_size,
4807 			    int nr)
4808 {
4809 	int ret = 0;
4810 	int slot;
4811 	int i;
4812 	u32 total_size = 0;
4813 	u32 total_data = 0;
4814 
4815 	for (i = 0; i < nr; i++)
4816 		total_data += data_size[i];
4817 
4818 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4819 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4820 	if (ret == 0)
4821 		return -EEXIST;
4822 	if (ret < 0)
4823 		return ret;
4824 
4825 	slot = path->slots[0];
4826 	BUG_ON(slot < 0);
4827 
4828 	setup_items_for_insert(root, path, cpu_key, data_size,
4829 			       total_data, total_size, nr);
4830 	return 0;
4831 }
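
/*
 * Minimal sketch of a two-item insert (illustration only: the keys, the
 * sizes and example_insert_pair() are hypothetical).  On success the path
 * points at the first new item; the payloads still have to be written.
 */
static int __maybe_unused example_insert_pair(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_path *path)
{
	struct btrfs_key keys[2];
	u32 sizes[2] = { 32, 16 };

	keys[0].objectid = 256;			/* hypothetical values */
	keys[0].type = BTRFS_DIR_ITEM_KEY;
	keys[0].offset = 1;
	keys[1] = keys[0];
	keys[1].offset = 2;

	/* payloads live at btrfs_item_ptr_offset(leaf, slot + i) afterwards */
	return btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
}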
4832 
4833 /*
4834  * Given a key and some data, insert an item into the tree.
4835  * This does all the path init required, making room in the tree if needed.
4836  */
4837 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4838 		      const struct btrfs_key *cpu_key, void *data,
4839 		      u32 data_size)
4840 {
4841 	int ret = 0;
4842 	struct btrfs_path *path;
4843 	struct extent_buffer *leaf;
4844 	unsigned long ptr;
4845 
4846 	path = btrfs_alloc_path();
4847 	if (!path)
4848 		return -ENOMEM;
4849 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4850 	if (!ret) {
4851 		leaf = path->nodes[0];
4852 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4853 		write_extent_buffer(leaf, data, ptr, data_size);
4854 		btrfs_mark_buffer_dirty(leaf);
4855 	}
4856 	btrfs_free_path(path);
4857 	return ret;
4858 }
4859 
4860 /*
4861  * delete the pointer from a given node.
4862  *
4863  * the tree should have been previously balanced so the deletion does not
4864  * empty a node.
4865  */
4866 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4867 		    int level, int slot)
4868 {
4869 	struct extent_buffer *parent = path->nodes[level];
4870 	u32 nritems;
4871 	int ret;
4872 
4873 	nritems = btrfs_header_nritems(parent);
4874 	if (slot != nritems - 1) {
4875 		if (level) {
4876 			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4877 					nritems - slot - 1);
4878 			BUG_ON(ret < 0);
4879 		}
4880 		memmove_extent_buffer(parent,
4881 			      btrfs_node_key_ptr_offset(slot),
4882 			      btrfs_node_key_ptr_offset(slot + 1),
4883 			      sizeof(struct btrfs_key_ptr) *
4884 			      (nritems - slot - 1));
4885 	} else if (level) {
4886 		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4887 				GFP_NOFS);
4888 		BUG_ON(ret < 0);
4889 	}
4890 
4891 	nritems--;
4892 	btrfs_set_header_nritems(parent, nritems);
4893 	if (nritems == 0 && parent == root->node) {
4894 		BUG_ON(btrfs_header_level(root->node) != 1);
4895 		/* just turn the root into a leaf and break */
4896 		btrfs_set_header_level(root->node, 0);
4897 	} else if (slot == 0) {
4898 		struct btrfs_disk_key disk_key;
4899 
4900 		btrfs_node_key(parent, &disk_key, 0);
4901 		fixup_low_keys(path, &disk_key, level + 1);
4902 	}
4903 	btrfs_mark_buffer_dirty(parent);
4904 }
4905 
4906 /*
4907  * a helper function to delete the leaf pointed to by path->slots[1] and
4908  * path->nodes[1].
4909  *
4910  * This deletes the pointer in path->nodes[1] and frees the leaf
4911  * block extent.  The helper cannot fail, so it returns nothing.
4912  *
4913  * The path must have already been setup for deleting the leaf, including
4914  * all the proper balancing.  path->nodes[1] must be locked.
4915  */
4916 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4917 				    struct btrfs_root *root,
4918 				    struct btrfs_path *path,
4919 				    struct extent_buffer *leaf)
4920 {
4921 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4922 	del_ptr(root, path, 1, path->slots[1]);
4923 
4924 	/*
4925 	 * btrfs_free_extent is expensive, we want to make sure we
4926 	 * aren't holding any locks when we call it
4927 	 */
4928 	btrfs_unlock_up_safe(path, 0);
4929 
4930 	root_sub_used(root, leaf->len);
4931 
4932 	atomic_inc(&leaf->refs);
4933 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4934 	free_extent_buffer_stale(leaf);
4935 }

4936 /*
4937  * delete the item at the leaf level in path.  If that empties
4938  * the leaf, remove it from the tree
4939  */
4940 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4941 		    struct btrfs_path *path, int slot, int nr)
4942 {
4943 	struct btrfs_fs_info *fs_info = root->fs_info;
4944 	struct extent_buffer *leaf;
4945 	struct btrfs_item *item;
4946 	u32 last_off;
4947 	u32 dsize = 0;
4948 	int ret = 0;
4949 	int wret;
4950 	int i;
4951 	u32 nritems;
4952 
4953 	leaf = path->nodes[0];
4954 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4955 
4956 	for (i = 0; i < nr; i++)
4957 		dsize += btrfs_item_size_nr(leaf, slot + i);
4958 
4959 	nritems = btrfs_header_nritems(leaf);
4960 
4961 	if (slot + nr != nritems) {
4962 		int data_end = leaf_data_end(leaf);
4963 		struct btrfs_map_token token;
4964 
4965 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4966 			      data_end + dsize,
4967 			      BTRFS_LEAF_DATA_OFFSET + data_end,
4968 			      last_off - data_end);
4969 
4970 		btrfs_init_map_token(&token, leaf);
4971 		for (i = slot + nr; i < nritems; i++) {
4972 			u32 ioff;
4973 
4974 			item = btrfs_item_nr(i);
4975 			ioff = btrfs_token_item_offset(leaf, item, &token);
4976 			btrfs_set_token_item_offset(leaf, item,
4977 						    ioff + dsize, &token);
4978 		}
4979 
4980 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4981 			      btrfs_item_nr_offset(slot + nr),
4982 			      sizeof(struct btrfs_item) *
4983 			      (nritems - slot - nr));
4984 	}
4985 	btrfs_set_header_nritems(leaf, nritems - nr);
4986 	nritems -= nr;
4987 
4988 	/* delete the leaf if we've emptied it */
4989 	if (nritems == 0) {
4990 		if (leaf == root->node) {
4991 			btrfs_set_header_level(leaf, 0);
4992 		} else {
4993 			btrfs_set_path_blocking(path);
4994 			btrfs_clean_tree_block(leaf);
4995 			btrfs_del_leaf(trans, root, path, leaf);
4996 		}
4997 	} else {
4998 		int used = leaf_space_used(leaf, 0, nritems);
4999 		if (slot == 0) {
5000 			struct btrfs_disk_key disk_key;
5001 
5002 			btrfs_item_key(leaf, &disk_key, 0);
5003 			fixup_low_keys(path, &disk_key, 1);
5004 		}
5005 
5006 		/* delete the leaf if it is mostly empty */
5007 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5008 			/* push_leaf_left fixes the path.
5009 			 * make sure the path still points to our leaf
5010 			 * for possible call to del_ptr below
5011 			 */
5012 			slot = path->slots[1];
5013 			atomic_inc(&leaf->refs);
5014 
5015 			btrfs_set_path_blocking(path);
5016 			wret = push_leaf_left(trans, root, path, 1, 1,
5017 					      1, (u32)-1);
5018 			if (wret < 0 && wret != -ENOSPC)
5019 				ret = wret;
5020 
5021 			if (path->nodes[0] == leaf &&
5022 			    btrfs_header_nritems(leaf)) {
5023 				wret = push_leaf_right(trans, root, path, 1,
5024 						       1, 1, 0);
5025 				if (wret < 0 && wret != -ENOSPC)
5026 					ret = wret;
5027 			}
5028 
5029 			if (btrfs_header_nritems(leaf) == 0) {
5030 				path->slots[1] = slot;
5031 				btrfs_del_leaf(trans, root, path, leaf);
5032 				free_extent_buffer(leaf);
5033 				ret = 0;
5034 			} else {
5035 				/* if we're still in the path, make sure
5036 				 * we're dirty.  Otherwise, one of the
5037 				 * push_leaf functions must have already
5038 				 * dirtied this buffer
5039 				 */
5040 				if (path->nodes[0] == leaf)
5041 					btrfs_mark_buffer_dirty(leaf);
5042 				free_extent_buffer(leaf);
5043 			}
5044 		} else {
5045 			btrfs_mark_buffer_dirty(leaf);
5046 		}
5047 	}
5048 	return ret;
5049 }
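
/*
 * Minimal deletion sketch (illustration only; example_del() and the exact
 * key are hypothetical): search with ins_len == -1 and cow == 1, then
 * delete the single item the search landed on.
 */
static int __maybe_unused example_del(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key not present */

	btrfs_free_path(path);
	return ret;
}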
5050 
5051 /*
5052  * search the tree again to find a leaf with lesser keys
5053  * returns 0 if it found something or 1 if there are no lesser leaves.
5054  * returns < 0 on io errors.
5055  *
5056  * This may release the path, and so you may lose any locks held at the
5057  * time you call it.
5058  */
5059 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5060 {
5061 	struct btrfs_key key;
5062 	struct btrfs_disk_key found_key;
5063 	int ret;
5064 
5065 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5066 
5067 	if (key.offset > 0) {
5068 		key.offset--;
5069 	} else if (key.type > 0) {
5070 		key.type--;
5071 		key.offset = (u64)-1;
5072 	} else if (key.objectid > 0) {
5073 		key.objectid--;
5074 		key.type = (u8)-1;
5075 		key.offset = (u64)-1;
5076 	} else {
5077 		return 1;
5078 	}
5079 
5080 	btrfs_release_path(path);
5081 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5082 	if (ret < 0)
5083 		return ret;
5084 	btrfs_item_key(path->nodes[0], &found_key, 0);
5085 	ret = comp_keys(&found_key, &key);
5086 	/*
5087 	 * We might have had an item with the previous key in the tree right
5088 	 * before we released our path. And after we released our path, that
5089 	 * item might have been pushed to the first slot (0) of the leaf we
5090 	 * were holding due to a tree balance. Alternatively, an item with the
5091 	 * previous key can exist as the only element of a leaf (big fat item).
5092 	 * Therefore account for these 2 cases, so that our callers (like
5093 	 * btrfs_previous_item) don't miss an existing item with a key matching
5094 	 * the previous key we computed above.
5095 	 */
5096 	if (ret <= 0)
5097 		return 0;
5098 	return 1;
5099 }
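
/*
 * Key decrement example for the logic above (hypothetical key): walking
 * backwards from (256, BTRFS_DIR_ITEM_KEY, 0) cannot lower the offset, so
 * the search key rolls over to (256, BTRFS_DIR_ITEM_KEY - 1, (u64)-1),
 * the largest possible key of the previous type; the objectid only rolls
 * once type is exhausted as well.
 */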
5100 
5101 /*
5102  * A helper function to walk down the tree starting at min_key, and looking
5103  * for nodes or leaves that have a minimum transaction id.
5104  * This is used by the btree defrag code, and tree logging
5105  *
5106  * This does not cow, but it does stuff the starting key it finds back
5107  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5108  * key and get a writable path.
5109  *
5110  * This honors path->lowest_level to prevent descent past a given level
5111  * of the tree.
5112  *
5113  * min_trans indicates the oldest transaction that you are interested
5114  * in walking through.  Any nodes or leaves older than min_trans are
5115  * skipped over (without reading them).
5116  *
5117  * returns zero if something useful was found, < 0 on error and 1 if there
5118  * was nothing in the tree that matched the search criteria.
5119  */
5120 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5121 			 struct btrfs_path *path,
5122 			 u64 min_trans)
5123 {
5124 	struct extent_buffer *cur;
5125 	struct btrfs_key found_key;
5126 	int slot;
5127 	int sret;
5128 	u32 nritems;
5129 	int level;
5130 	int ret = 1;
5131 	int keep_locks = path->keep_locks;
5132 
5133 	path->keep_locks = 1;
5134 again:
5135 	cur = btrfs_read_lock_root_node(root);
5136 	level = btrfs_header_level(cur);
5137 	WARN_ON(path->nodes[level]);
5138 	path->nodes[level] = cur;
5139 	path->locks[level] = BTRFS_READ_LOCK;
5140 
5141 	if (btrfs_header_generation(cur) < min_trans) {
5142 		ret = 1;
5143 		goto out;
5144 	}
5145 	while (1) {
5146 		nritems = btrfs_header_nritems(cur);
5147 		level = btrfs_header_level(cur);
5148 		sret = btrfs_bin_search(cur, min_key, level, &slot);
5149 		if (sret < 0) {
5150 			ret = sret;
5151 			goto out;
5152 		}
5153 
5154 		/* at the lowest level, we're done, setup the path and exit */
5155 		if (level == path->lowest_level) {
5156 			if (slot >= nritems)
5157 				goto find_next_key;
5158 			ret = 0;
5159 			path->slots[level] = slot;
5160 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5161 			goto out;
5162 		}
5163 		if (sret && slot > 0)
5164 			slot--;
5165 		/*
5166 		 * check this node pointer against the min_trans parameter.
5167 		 * If it is too old, skip to the next one.
5168 		 */
5169 		while (slot < nritems) {
5170 			u64 gen;
5171 
5172 			gen = btrfs_node_ptr_generation(cur, slot);
5173 			if (gen < min_trans) {
5174 				slot++;
5175 				continue;
5176 			}
5177 			break;
5178 		}
5179 find_next_key:
5180 		/*
5181 		 * we didn't find a candidate key in this node, walk forward
5182 		 * and find another one
5183 		 */
5184 		if (slot >= nritems) {
5185 			path->slots[level] = slot;
5186 			btrfs_set_path_blocking(path);
5187 			sret = btrfs_find_next_key(root, path, min_key, level,
5188 						  min_trans);
5189 			if (sret == 0) {
5190 				btrfs_release_path(path);
5191 				goto again;
5192 			} else {
5193 				goto out;
5194 			}
5195 		}
5196 		/* save our key for returning back */
5197 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5198 		path->slots[level] = slot;
5199 		if (level == path->lowest_level) {
5200 			ret = 0;
5201 			goto out;
5202 		}
5203 		btrfs_set_path_blocking(path);
5204 		cur = btrfs_read_node_slot(cur, slot);
5205 		if (IS_ERR(cur)) {
5206 			ret = PTR_ERR(cur);
5207 			goto out;
5208 		}
5209 
5210 		btrfs_tree_read_lock(cur);
5211 
5212 		path->locks[level - 1] = BTRFS_READ_LOCK;
5213 		path->nodes[level - 1] = cur;
5214 		unlock_up(path, level, 1, 0, NULL);
5215 	}
5216 out:
5217 	path->keep_locks = keep_locks;
5218 	if (ret == 0) {
5219 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5220 		btrfs_set_path_blocking(path);
5221 		memcpy(min_key, &found_key, sizeof(found_key));
5222 	}
5223 	return ret;
5224 }
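
/*
 * Usage sketch for btrfs_search_forward() (illustration only; the starting
 * key and example_forward() are hypothetical): find the first node or leaf
 * newer than a given transaction, the way the defrag and tree-log code
 * scan a tree.
 */
static int __maybe_unused example_forward(struct btrfs_root *root,
					  u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 };	/* start at the smallest key */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_forward(root, &min_key, path, min_trans);
	/* ret == 0: min_key now holds the first qualifying key */

	btrfs_free_path(path);
	return ret;
}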
5225 
5226 /*
5227  * this is similar to btrfs_next_leaf, but does not try to preserve
5228  * and fixup the path.  It looks for and returns the next key in the
5229  * tree based on the current path and the min_trans parameters.
5230  *
5231  * 0 is returned if another key is found, < 0 if there are any errors
5232  * and 1 is returned if there are no higher keys in the tree
5233  *
5234  * path->keep_locks should be set to 1 on the search made before
5235  * calling this function.
5236  */
5237 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5238 			struct btrfs_key *key, int level, u64 min_trans)
5239 {
5240 	int slot;
5241 	struct extent_buffer *c;
5242 
5243 	WARN_ON(!path->keep_locks && !path->skip_locking);
5244 	while (level < BTRFS_MAX_LEVEL) {
5245 		if (!path->nodes[level])
5246 			return 1;
5247 
5248 		slot = path->slots[level] + 1;
5249 		c = path->nodes[level];
5250 next:
5251 		if (slot >= btrfs_header_nritems(c)) {
5252 			int ret;
5253 			int orig_lowest;
5254 			struct btrfs_key cur_key;
5255 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5256 			    !path->nodes[level + 1])
5257 				return 1;
5258 
5259 			if (path->locks[level + 1] || path->skip_locking) {
5260 				level++;
5261 				continue;
5262 			}
5263 
5264 			slot = btrfs_header_nritems(c) - 1;
5265 			if (level == 0)
5266 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5267 			else
5268 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5269 
5270 			orig_lowest = path->lowest_level;
5271 			btrfs_release_path(path);
5272 			path->lowest_level = level;
5273 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5274 						0, 0);
5275 			path->lowest_level = orig_lowest;
5276 			if (ret < 0)
5277 				return ret;
5278 
5279 			c = path->nodes[level];
5280 			slot = path->slots[level];
5281 			if (ret == 0)
5282 				slot++;
5283 			goto next;
5284 		}
5285 
5286 		if (level == 0)
5287 			btrfs_item_key_to_cpu(c, key, slot);
5288 		else {
5289 			u64 gen = btrfs_node_ptr_generation(c, slot);
5290 
5291 			if (gen < min_trans) {
5292 				slot++;
5293 				goto next;
5294 			}
5295 			btrfs_node_key_to_cpu(c, key, slot);
5296 		}
5297 		return 0;
5298 	}
5299 	return 1;
5300 }
5301 
5302 /*
5303  * search the tree again to find a leaf with greater keys
5304  * returns 0 if it found something or 1 if there are no greater leaves.
5305  * returns < 0 on io errors.
5306  */
5307 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5308 {
5309 	return btrfs_next_old_leaf(root, path, 0);
5310 }
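
/*
 * Canonical iteration sketch built on btrfs_next_leaf() (illustration
 * only; example_walk() and its starting key are hypothetical): visit every
 * item of a root from 'key' onwards.
 */
static int __maybe_unused example_walk(struct btrfs_root *root,
				       struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	while (ret >= 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
		/* ... consume the item at path->slots[0] here ... */
		path->slots[0]++;
	}

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}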
5311 
5312 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5313 			u64 time_seq)
5314 {
5315 	int slot;
5316 	int level;
5317 	struct extent_buffer *c;
5318 	struct extent_buffer *next;
5319 	struct btrfs_key key;
5320 	u32 nritems;
5321 	int ret;
5322 	int old_spinning = path->leave_spinning;
5323 	int next_rw_lock = 0;
5324 
5325 	nritems = btrfs_header_nritems(path->nodes[0]);
5326 	if (nritems == 0)
5327 		return 1;
5328 
5329 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5330 again:
5331 	level = 1;
5332 	next = NULL;
5333 	next_rw_lock = 0;
5334 	btrfs_release_path(path);
5335 
5336 	path->keep_locks = 1;
5337 	path->leave_spinning = 1;
5338 
5339 	if (time_seq)
5340 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5341 	else
5342 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5343 	path->keep_locks = 0;
5344 
5345 	if (ret < 0)
5346 		return ret;
5347 
5348 	nritems = btrfs_header_nritems(path->nodes[0]);
5349 	/*
5350 	 * by releasing the path above we dropped all our locks.  A balance
5351 	 * could have added more items next to the key that used to be
5352 	 * at the very end of the block.  So, check again here and
5353 	 * advance the path if there are now more items available.
5354 	 */
5355 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5356 		if (ret == 0)
5357 			path->slots[0]++;
5358 		ret = 0;
5359 		goto done;
5360 	}
5361 	/*
5362 	 * So the above check misses one case:
5363 	 * - after releasing the path above, someone has removed the item that
5364 	 *   used to be at the very end of the block, and balance between leafs
5365 	 *   gets another one with bigger key.offset to replace it.
5366 	 *
5367 	 * This one should be returned as well, or we can get leaf corruption
5368 	 * later (esp. in __btrfs_drop_extents()).
5369 	 *
5370 	 * And a bit more explanation about this check,
5371 	 * with ret > 0, the key isn't found, the path points to the slot
5372 	 * where it should be inserted, so the path->slots[0] item must be the
5373 	 * bigger one.
5374 	 */
5375 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5376 		ret = 0;
5377 		goto done;
5378 	}
5379 
5380 	while (level < BTRFS_MAX_LEVEL) {
5381 		if (!path->nodes[level]) {
5382 			ret = 1;
5383 			goto done;
5384 		}
5385 
5386 		slot = path->slots[level] + 1;
5387 		c = path->nodes[level];
5388 		if (slot >= btrfs_header_nritems(c)) {
5389 			level++;
5390 			if (level == BTRFS_MAX_LEVEL) {
5391 				ret = 1;
5392 				goto done;
5393 			}
5394 			continue;
5395 		}
5396 
5397 		if (next) {
5398 			btrfs_tree_unlock_rw(next, next_rw_lock);
5399 			free_extent_buffer(next);
5400 		}
5401 
5402 		next = c;
5403 		next_rw_lock = path->locks[level];
5404 		ret = read_block_for_search(root, path, &next, level,
5405 					    slot, &key);
5406 		if (ret == -EAGAIN)
5407 			goto again;
5408 
5409 		if (ret < 0) {
5410 			btrfs_release_path(path);
5411 			goto done;
5412 		}
5413 
5414 		if (!path->skip_locking) {
5415 			ret = btrfs_try_tree_read_lock(next);
5416 			if (!ret && time_seq) {
5417 				/*
5418 				 * If we don't get the lock, we may be racing
5419 				 * with push_leaf_left, holding that lock while
5420 				 * itself waiting for the leaf we've currently
5421 				 * locked. To solve this situation, we give up
5422 				 * on our lock and cycle.
5423 				 */
5424 				free_extent_buffer(next);
5425 				btrfs_release_path(path);
5426 				cond_resched();
5427 				goto again;
5428 			}
5429 			if (!ret) {
5430 				btrfs_set_path_blocking(path);
5431 				btrfs_tree_read_lock(next);
5432 			}
5433 			next_rw_lock = BTRFS_READ_LOCK;
5434 		}
5435 		break;
5436 	}
5437 	path->slots[level] = slot;
5438 	while (1) {
5439 		level--;
5440 		c = path->nodes[level];
5441 		if (path->locks[level])
5442 			btrfs_tree_unlock_rw(c, path->locks[level]);
5443 
5444 		free_extent_buffer(c);
5445 		path->nodes[level] = next;
5446 		path->slots[level] = 0;
5447 		if (!path->skip_locking)
5448 			path->locks[level] = next_rw_lock;
5449 		if (!level)
5450 			break;
5451 
5452 		ret = read_block_for_search(root, path, &next, level,
5453 					    0, &key);
5454 		if (ret == -EAGAIN)
5455 			goto again;
5456 
5457 		if (ret < 0) {
5458 			btrfs_release_path(path);
5459 			goto done;
5460 		}
5461 
5462 		if (!path->skip_locking) {
5463 			ret = btrfs_try_tree_read_lock(next);
5464 			if (!ret) {
5465 				btrfs_set_path_blocking(path);
5466 				btrfs_tree_read_lock(next);
5467 			}
5468 			next_rw_lock = BTRFS_READ_LOCK;
5469 		}
5470 	}
5471 	ret = 0;
5472 done:
5473 	unlock_up(path, 0, 1, 0, NULL);
5474 	path->leave_spinning = old_spinning;
5475 	if (!old_spinning)
5476 		btrfs_set_path_blocking(path);
5477 
5478 	return ret;
5479 }
5480 
5481 /*
5482  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5483  * searching until it gets past min_objectid or finds an item of 'type'
5484  *
5485  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5486  */
5487 int btrfs_previous_item(struct btrfs_root *root,
5488 			struct btrfs_path *path, u64 min_objectid,
5489 			int type)
5490 {
5491 	struct btrfs_key found_key;
5492 	struct extent_buffer *leaf;
5493 	u32 nritems;
5494 	int ret;
5495 
5496 	while (1) {
5497 		if (path->slots[0] == 0) {
5498 			btrfs_set_path_blocking(path);
5499 			ret = btrfs_prev_leaf(root, path);
5500 			if (ret != 0)
5501 				return ret;
5502 		} else {
5503 			path->slots[0]--;
5504 		}
5505 		leaf = path->nodes[0];
5506 		nritems = btrfs_header_nritems(leaf);
5507 		if (nritems == 0)
5508 			return 1;
5509 		if (path->slots[0] == nritems)
5510 			path->slots[0]--;
5511 
5512 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5513 		if (found_key.objectid < min_objectid)
5514 			break;
5515 		if (found_key.type == type)
5516 			return 0;
5517 		if (found_key.objectid == min_objectid &&
5518 		    found_key.type < type)
5519 			break;
5520 	}
5521 	return 1;
5522 }
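
/*
 * Typical call pattern (illustration only; the objectid and type are
 * hypothetical): with the path positioned at or past the region of
 * interest, walk backwards to the closest BTRFS_INODE_ITEM_KEY without
 * dropping below objectid 256:
 *
 *	ret = btrfs_previous_item(root, path, 256, BTRFS_INODE_ITEM_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 */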
5523 
5524 /*
5525  * search in the extent tree to find a previous Metadata/Data extent item
5526  * with min objectid.
5527  *
5528  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5529  */
5530 int btrfs_previous_extent_item(struct btrfs_root *root,
5531 			struct btrfs_path *path, u64 min_objectid)
5532 {
5533 	struct btrfs_key found_key;
5534 	struct extent_buffer *leaf;
5535 	u32 nritems;
5536 	int ret;
5537 
5538 	while (1) {
5539 		if (path->slots[0] == 0) {
5540 			btrfs_set_path_blocking(path);
5541 			ret = btrfs_prev_leaf(root, path);
5542 			if (ret != 0)
5543 				return ret;
5544 		} else {
5545 			path->slots[0]--;
5546 		}
5547 		leaf = path->nodes[0];
5548 		nritems = btrfs_header_nritems(leaf);
5549 		if (nritems == 0)
5550 			return 1;
5551 		if (path->slots[0] == nritems)
5552 			path->slots[0]--;
5553 
5554 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5555 		if (found_key.objectid < min_objectid)
5556 			break;
5557 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5558 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5559 			return 0;
5560 		if (found_key.objectid == min_objectid &&
5561 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5562 			break;
5563 	}
5564 	return 1;
5565 }
5566