xref: /openbmc/linux/fs/btrfs/ctree.c (revision 5d9e75c41d11ca79b1d1ff6ed17c88c9047d1fea)
1 /*
2  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
27 
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 		      *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 		      *root, struct btrfs_key *ins_key,
32 		      struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 			  struct btrfs_root *root, struct extent_buffer *dst,
35 			  struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 			      struct btrfs_root *root,
38 			      struct extent_buffer *dst_buf,
39 			      struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
41 		    struct btrfs_path *path, int level, int slot,
42 		    int tree_mod_log);
43 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
44 				 struct extent_buffer *eb);
45 struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
46 					  u32 blocksize, u64 parent_transid,
47 					  u64 time_seq);
48 struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
49 						u64 bytenr, u32 blocksize,
50 						u64 time_seq);
51 
52 struct btrfs_path *btrfs_alloc_path(void)
53 {
54 	struct btrfs_path *path;
55 	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
56 	return path;
57 }
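/*
 * Illustrative sketch (annotation, not part of the original file): the
 * usual path lifecycle.  btrfs_search_slot() is the search helper
 * declared in ctree.h; the key used here is made up.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key = { .objectid = 256,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		handle_item(path->nodes[0], path->slots[0]);
 *	btrfs_free_path(path);
 *
 * btrfs_free_path() releases the path first, so any locks and extent
 * buffer references taken by the search are dropped as well.
 * handle_item() stands in for whatever the caller does with the slot.
 */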
58 
59 /*
60  * set all locked nodes in the path to blocking locks.  This should
61  * be done before scheduling
62  */
63 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
64 {
65 	int i;
66 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
67 		if (!p->nodes[i] || !p->locks[i])
68 			continue;
69 		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
70 		if (p->locks[i] == BTRFS_READ_LOCK)
71 			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
72 		else if (p->locks[i] == BTRFS_WRITE_LOCK)
73 			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
74 	}
75 }
76 
77 /*
78  * reset all the locked nodes in the path to spinning locks.
79  *
80  * held is used to keep lockdep happy, when lockdep is enabled
81  * we set held to a blocking lock before we go around and
82  * retake all the spinlocks in the path.  You can safely use NULL
83  * for held
84  */
85 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
86 					struct extent_buffer *held, int held_rw)
87 {
88 	int i;
89 
90 #ifdef CONFIG_DEBUG_LOCK_ALLOC
91 	/* lockdep really cares that we take all of these spinlocks
92 	 * in the right order.  If any of the locks in the path are not
93 	 * currently blocking, it is going to complain.  So, make really
94 	 * really sure by forcing the path to blocking before we clear
95 	 * the path blocking.
96 	 */
97 	if (held) {
98 		btrfs_set_lock_blocking_rw(held, held_rw);
99 		if (held_rw == BTRFS_WRITE_LOCK)
100 			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
101 		else if (held_rw == BTRFS_READ_LOCK)
102 			held_rw = BTRFS_READ_LOCK_BLOCKING;
103 	}
104 	btrfs_set_path_blocking(p);
105 #endif
106 
107 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
108 		if (p->nodes[i] && p->locks[i]) {
109 			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
110 			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
111 				p->locks[i] = BTRFS_WRITE_LOCK;
112 			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
113 				p->locks[i] = BTRFS_READ_LOCK;
114 		}
115 	}
116 
117 #ifdef CONFIG_DEBUG_LOCK_ALLOC
118 	if (held)
119 		btrfs_clear_lock_blocking_rw(held, held_rw);
120 #endif
121 }
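/*
 * Illustrative pairing (annotation): callers switch the whole path to
 * blocking locks before doing anything that might sleep, then switch
 * back to spinning locks afterwards:
 *
 *	btrfs_set_path_blocking(p);
 *	...allocate memory or start IO that may schedule...
 *	btrfs_clear_path_blocking(p, NULL, 0);
 */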
122 
123 /* this also releases the path */
124 void btrfs_free_path(struct btrfs_path *p)
125 {
126 	if (!p)
127 		return;
128 	btrfs_release_path(p);
129 	kmem_cache_free(btrfs_path_cachep, p);
130 }
131 
132 /*
133  * path release drops references on the extent buffers in the path
134  * and it drops any locks held by this path
135  *
136  * It is safe to call this on paths that have no locks or extent buffers held.
137  */
138 noinline void btrfs_release_path(struct btrfs_path *p)
139 {
140 	int i;
141 
142 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
143 		p->slots[i] = 0;
144 		if (!p->nodes[i])
145 			continue;
146 		if (p->locks[i]) {
147 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
148 			p->locks[i] = 0;
149 		}
150 		free_extent_buffer(p->nodes[i]);
151 		p->nodes[i] = NULL;
152 	}
153 }
154 
155 /*
156  * safely gets a reference on the root node of a tree.  A lock
157  * is not taken, so a concurrent writer may put a different node
158  * at the root of the tree.  See btrfs_lock_root_node for the
159  * looping required.
160  *
161  * The extent buffer returned by this has a reference taken, so
162  * it won't disappear.  It may stop being the root of the tree
163  * at any time because there are no locks held.
164  */
165 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
166 {
167 	struct extent_buffer *eb;
168 
169 	while (1) {
170 		rcu_read_lock();
171 		eb = rcu_dereference(root->node);
172 
173 		/*
174 		 * RCU really hurts here: we could free up the root node because
175 		 * it was cow'ed but not yet see the new root node, so do the
176 		 * inc_not_zero dance and, if it doesn't work, synchronize_rcu
177 		 * and try again.
178 		 */
179 		if (atomic_inc_not_zero(&eb->refs)) {
180 			rcu_read_unlock();
181 			break;
182 		}
183 		rcu_read_unlock();
184 		synchronize_rcu();
185 	}
186 	return eb;
187 }
188 
189 /* loop around taking references on and locking the root node of the
190  * tree until you end up with a write lock on the root.  A locked buffer
191  * is returned, with a reference held.
192  */
193 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
194 {
195 	struct extent_buffer *eb;
196 
197 	while (1) {
198 		eb = btrfs_root_node(root);
199 		btrfs_tree_lock(eb);
200 		if (eb == root->node)
201 			break;
202 		btrfs_tree_unlock(eb);
203 		free_extent_buffer(eb);
204 	}
205 	return eb;
206 }
207 
208 /* loop around taking references on and locking the root node of the
209  * tree until you end up with a read lock on the root.  A read-locked
210  * buffer is returned, with a reference held.
211  */
212 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
213 {
214 	struct extent_buffer *eb;
215 
216 	while (1) {
217 		eb = btrfs_root_node(root);
218 		btrfs_tree_read_lock(eb);
219 		if (eb == root->node)
220 			break;
221 		btrfs_tree_read_unlock(eb);
222 		free_extent_buffer(eb);
223 	}
224 	return eb;
225 }
226 
227 /* cowonly roots (everything not a reference counted cow subvolume) just get
228  * put onto a simple dirty list.  transaction.c walks this to make sure they
229  * get properly updated on disk.
230  */
231 static void add_root_to_dirty_list(struct btrfs_root *root)
232 {
233 	spin_lock(&root->fs_info->trans_lock);
234 	if (root->track_dirty && list_empty(&root->dirty_list)) {
235 		list_add(&root->dirty_list,
236 			 &root->fs_info->dirty_cowonly_roots);
237 	}
238 	spin_unlock(&root->fs_info->trans_lock);
239 }
240 
241 /*
242  * used by snapshot creation to make a copy of a root for a tree with
243  * a given objectid.  The buffer with the new root node is returned in
244  * cow_ret, and this func returns zero on success or a negative error code.
245  */
246 int btrfs_copy_root(struct btrfs_trans_handle *trans,
247 		      struct btrfs_root *root,
248 		      struct extent_buffer *buf,
249 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
250 {
251 	struct extent_buffer *cow;
252 	int ret = 0;
253 	int level;
254 	struct btrfs_disk_key disk_key;
255 
256 	WARN_ON(root->ref_cows && trans->transid !=
257 		root->fs_info->running_transaction->transid);
258 	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
259 
260 	level = btrfs_header_level(buf);
261 	if (level == 0)
262 		btrfs_item_key(buf, &disk_key, 0);
263 	else
264 		btrfs_node_key(buf, &disk_key, 0);
265 
266 	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
267 				     new_root_objectid, &disk_key, level,
268 				     buf->start, 0);
269 	if (IS_ERR(cow))
270 		return PTR_ERR(cow);
271 
272 	copy_extent_buffer(cow, buf, 0, 0, cow->len);
273 	btrfs_set_header_bytenr(cow, cow->start);
274 	btrfs_set_header_generation(cow, trans->transid);
275 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
276 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
277 				     BTRFS_HEADER_FLAG_RELOC);
278 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
279 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
280 	else
281 		btrfs_set_header_owner(cow, new_root_objectid);
282 
283 	write_extent_buffer(cow, root->fs_info->fsid,
284 			    (unsigned long)btrfs_header_fsid(cow),
285 			    BTRFS_FSID_SIZE);
286 
287 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
288 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
289 		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
290 	else
291 		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
292 
293 	if (ret)
294 		return ret;
295 
296 	btrfs_mark_buffer_dirty(cow);
297 	*cow_ret = cow;
298 	return 0;
299 }
300 
301 enum mod_log_op {
302 	MOD_LOG_KEY_REPLACE,
303 	MOD_LOG_KEY_ADD,
304 	MOD_LOG_KEY_REMOVE,
305 	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
306 	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
307 	MOD_LOG_MOVE_KEYS,
308 	MOD_LOG_ROOT_REPLACE,
309 };
310 
311 struct tree_mod_move {
312 	int dst_slot;
313 	int nr_items;
314 };
315 
316 struct tree_mod_root {
317 	u64 logical;
318 	u8 level;
319 };
320 
321 struct tree_mod_elem {
322 	struct rb_node node;
323 	u64 index;		/* shifted logical */
324 	struct seq_list elem;
325 	enum mod_log_op op;
326 
327 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
328 	int slot;
329 
330 	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
331 	u64 generation;
332 
333 	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
334 	struct btrfs_disk_key key;
335 	u64 blockptr;
336 
337 	/* this is used for op == MOD_LOG_MOVE_KEYS */
338 	struct tree_mod_move move;
339 
340 	/* this is used for op == MOD_LOG_ROOT_REPLACE */
341 	struct tree_mod_root old_root;
342 };
343 
344 static inline void
345 __get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
346 {
347 	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
348 	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
349 }
350 
351 void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
352 			    struct seq_list *elem)
353 {
354 	elem->flags = 1;
355 	spin_lock(&fs_info->tree_mod_seq_lock);
356 	__get_tree_mod_seq(fs_info, elem);
357 	spin_unlock(&fs_info->tree_mod_seq_lock);
358 }
359 
360 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
361 			    struct seq_list *elem)
362 {
363 	struct rb_root *tm_root;
364 	struct rb_node *node;
365 	struct rb_node *next;
366 	struct seq_list *cur_elem;
367 	struct tree_mod_elem *tm;
368 	u64 min_seq = (u64)-1;
369 	u64 seq_putting = elem->seq;
370 
371 	if (!seq_putting)
372 		return;
373 
374 	BUG_ON(!(elem->flags & 1));
375 	spin_lock(&fs_info->tree_mod_seq_lock);
376 	list_del(&elem->list);
377 
378 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
379 		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
380 			if (seq_putting > cur_elem->seq) {
381 				/*
382 				 * blocker with lower sequence number exists, we
383 				 * cannot remove anything from the log
384 				 */
385 				goto out;
386 			}
387 			min_seq = cur_elem->seq;
388 		}
389 	}
390 
391 	/*
392 	 * anything that's lower than the lowest existing (read: blocked)
393 	 * sequence number can be removed from the tree.
394 	 */
395 	write_lock(&fs_info->tree_mod_log_lock);
396 	tm_root = &fs_info->tree_mod_log;
397 	for (node = rb_first(tm_root); node; node = next) {
398 		next = rb_next(node);
399 		tm = container_of(node, struct tree_mod_elem, node);
400 		if (tm->elem.seq > min_seq)
401 			continue;
402 		rb_erase(node, tm_root);
403 		list_del(&tm->elem.list);
404 		kfree(tm);
405 	}
406 	write_unlock(&fs_info->tree_mod_log_lock);
407 out:
408 	spin_unlock(&fs_info->tree_mod_seq_lock);
409 }
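/*
 * Illustrative pairing (annotation): a tree mod log user registers a
 * sequence element to block pruning of the log while it rewinds old
 * tree state:
 *
 *	struct seq_list elem = {};
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	...use elem.seq as the time_seq for rewinding...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */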
410 
411 /*
412  * key order of the log:
413  *       index -> sequence
414  *
415  * the index is the shifted logical of the *new* root node for root replace
416  * operations, or the shifted logical of the affected block for all other
417  * operations.
418  */
419 static noinline int
420 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
421 {
422 	struct rb_root *tm_root;
423 	struct rb_node **new;
424 	struct rb_node *parent = NULL;
425 	struct tree_mod_elem *cur;
426 	int ret = 0;
427 
428 	BUG_ON(!tm || !tm->elem.seq);
429 
430 	write_lock(&fs_info->tree_mod_log_lock);
431 	tm_root = &fs_info->tree_mod_log;
432 	new = &tm_root->rb_node;
433 	while (*new) {
434 		cur = container_of(*new, struct tree_mod_elem, node);
435 		parent = *new;
436 		if (cur->index < tm->index)
437 			new = &((*new)->rb_left);
438 		else if (cur->index > tm->index)
439 			new = &((*new)->rb_right);
440 		else if (cur->elem.seq < tm->elem.seq)
441 			new = &((*new)->rb_left);
442 		else if (cur->elem.seq > tm->elem.seq)
443 			new = &((*new)->rb_right);
444 		else {
445 			kfree(tm);
446 			ret = -EEXIST;
447 			goto unlock;
448 		}
449 	}
450 
451 	rb_link_node(&tm->node, parent, new);
452 	rb_insert_color(&tm->node, tm_root);
453 unlock:
454 	write_unlock(&fs_info->tree_mod_log_lock);
455 	return ret;
456 }
457 
458 int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
459 		   struct tree_mod_elem **tm_ret)
460 {
461 	struct tree_mod_elem *tm;
462 	u64 seq = 0;
463 
464 	smp_mb();
465 	if (list_empty(&fs_info->tree_mod_seq_list))
466 		return 0;
467 
468 	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
469 	if (!tm)
470 		return -ENOMEM;
471 
472 	__get_tree_mod_seq(fs_info, &tm->elem);
473 	seq = tm->elem.seq;
474 	tm->elem.flags = 0;
475 
476 	return seq;
477 }
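/*
 * Note (annotation): tree_mod_alloc() returns 0 when nobody holds a
 * tree mod sequence (*tm_ret is then left untouched), a positive
 * sequence number once an element has been allocated and queued, or
 * -ENOMEM.  The callers below therefore bail out early on ret <= 0.
 */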
478 
479 static noinline int
480 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
481 			     struct extent_buffer *eb, int slot,
482 			     enum mod_log_op op, gfp_t flags)
483 {
484 	struct tree_mod_elem *tm;
485 	int ret;
486 
487 	ret = tree_mod_alloc(fs_info, flags, &tm);
488 	if (ret <= 0)
489 		return ret;
490 
491 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
492 	if (op != MOD_LOG_KEY_ADD) {
493 		btrfs_node_key(eb, &tm->key, slot);
494 		tm->blockptr = btrfs_node_blockptr(eb, slot);
495 	}
496 	tm->op = op;
497 	tm->slot = slot;
498 	tm->generation = btrfs_node_ptr_generation(eb, slot);
499 
500 	return __tree_mod_log_insert(fs_info, tm);
501 }
502 
503 static noinline int
504 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
505 			int slot, enum mod_log_op op)
506 {
507 	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
508 }
509 
510 static noinline int
511 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
512 			 struct extent_buffer *eb, int dst_slot, int src_slot,
513 			 int nr_items, gfp_t flags)
514 {
515 	struct tree_mod_elem *tm;
516 	int ret;
517 	int i;
518 
519 	ret = tree_mod_alloc(fs_info, flags, &tm);
520 	if (ret <= 0)
521 		return ret;
522 
523 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
524 		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
525 					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
526 		BUG_ON(ret < 0);
527 	}
528 
529 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
530 	tm->slot = src_slot;
531 	tm->move.dst_slot = dst_slot;
532 	tm->move.nr_items = nr_items;
533 	tm->op = MOD_LOG_MOVE_KEYS;
534 
535 	return __tree_mod_log_insert(fs_info, tm);
536 }
537 
538 static noinline int
539 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
540 			 struct extent_buffer *old_root,
541 			 struct extent_buffer *new_root, gfp_t flags)
542 {
543 	struct tree_mod_elem *tm;
544 	int ret;
545 
546 	ret = tree_mod_alloc(fs_info, flags, &tm);
547 	if (ret <= 0)
548 		return ret;
549 
550 	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
551 	tm->old_root.logical = old_root->start;
552 	tm->old_root.level = btrfs_header_level(old_root);
553 	tm->generation = btrfs_header_generation(old_root);
554 	tm->op = MOD_LOG_ROOT_REPLACE;
555 
556 	return __tree_mod_log_insert(fs_info, tm);
557 }
558 
559 static struct tree_mod_elem *
560 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
561 		      int smallest)
562 {
563 	struct rb_root *tm_root;
564 	struct rb_node *node;
565 	struct tree_mod_elem *cur = NULL;
566 	struct tree_mod_elem *found = NULL;
567 	u64 index = start >> PAGE_CACHE_SHIFT;
568 
569 	read_lock(&fs_info->tree_mod_log_lock);
570 	tm_root = &fs_info->tree_mod_log;
571 	node = tm_root->rb_node;
572 	while (node) {
573 		cur = container_of(node, struct tree_mod_elem, node);
574 		if (cur->index < index) {
575 			node = node->rb_left;
576 		} else if (cur->index > index) {
577 			node = node->rb_right;
578 		} else if (cur->elem.seq < min_seq) {
579 			node = node->rb_left;
580 		} else if (!smallest) {
581 			/* we want the node with the highest seq */
582 			if (found)
583 				BUG_ON(found->elem.seq > cur->elem.seq);
584 			found = cur;
585 			node = node->rb_left;
586 		} else if (cur->elem.seq > min_seq) {
587 			/* we want the node with the smallest seq */
588 			if (found)
589 				BUG_ON(found->elem.seq < cur->elem.seq);
590 			found = cur;
591 			node = node->rb_right;
592 		} else {
593 			found = cur;
594 			break;
595 		}
596 	}
597 	read_unlock(&fs_info->tree_mod_log_lock);
598 
599 	return found;
600 }
601 
602 /*
603  * this returns the element from the log with the smallest time sequence
604  * value that's in the log (the oldest log item). any element with a time
605  * sequence lower than min_seq will be ignored.
606  */
607 static struct tree_mod_elem *
608 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
609 			   u64 min_seq)
610 {
611 	return __tree_mod_log_search(fs_info, start, min_seq, 1);
612 }
613 
614 /*
615  * this returns the element from the log with the largest time sequence
616  * value that's in the log (the most recent log item). any element with
617  * a time sequence lower than min_seq will be ignored.
618  */
619 static struct tree_mod_elem *
620 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
621 {
622 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
623 }
624 
625 static inline void
626 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
627 		     struct extent_buffer *src, unsigned long dst_offset,
628 		     unsigned long src_offset, int nr_items)
629 {
630 	int ret;
631 	int i;
632 
633 	smp_mb();
634 	if (list_empty(&fs_info->tree_mod_seq_list))
635 		return;
636 
637 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
638 		return;
639 
640 	/* speed this up with a single seq for all operations? */
641 	for (i = 0; i < nr_items; i++) {
642 		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
643 					      MOD_LOG_KEY_REMOVE);
644 		BUG_ON(ret < 0);
645 		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
646 					      MOD_LOG_KEY_ADD);
647 		BUG_ON(ret < 0);
648 	}
649 }
650 
651 static inline void
652 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
653 		     int dst_offset, int src_offset, int nr_items)
654 {
655 	int ret;
656 	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
657 				       nr_items, GFP_NOFS);
658 	BUG_ON(ret < 0);
659 }
660 
661 static inline void
662 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
663 			  struct extent_buffer *eb,
664 			  struct btrfs_disk_key *disk_key, int slot, int atomic)
665 {
666 	int ret;
667 
668 	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
669 					   MOD_LOG_KEY_REPLACE,
670 					   atomic ? GFP_ATOMIC : GFP_NOFS);
671 	BUG_ON(ret < 0);
672 }
673 
674 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
675 				 struct extent_buffer *eb)
676 {
677 	int i;
678 	int ret;
679 	u32 nritems;
680 
681 	smp_mb();
682 	if (list_empty(&fs_info->tree_mod_seq_list))
683 		return;
684 
685 	if (btrfs_header_level(eb) == 0)
686 		return;
687 
688 	nritems = btrfs_header_nritems(eb);
689 	for (i = nritems - 1; i >= 0; i--) {
690 		ret = tree_mod_log_insert_key(fs_info, eb, i,
691 					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
692 		BUG_ON(ret < 0);
693 	}
694 }
695 
696 static inline void
697 tree_mod_log_set_root_pointer(struct btrfs_root *root,
698 			      struct extent_buffer *new_root_node)
699 {
700 	int ret;
701 	tree_mod_log_free_eb(root->fs_info, root->node);
702 	ret = tree_mod_log_insert_root(root->fs_info, root->node,
703 				       new_root_node, GFP_NOFS);
704 	BUG_ON(ret < 0);
705 }
706 
707 /*
708  * check if the tree block can be shared by multiple trees
709  */
710 int btrfs_block_can_be_shared(struct btrfs_root *root,
711 			      struct extent_buffer *buf)
712 {
713 	/*
714 	 * Tree blocks not in reference counted trees and tree roots
715 	 * are never shared. If a block was allocated after the last
716 	 * snapshot and the block was not allocated by tree relocation,
717 	 * we know the block is not shared.
718 	 */
719 	if (root->ref_cows &&
720 	    buf != root->node && buf != root->commit_root &&
721 	    (btrfs_header_generation(buf) <=
722 	     btrfs_root_last_snapshot(&root->root_item) ||
723 	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
724 		return 1;
725 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
726 	if (root->ref_cows &&
727 	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
728 		return 1;
729 #endif
730 	return 0;
731 }
732 
733 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
734 				       struct btrfs_root *root,
735 				       struct extent_buffer *buf,
736 				       struct extent_buffer *cow,
737 				       int *last_ref)
738 {
739 	u64 refs;
740 	u64 owner;
741 	u64 flags;
742 	u64 new_flags = 0;
743 	int ret;
744 
745 	/*
746 	 * Backrefs update rules:
747 	 *
748 	 * Always use full backrefs for extent pointers in tree block
749 	 * allocated by tree relocation.
750 	 *
751 	 * If a shared tree block is no longer referenced by its owner
752 	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
753 	 * use full backrefs for extent pointers in tree block.
754 	 *
755 	 * If a tree block is being relocated
756 	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
757 	 * use full backrefs for extent pointers in tree block.
758 	 * The reason for this is that some operations (such as drop tree)
759 	 * are only allowed for blocks that use full backrefs.
760 	 */
761 
762 	if (btrfs_block_can_be_shared(root, buf)) {
763 		ret = btrfs_lookup_extent_info(trans, root, buf->start,
764 					       buf->len, &refs, &flags);
765 		if (ret)
766 			return ret;
767 		if (refs == 0) {
768 			ret = -EROFS;
769 			btrfs_std_error(root->fs_info, ret);
770 			return ret;
771 		}
772 	} else {
773 		refs = 1;
774 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
775 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
776 			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
777 		else
778 			flags = 0;
779 	}
780 
781 	owner = btrfs_header_owner(buf);
782 	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
783 	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
784 
785 	if (refs > 1) {
786 		if ((owner == root->root_key.objectid ||
787 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
788 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
789 			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
790 			BUG_ON(ret); /* -ENOMEM */
791 
792 			if (root->root_key.objectid ==
793 			    BTRFS_TREE_RELOC_OBJECTID) {
794 				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
795 				BUG_ON(ret); /* -ENOMEM */
796 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
797 				BUG_ON(ret); /* -ENOMEM */
798 			}
799 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
800 		} else {
801 
802 			if (root->root_key.objectid ==
803 			    BTRFS_TREE_RELOC_OBJECTID)
804 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
805 			else
806 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
807 			BUG_ON(ret); /* -ENOMEM */
808 		}
809 		if (new_flags != 0) {
810 			ret = btrfs_set_disk_extent_flags(trans, root,
811 							  buf->start,
812 							  buf->len,
813 							  new_flags, 0);
814 			if (ret)
815 				return ret;
816 		}
817 	} else {
818 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
819 			if (root->root_key.objectid ==
820 			    BTRFS_TREE_RELOC_OBJECTID)
821 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
822 			else
823 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
824 			BUG_ON(ret); /* -ENOMEM */
825 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
826 			BUG_ON(ret); /* -ENOMEM */
827 		}
828 		/*
829 		 * don't log freeing in case we're freeing the root node, this
830 		 * is done by tree_mod_log_set_root_pointer later
831 		 */
832 		if (buf != root->node && btrfs_header_level(buf) != 0)
833 			tree_mod_log_free_eb(root->fs_info, buf);
834 		clean_tree_block(trans, root, buf);
835 		*last_ref = 1;
836 	}
837 	return 0;
838 }
839 
840 /*
841  * does the dirty work in cow of a single block.  The parent block (if
842  * supplied) is updated to point to the new cow copy.  The new buffer is marked
843  * dirty and returned locked.  If you modify the block it needs to be marked
844  * dirty again.
845  *
846  * search_start -- an allocation hint for the new block
847  *
848  * empty_size -- a hint that you plan on doing more cow.  This is the size in
849  * bytes the allocator should try to find free next to the block it returns.
850  * This is just a hint and may be ignored by the allocator.
851  */
852 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
853 			     struct btrfs_root *root,
854 			     struct extent_buffer *buf,
855 			     struct extent_buffer *parent, int parent_slot,
856 			     struct extent_buffer **cow_ret,
857 			     u64 search_start, u64 empty_size)
858 {
859 	struct btrfs_disk_key disk_key;
860 	struct extent_buffer *cow;
861 	int level, ret;
862 	int last_ref = 0;
863 	int unlock_orig = 0;
864 	u64 parent_start;
865 
866 	if (*cow_ret == buf)
867 		unlock_orig = 1;
868 
869 	btrfs_assert_tree_locked(buf);
870 
871 	WARN_ON(root->ref_cows && trans->transid !=
872 		root->fs_info->running_transaction->transid);
873 	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
874 
875 	level = btrfs_header_level(buf);
876 
877 	if (level == 0)
878 		btrfs_item_key(buf, &disk_key, 0);
879 	else
880 		btrfs_node_key(buf, &disk_key, 0);
881 
882 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
883 		if (parent)
884 			parent_start = parent->start;
885 		else
886 			parent_start = 0;
887 	} else
888 		parent_start = 0;
889 
890 	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
891 				     root->root_key.objectid, &disk_key,
892 				     level, search_start, empty_size);
893 	if (IS_ERR(cow))
894 		return PTR_ERR(cow);
895 
896 	/* cow is set to blocking by btrfs_init_new_buffer */
897 
898 	copy_extent_buffer(cow, buf, 0, 0, cow->len);
899 	btrfs_set_header_bytenr(cow, cow->start);
900 	btrfs_set_header_generation(cow, trans->transid);
901 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
902 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
903 				     BTRFS_HEADER_FLAG_RELOC);
904 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
905 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
906 	else
907 		btrfs_set_header_owner(cow, root->root_key.objectid);
908 
909 	write_extent_buffer(cow, root->fs_info->fsid,
910 			    (unsigned long)btrfs_header_fsid(cow),
911 			    BTRFS_FSID_SIZE);
912 
913 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
914 	if (ret) {
915 		btrfs_abort_transaction(trans, root, ret);
916 		return ret;
917 	}
918 
919 	if (root->ref_cows)
920 		btrfs_reloc_cow_block(trans, root, buf, cow);
921 
922 	if (buf == root->node) {
923 		WARN_ON(parent && parent != buf);
924 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
925 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
926 			parent_start = buf->start;
927 		else
928 			parent_start = 0;
929 
930 		extent_buffer_get(cow);
931 		tree_mod_log_set_root_pointer(root, cow);
932 		rcu_assign_pointer(root->node, cow);
933 
934 		btrfs_free_tree_block(trans, root, buf, parent_start,
935 				      last_ref);
936 		free_extent_buffer(buf);
937 		add_root_to_dirty_list(root);
938 	} else {
939 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
940 			parent_start = parent->start;
941 		else
942 			parent_start = 0;
943 
944 		WARN_ON(trans->transid != btrfs_header_generation(parent));
945 		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
946 					MOD_LOG_KEY_REPLACE);
947 		btrfs_set_node_blockptr(parent, parent_slot,
948 					cow->start);
949 		btrfs_set_node_ptr_generation(parent, parent_slot,
950 					      trans->transid);
951 		btrfs_mark_buffer_dirty(parent);
952 		btrfs_free_tree_block(trans, root, buf, parent_start,
953 				      last_ref);
954 	}
955 	if (unlock_orig)
956 		btrfs_tree_unlock(buf);
957 	free_extent_buffer_stale(buf);
958 	btrfs_mark_buffer_dirty(cow);
959 	*cow_ret = cow;
960 	return 0;
961 }
962 
963 /*
964  * returns the logical address of the oldest predecessor of the given root.
965  * entries older than time_seq are ignored.
966  */
967 static struct tree_mod_elem *
968 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
969 			   struct btrfs_root *root, u64 time_seq)
970 {
971 	struct tree_mod_elem *tm;
972 	struct tree_mod_elem *found = NULL;
973 	u64 root_logical = root->node->start;
974 	int looped = 0;
975 
976 	if (!time_seq)
977 		return 0;
978 
979 	/*
980 	 * the very last operation that's logged for a root is the replacement
981 	 * operation (if it is replaced at all). this has the index of the *new*
982 	 * root, making it the very first operation that's logged for this root.
983 	 */
984 	while (1) {
985 		tm = tree_mod_log_search_oldest(fs_info, root_logical,
986 						time_seq);
987 		if (!looped && !tm)
988 			return 0;
989 		/*
990 		 * we must have key remove operations in the log before the
991 		 * replace operation.
992 		 */
993 		BUG_ON(!tm);
994 
995 		if (tm->op != MOD_LOG_ROOT_REPLACE)
996 			break;
997 
998 		found = tm;
999 		root_logical = tm->old_root.logical;
1000 		BUG_ON(root_logical == root->node->start);
1001 		looped = 1;
1002 	}
1003 
1004 	return found;
1005 }
1006 
1007 /*
1008  * tm is a pointer to the first operation to rewind within eb. then, all
1009  * previous operations will be rewound (until we reach something older than
1010  * time_seq).
1011  */
1012 static void
1013 __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
1014 		      struct tree_mod_elem *first_tm)
1015 {
1016 	u32 n;
1017 	struct rb_node *next;
1018 	struct tree_mod_elem *tm = first_tm;
1019 	unsigned long o_dst;
1020 	unsigned long o_src;
1021 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1022 
1023 	n = btrfs_header_nritems(eb);
1024 	while (tm && tm->elem.seq >= time_seq) {
1025 		/*
1026 		 * all the operations are recorded with the operator used for
1027 		 * the modification. as we're going backwards, we do the
1028 		 * opposite of each operation here.
1029 		 */
1030 		switch (tm->op) {
1031 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1032 			BUG_ON(tm->slot < n);
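			/* fall through: all remove variants are rewound the same way */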
1033 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1034 		case MOD_LOG_KEY_REMOVE:
1035 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1036 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1037 			btrfs_set_node_ptr_generation(eb, tm->slot,
1038 						      tm->generation);
1039 			n++;
1040 			break;
1041 		case MOD_LOG_KEY_REPLACE:
1042 			BUG_ON(tm->slot >= n);
1043 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1044 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1045 			btrfs_set_node_ptr_generation(eb, tm->slot,
1046 						      tm->generation);
1047 			break;
1048 		case MOD_LOG_KEY_ADD:
1049 			if (tm->slot != n - 1) {
1050 				o_dst = btrfs_node_key_ptr_offset(tm->slot);
1051 				o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
1052 				memmove_extent_buffer(eb, o_dst, o_src, p_size);
1053 			}
1054 			n--;
1055 			break;
1056 		case MOD_LOG_MOVE_KEYS:
1057 			memmove_extent_buffer(eb, tm->slot, tm->move.dst_slot,
1058 					      tm->move.nr_items * p_size);
1059 			break;
1060 		case MOD_LOG_ROOT_REPLACE:
1061 			/*
1062 			 * this operation is special. for roots, this must be
1063 			 * handled explicitly before rewinding.
1064 			 * for non-roots, this operation may exist if the node
1065 			 * was a root: root A -> child B; then A becomes empty and
1066 			 * B is promoted to the new root. in the mod log, we'll
1067 			 * have a root-replace operation for B, a tree block
1068 			 * that is not a root. we simply ignore that operation.
1069 			 */
1070 			break;
1071 		}
1072 		next = rb_next(&tm->node);
1073 		if (!next)
1074 			break;
1075 		tm = container_of(next, struct tree_mod_elem, node);
1076 		if (tm->index != first_tm->index)
1077 			break;
1078 	}
1079 	btrfs_set_header_nritems(eb, n);
1080 }
1081 
1082 static struct extent_buffer *
1083 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1084 		    u64 time_seq)
1085 {
1086 	struct extent_buffer *eb_rewin;
1087 	struct tree_mod_elem *tm;
1088 
1089 	if (!time_seq)
1090 		return eb;
1091 
1092 	if (btrfs_header_level(eb) == 0)
1093 		return eb;
1094 
1095 	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1096 	if (!tm)
1097 		return eb;
1098 
1099 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1100 		BUG_ON(tm->slot != 0);
1101 		eb_rewin = alloc_dummy_extent_buffer(eb->start,
1102 						fs_info->tree_root->nodesize);
1103 		BUG_ON(!eb_rewin);
1104 		btrfs_set_header_bytenr(eb_rewin, eb->start);
1105 		btrfs_set_header_backref_rev(eb_rewin,
1106 					     btrfs_header_backref_rev(eb));
1107 		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1108 	} else {
1109 		eb_rewin = btrfs_clone_extent_buffer(eb);
1110 		BUG_ON(!eb_rewin);
1111 	}
1112 
1113 	extent_buffer_get(eb_rewin);
1114 	free_extent_buffer(eb);
1115 
1116 	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
1117 
1118 	return eb_rewin;
1119 }
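/*
 * Note (annotation): on the rewind paths above, the caller's reference
 * on eb is dropped and eb_rewin is returned holding a reference of its
 * own; when nothing needs rewinding, eb is returned unchanged.
 */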
1120 
1121 static inline struct extent_buffer *
1122 get_old_root(struct btrfs_root *root, u64 time_seq)
1123 {
1124 	struct tree_mod_elem *tm;
1125 	struct extent_buffer *eb;
1126 	struct tree_mod_root *old_root;
1127 	u64 old_generation;
1128 
1129 	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1130 	if (!tm)
1131 		return root->node;
1132 
1133 	old_root = &tm->old_root;
1134 	old_generation = tm->generation;
1135 
1136 	tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq);
1137 	/*
1138 	 * there was an item in the log when __tree_mod_log_oldest_root
1139 	 * returned. this one must not go away, because the time_seq passed to
1140 	 * us must be blocking its removal.
1141 	 */
1142 	BUG_ON(!tm);
1143 
1144 	if (old_root->logical == root->node->start) {
1145 		/* there are logged operations for the current root */
1146 		eb = btrfs_clone_extent_buffer(root->node);
1147 	} else {
1148 		/* there's a root replace operation for the current root */
1149 		eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
1150 					       root->nodesize);
1151 		btrfs_set_header_bytenr(eb, eb->start);
1152 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1153 		btrfs_set_header_owner(eb, root->root_key.objectid);
1154 	}
1155 	if (!eb)
1156 		return NULL;
1157 	btrfs_set_header_level(eb, old_root->level);
1158 	btrfs_set_header_generation(eb, old_generation);
1159 	__tree_mod_log_rewind(eb, time_seq, tm);
1160 
1161 	return eb;
1162 }
1163 
1164 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1165 				   struct btrfs_root *root,
1166 				   struct extent_buffer *buf)
1167 {
1168 	/* ensure we can see the force_cow */
1169 	smp_rmb();
1170 
1171 	/*
1172 	 * We do not need to cow a block if
1173 	 * 1) this block is not created or changed in this transaction;
1174 	 * 2) this block does not belong to TREE_RELOC tree;
1175 	 * 3) the root is not forced COW.
1176 	 *
1177 	 * What is forced COW:
1178 	 *    when we create a snapshot during committing the transaction,
1179 	 *    after we've finished copying the src root, we must COW the shared
1180 	 *    block to ensure metadata consistency.
1181 	 */
1182 	if (btrfs_header_generation(buf) == trans->transid &&
1183 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1184 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1185 	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1186 	    !root->force_cow)
1187 		return 0;
1188 	return 1;
1189 }
1190 
1191 /*
1192  * cows a single block, see __btrfs_cow_block for the real work.
1193  * This version of it has extra checks so that a block isn't cow'd more than
1194  * once per transaction, as long as it hasn't been written yet
1195  */
1196 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1197 		    struct btrfs_root *root, struct extent_buffer *buf,
1198 		    struct extent_buffer *parent, int parent_slot,
1199 		    struct extent_buffer **cow_ret)
1200 {
1201 	u64 search_start;
1202 	int ret;
1203 
1204 	if (trans->transaction != root->fs_info->running_transaction) {
1205 		printk(KERN_CRIT "trans %llu running %llu\n",
1206 		       (unsigned long long)trans->transid,
1207 		       (unsigned long long)
1208 		       root->fs_info->running_transaction->transid);
1209 		WARN_ON(1);
1210 	}
1211 	if (trans->transid != root->fs_info->generation) {
1212 		printk(KERN_CRIT "trans %llu running %llu\n",
1213 		       (unsigned long long)trans->transid,
1214 		       (unsigned long long)root->fs_info->generation);
1215 		WARN_ON(1);
1216 	}
1217 
1218 	if (!should_cow_block(trans, root, buf)) {
1219 		*cow_ret = buf;
1220 		return 0;
1221 	}
1222 
1223 	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
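	/* hint the allocator at the 1GiB-aligned chunk that holds buf */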
1224 
1225 	if (parent)
1226 		btrfs_set_lock_blocking(parent);
1227 	btrfs_set_lock_blocking(buf);
1228 
1229 	ret = __btrfs_cow_block(trans, root, buf, parent,
1230 				 parent_slot, cow_ret, search_start, 0);
1231 
1232 	trace_btrfs_cow_block(root, buf, *cow_ret);
1233 
1234 	return ret;
1235 }
1236 
1237 /*
1238  * helper function for defrag to decide if two blocks pointed to by a
1239  * node are actually close by
1240  */
1241 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1242 {
1243 	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1244 		return 1;
1245 	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1246 		return 1;
1247 	return 0;
1248 }
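/*
 * Worked example (annotation): with a 4KiB blocksize, blocknr 16384
 * and other 24576 gives other - (blocknr + blocksize) = 4096, which is
 * below the 32KiB threshold, so the blocks count as close.
 */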
1249 
1250 /*
1251  * compare two keys in a memcmp fashion
1252  */
1253 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1254 {
1255 	struct btrfs_key k1;
1256 
1257 	btrfs_disk_key_to_cpu(&k1, disk);
1258 
1259 	return btrfs_comp_cpu_keys(&k1, k2);
1260 }
1261 
1262 /*
1263  * same as comp_keys only with two btrfs_key's
1264  */
1265 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1266 {
1267 	if (k1->objectid > k2->objectid)
1268 		return 1;
1269 	if (k1->objectid < k2->objectid)
1270 		return -1;
1271 	if (k1->type > k2->type)
1272 		return 1;
1273 	if (k1->type < k2->type)
1274 		return -1;
1275 	if (k1->offset > k2->offset)
1276 		return 1;
1277 	if (k1->offset < k2->offset)
1278 		return -1;
1279 	return 0;
1280 }
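/*
 * Worked example (annotation): keys order by objectid, then type, then
 * offset, so (256, BTRFS_INODE_ITEM_KEY, 0) sorts before
 * (256, BTRFS_INODE_REF_KEY, 0), which in turn sorts before
 * (257, BTRFS_INODE_ITEM_KEY, 0), regardless of the offsets involved.
 */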
1281 
1282 /*
1283  * this is used by the defrag code to go through all the
1284  * leaves pointed to by a node and reallocate them so that
1285  * disk order is close to key order
1286  */
1287 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1288 		       struct btrfs_root *root, struct extent_buffer *parent,
1289 		       int start_slot, int cache_only, u64 *last_ret,
1290 		       struct btrfs_key *progress)
1291 {
1292 	struct extent_buffer *cur;
1293 	u64 blocknr;
1294 	u64 gen;
1295 	u64 search_start = *last_ret;
1296 	u64 last_block = 0;
1297 	u64 other;
1298 	u32 parent_nritems;
1299 	int end_slot;
1300 	int i;
1301 	int err = 0;
1302 	int parent_level;
1303 	int uptodate;
1304 	u32 blocksize;
1305 	int progress_passed = 0;
1306 	struct btrfs_disk_key disk_key;
1307 
1308 	parent_level = btrfs_header_level(parent);
1309 	if (cache_only && parent_level != 1)
1310 		return 0;
1311 
1312 	if (trans->transaction != root->fs_info->running_transaction)
1313 		WARN_ON(1);
1314 	if (trans->transid != root->fs_info->generation)
1315 		WARN_ON(1);
1316 
1317 	parent_nritems = btrfs_header_nritems(parent);
1318 	blocksize = btrfs_level_size(root, parent_level - 1);
1319 	end_slot = parent_nritems;
1320 
1321 	if (parent_nritems == 1)
1322 		return 0;
1323 
1324 	btrfs_set_lock_blocking(parent);
1325 
1326 	for (i = start_slot; i < end_slot; i++) {
1327 		int close = 1;
1328 
1329 		btrfs_node_key(parent, &disk_key, i);
1330 		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1331 			continue;
1332 
1333 		progress_passed = 1;
1334 		blocknr = btrfs_node_blockptr(parent, i);
1335 		gen = btrfs_node_ptr_generation(parent, i);
1336 		if (last_block == 0)
1337 			last_block = blocknr;
1338 
1339 		if (i > 0) {
1340 			other = btrfs_node_blockptr(parent, i - 1);
1341 			close = close_blocks(blocknr, other, blocksize);
1342 		}
1343 		if (!close && i < end_slot - 2) {
1344 			other = btrfs_node_blockptr(parent, i + 1);
1345 			close = close_blocks(blocknr, other, blocksize);
1346 		}
1347 		if (close) {
1348 			last_block = blocknr;
1349 			continue;
1350 		}
1351 
1352 		cur = btrfs_find_tree_block(root, blocknr, blocksize);
1353 		if (cur)
1354 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1355 		else
1356 			uptodate = 0;
1357 		if (!cur || !uptodate) {
1358 			if (cache_only) {
1359 				free_extent_buffer(cur);
1360 				continue;
1361 			}
1362 			if (!cur) {
1363 				cur = read_tree_block(root, blocknr,
1364 							 blocksize, gen);
1365 				if (!cur)
1366 					return -EIO;
1367 			} else if (!uptodate) {
1368 				btrfs_read_buffer(cur, gen);
1369 			}
1370 		}
1371 		if (search_start == 0)
1372 			search_start = last_block;
1373 
1374 		btrfs_tree_lock(cur);
1375 		btrfs_set_lock_blocking(cur);
1376 		err = __btrfs_cow_block(trans, root, cur, parent, i,
1377 					&cur, search_start,
1378 					min(16 * blocksize,
1379 					    (end_slot - i) * blocksize));
1380 		if (err) {
1381 			btrfs_tree_unlock(cur);
1382 			free_extent_buffer(cur);
1383 			break;
1384 		}
1385 		search_start = cur->start;
1386 		last_block = cur->start;
1387 		*last_ret = search_start;
1388 		btrfs_tree_unlock(cur);
1389 		free_extent_buffer(cur);
1390 	}
1391 	return err;
1392 }
1393 
1394 /*
1395  * The leaf data grows from end-to-front in the node.
1396  * This returns the address of the start of the last item,
1397  * which is the stop of the leaf data stack
1398  */
1399 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1400 					 struct extent_buffer *leaf)
1401 {
1402 	u32 nr = btrfs_header_nritems(leaf);
1403 	if (nr == 0)
1404 		return BTRFS_LEAF_DATA_SIZE(root);
1405 	return btrfs_item_offset_nr(leaf, nr - 1);
1406 }
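/*
 * Layout sketch (annotation): item headers grow forward from the leaf
 * header while item data grows backward from the end of the block, so
 * the offset returned above is where the data region currently starts:
 *
 *	[leaf header][item 0][item 1]...free space...[data 1][data 0]
 *	                                             ^
 *	                                             leaf_data_end()
 */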
1407 
1408 
1409 /*
1410  * search for key in the extent_buffer.  The items start at offset p,
1411  * and they are item_size apart.  There are 'max' items in p.
1412  *
1413  * the slot in the array is returned via slot, and it points to
1414  * the place where you would insert key if it is not found in
1415  * the array.
1416  *
1417  * slot may point to max if the key is bigger than all of the keys
1418  */
1419 static noinline int generic_bin_search(struct extent_buffer *eb,
1420 				       unsigned long p,
1421 				       int item_size, struct btrfs_key *key,
1422 				       int max, int *slot)
1423 {
1424 	int low = 0;
1425 	int high = max;
1426 	int mid;
1427 	int ret;
1428 	struct btrfs_disk_key *tmp = NULL;
1429 	struct btrfs_disk_key unaligned;
1430 	unsigned long offset;
1431 	char *kaddr = NULL;
1432 	unsigned long map_start = 0;
1433 	unsigned long map_len = 0;
1434 	int err;
1435 
1436 	while (low < high) {
1437 		mid = (low + high) / 2;
1438 		offset = p + mid * item_size;
1439 
1440 		if (!kaddr || offset < map_start ||
1441 		    (offset + sizeof(struct btrfs_disk_key)) >
1442 		    map_start + map_len) {
1443 
1444 			err = map_private_extent_buffer(eb, offset,
1445 						sizeof(struct btrfs_disk_key),
1446 						&kaddr, &map_start, &map_len);
1447 
1448 			if (!err) {
1449 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1450 							map_start);
1451 			} else {
1452 				read_extent_buffer(eb, &unaligned,
1453 						   offset, sizeof(unaligned));
1454 				tmp = &unaligned;
1455 			}
1456 
1457 		} else {
1458 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1459 							map_start);
1460 		}
1461 		ret = comp_keys(tmp, key);
1462 
1463 		if (ret < 0)
1464 			low = mid + 1;
1465 		else if (ret > 0)
1466 			high = mid;
1467 		else {
1468 			*slot = mid;
1469 			return 0;
1470 		}
1471 	}
1472 	*slot = low;
1473 	return 1;
1474 }
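/*
 * Example of the return convention (annotation): searching keys
 * { 2, 4, 8 } for 4 returns 0 with *slot == 1; searching for 5 returns
 * 1 with *slot == 2, the slot where 5 would have to be inserted.
 */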
1475 
1476 /*
1477  * simple bin_search frontend that does the right thing for
1478  * leaves vs nodes
1479  */
1480 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1481 		      int level, int *slot)
1482 {
1483 	if (level == 0) {
1484 		return generic_bin_search(eb,
1485 					  offsetof(struct btrfs_leaf, items),
1486 					  sizeof(struct btrfs_item),
1487 					  key, btrfs_header_nritems(eb),
1488 					  slot);
1489 	} else {
1490 		return generic_bin_search(eb,
1491 					  offsetof(struct btrfs_node, ptrs),
1492 					  sizeof(struct btrfs_key_ptr),
1493 					  key, btrfs_header_nritems(eb),
1494 					  slot);
1495 	}
1496 	return -1;
1497 }
1498 
1499 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1500 		     int level, int *slot)
1501 {
1502 	return bin_search(eb, key, level, slot);
1503 }
1504 
1505 static void root_add_used(struct btrfs_root *root, u32 size)
1506 {
1507 	spin_lock(&root->accounting_lock);
1508 	btrfs_set_root_used(&root->root_item,
1509 			    btrfs_root_used(&root->root_item) + size);
1510 	spin_unlock(&root->accounting_lock);
1511 }
1512 
1513 static void root_sub_used(struct btrfs_root *root, u32 size)
1514 {
1515 	spin_lock(&root->accounting_lock);
1516 	btrfs_set_root_used(&root->root_item,
1517 			    btrfs_root_used(&root->root_item) - size);
1518 	spin_unlock(&root->accounting_lock);
1519 }
1520 
1521 /* given a node and slot number, this reads the block it points to.  The
1522  * extent buffer is returned with a reference taken (but unlocked).
1523  * NULL is returned on error.
1524  */
1525 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1526 				   struct extent_buffer *parent, int slot)
1527 {
1528 	int level = btrfs_header_level(parent);
1529 	if (slot < 0)
1530 		return NULL;
1531 	if (slot >= btrfs_header_nritems(parent))
1532 		return NULL;
1533 
1534 	BUG_ON(level == 0);
1535 
1536 	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
1537 		       btrfs_level_size(root, level - 1),
1538 		       btrfs_node_ptr_generation(parent, slot));
1539 }
1540 
1541 /*
1542  * node level balancing, used to make sure nodes are in proper order for
1543  * item deletion.  We balance from the top down, so we have to make sure
1544  * that a deletion won't leave a node completely empty later on.
1545  */
1546 static noinline int balance_level(struct btrfs_trans_handle *trans,
1547 			 struct btrfs_root *root,
1548 			 struct btrfs_path *path, int level)
1549 {
1550 	struct extent_buffer *right = NULL;
1551 	struct extent_buffer *mid;
1552 	struct extent_buffer *left = NULL;
1553 	struct extent_buffer *parent = NULL;
1554 	int ret = 0;
1555 	int wret;
1556 	int pslot;
1557 	int orig_slot = path->slots[level];
1558 	u64 orig_ptr;
1559 
1560 	if (level == 0)
1561 		return 0;
1562 
1563 	mid = path->nodes[level];
1564 
1565 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1566 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1567 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1568 
1569 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1570 
1571 	if (level < BTRFS_MAX_LEVEL - 1) {
1572 		parent = path->nodes[level + 1];
1573 		pslot = path->slots[level + 1];
1574 	}
1575 
1576 	/*
1577 	 * deal with the case where there is only one pointer in the root
1578 	 * by promoting the node below to a root
1579 	 */
1580 	if (!parent) {
1581 		struct extent_buffer *child;
1582 
1583 		if (btrfs_header_nritems(mid) != 1)
1584 			return 0;
1585 
1586 		/* promote the child to a root */
1587 		child = read_node_slot(root, mid, 0);
1588 		if (!child) {
1589 			ret = -EROFS;
1590 			btrfs_std_error(root->fs_info, ret);
1591 			goto enospc;
1592 		}
1593 
1594 		btrfs_tree_lock(child);
1595 		btrfs_set_lock_blocking(child);
1596 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1597 		if (ret) {
1598 			btrfs_tree_unlock(child);
1599 			free_extent_buffer(child);
1600 			goto enospc;
1601 		}
1602 
1603 		tree_mod_log_set_root_pointer(root, child);
1604 		rcu_assign_pointer(root->node, child);
1605 
1606 		add_root_to_dirty_list(root);
1607 		btrfs_tree_unlock(child);
1608 
1609 		path->locks[level] = 0;
1610 		path->nodes[level] = NULL;
1611 		clean_tree_block(trans, root, mid);
1612 		btrfs_tree_unlock(mid);
1613 		/* once for the path */
1614 		free_extent_buffer(mid);
1615 
1616 		root_sub_used(root, mid->len);
1617 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1618 		/* once for the root ptr */
1619 		free_extent_buffer_stale(mid);
1620 		return 0;
1621 	}
1622 	if (btrfs_header_nritems(mid) >
1623 	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1624 		return 0;
1625 
1626 	btrfs_header_nritems(mid);
1627 
1628 	left = read_node_slot(root, parent, pslot - 1);
1629 	if (left) {
1630 		btrfs_tree_lock(left);
1631 		btrfs_set_lock_blocking(left);
1632 		wret = btrfs_cow_block(trans, root, left,
1633 				       parent, pslot - 1, &left);
1634 		if (wret) {
1635 			ret = wret;
1636 			goto enospc;
1637 		}
1638 	}
1639 	right = read_node_slot(root, parent, pslot + 1);
1640 	if (right) {
1641 		btrfs_tree_lock(right);
1642 		btrfs_set_lock_blocking(right);
1643 		wret = btrfs_cow_block(trans, root, right,
1644 				       parent, pslot + 1, &right);
1645 		if (wret) {
1646 			ret = wret;
1647 			goto enospc;
1648 		}
1649 	}
1650 
1651 	/* first, try to make some room in the middle buffer */
1652 	if (left) {
1653 		orig_slot += btrfs_header_nritems(left);
1654 		wret = push_node_left(trans, root, left, mid, 1);
1655 		if (wret < 0)
1656 			ret = wret;
1657 		btrfs_header_nritems(mid);
1658 	}
1659 
1660 	/*
1661 	 * then try to empty the right most buffer into the middle
1662 	 */
1663 	if (right) {
1664 		wret = push_node_left(trans, root, mid, right, 1);
1665 		if (wret < 0 && wret != -ENOSPC)
1666 			ret = wret;
1667 		if (btrfs_header_nritems(right) == 0) {
1668 			clean_tree_block(trans, root, right);
1669 			btrfs_tree_unlock(right);
1670 			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
1671 			root_sub_used(root, right->len);
1672 			btrfs_free_tree_block(trans, root, right, 0, 1);
1673 			free_extent_buffer_stale(right);
1674 			right = NULL;
1675 		} else {
1676 			struct btrfs_disk_key right_key;
1677 			btrfs_node_key(right, &right_key, 0);
1678 			tree_mod_log_set_node_key(root->fs_info, parent,
1679 						  &right_key, pslot + 1, 0);
1680 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1681 			btrfs_mark_buffer_dirty(parent);
1682 		}
1683 	}
1684 	if (btrfs_header_nritems(mid) == 1) {
1685 		/*
1686 		 * we're not allowed to leave a node with one item in the
1687 		 * tree during a delete.  A deletion from lower in the tree
1688 		 * could try to delete the only pointer in this node.
1689 		 * So, pull some keys from the left.
1690 		 * There has to be a left pointer at this point because
1691 		 * otherwise we would have pulled some pointers from the
1692 		 * right
1693 		 */
1694 		if (!left) {
1695 			ret = -EROFS;
1696 			btrfs_std_error(root->fs_info, ret);
1697 			goto enospc;
1698 		}
1699 		wret = balance_node_right(trans, root, mid, left);
1700 		if (wret < 0) {
1701 			ret = wret;
1702 			goto enospc;
1703 		}
1704 		if (wret == 1) {
1705 			wret = push_node_left(trans, root, left, mid, 1);
1706 			if (wret < 0)
1707 				ret = wret;
1708 		}
1709 		BUG_ON(wret == 1);
1710 	}
1711 	if (btrfs_header_nritems(mid) == 0) {
1712 		clean_tree_block(trans, root, mid);
1713 		btrfs_tree_unlock(mid);
1714 		del_ptr(trans, root, path, level + 1, pslot, 1);
1715 		root_sub_used(root, mid->len);
1716 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1717 		free_extent_buffer_stale(mid);
1718 		mid = NULL;
1719 	} else {
1720 		/* update the parent key to reflect our changes */
1721 		struct btrfs_disk_key mid_key;
1722 		btrfs_node_key(mid, &mid_key, 0);
1723 		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
1724 					  pslot, 0);
1725 		btrfs_set_node_key(parent, &mid_key, pslot);
1726 		btrfs_mark_buffer_dirty(parent);
1727 	}
1728 
1729 	/* update the path */
1730 	if (left) {
1731 		if (btrfs_header_nritems(left) > orig_slot) {
1732 			extent_buffer_get(left);
1733 			/* left was locked after cow */
1734 			path->nodes[level] = left;
1735 			path->slots[level + 1] -= 1;
1736 			path->slots[level] = orig_slot;
1737 			if (mid) {
1738 				btrfs_tree_unlock(mid);
1739 				free_extent_buffer(mid);
1740 			}
1741 		} else {
1742 			orig_slot -= btrfs_header_nritems(left);
1743 			path->slots[level] = orig_slot;
1744 		}
1745 	}
1746 	/* double check we haven't messed things up */
1747 	if (orig_ptr !=
1748 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1749 		BUG();
1750 enospc:
1751 	if (right) {
1752 		btrfs_tree_unlock(right);
1753 		free_extent_buffer(right);
1754 	}
1755 	if (left) {
1756 		if (path->nodes[level] != left)
1757 			btrfs_tree_unlock(left);
1758 		free_extent_buffer(left);
1759 	}
1760 	return ret;
1761 }
1762 
1763 /* Node balancing for insertion.  Here we only split or push nodes around
1764  * when they are completely full.  This is also done top down, so we
1765  * have to be pessimistic.
1766  */
1767 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1768 					  struct btrfs_root *root,
1769 					  struct btrfs_path *path, int level)
1770 {
1771 	struct extent_buffer *right = NULL;
1772 	struct extent_buffer *mid;
1773 	struct extent_buffer *left = NULL;
1774 	struct extent_buffer *parent = NULL;
1775 	int ret = 0;
1776 	int wret;
1777 	int pslot;
1778 	int orig_slot = path->slots[level];
1779 
1780 	if (level == 0)
1781 		return 1;
1782 
1783 	mid = path->nodes[level];
1784 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1785 
1786 	if (level < BTRFS_MAX_LEVEL - 1) {
1787 		parent = path->nodes[level + 1];
1788 		pslot = path->slots[level + 1];
1789 	}
1790 
1791 	if (!parent)
1792 		return 1;
1793 
1794 	left = read_node_slot(root, parent, pslot - 1);
1795 
1796 	/* first, try to make some room in the middle buffer */
1797 	if (left) {
1798 		u32 left_nr;
1799 
1800 		btrfs_tree_lock(left);
1801 		btrfs_set_lock_blocking(left);
1802 
1803 		left_nr = btrfs_header_nritems(left);
1804 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1805 			wret = 1;
1806 		} else {
1807 			ret = btrfs_cow_block(trans, root, left, parent,
1808 					      pslot - 1, &left);
1809 			if (ret)
1810 				wret = 1;
1811 			else {
1812 				wret = push_node_left(trans, root,
1813 						      left, mid, 0);
1814 			}
1815 		}
1816 		if (wret < 0)
1817 			ret = wret;
1818 		if (wret == 0) {
1819 			struct btrfs_disk_key disk_key;
1820 			orig_slot += left_nr;
1821 			btrfs_node_key(mid, &disk_key, 0);
1822 			tree_mod_log_set_node_key(root->fs_info, parent,
1823 						  &disk_key, pslot, 0);
1824 			btrfs_set_node_key(parent, &disk_key, pslot);
1825 			btrfs_mark_buffer_dirty(parent);
1826 			if (btrfs_header_nritems(left) > orig_slot) {
1827 				path->nodes[level] = left;
1828 				path->slots[level + 1] -= 1;
1829 				path->slots[level] = orig_slot;
1830 				btrfs_tree_unlock(mid);
1831 				free_extent_buffer(mid);
1832 			} else {
1833 				orig_slot -=
1834 					btrfs_header_nritems(left);
1835 				path->slots[level] = orig_slot;
1836 				btrfs_tree_unlock(left);
1837 				free_extent_buffer(left);
1838 			}
1839 			return 0;
1840 		}
1841 		btrfs_tree_unlock(left);
1842 		free_extent_buffer(left);
1843 	}
1844 	right = read_node_slot(root, parent, pslot + 1);
1845 
1846 	/*
1847 	 * then try to empty the rightmost buffer into the middle
1848 	 */
1849 	if (right) {
1850 		u32 right_nr;
1851 
1852 		btrfs_tree_lock(right);
1853 		btrfs_set_lock_blocking(right);
1854 
1855 		right_nr = btrfs_header_nritems(right);
1856 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1857 			wret = 1;
1858 		} else {
1859 			ret = btrfs_cow_block(trans, root, right,
1860 					      parent, pslot + 1,
1861 					      &right);
1862 			if (ret)
1863 				wret = 1;
1864 			else {
1865 				wret = balance_node_right(trans, root,
1866 							  right, mid);
1867 			}
1868 		}
1869 		if (wret < 0)
1870 			ret = wret;
1871 		if (wret == 0) {
1872 			struct btrfs_disk_key disk_key;
1873 
1874 			btrfs_node_key(right, &disk_key, 0);
1875 			tree_mod_log_set_node_key(root->fs_info, parent,
1876 						  &disk_key, pslot + 1, 0);
1877 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
1878 			btrfs_mark_buffer_dirty(parent);
1879 
1880 			if (btrfs_header_nritems(mid) <= orig_slot) {
1881 				path->nodes[level] = right;
1882 				path->slots[level + 1] += 1;
1883 				path->slots[level] = orig_slot -
1884 					btrfs_header_nritems(mid);
1885 				btrfs_tree_unlock(mid);
1886 				free_extent_buffer(mid);
1887 			} else {
1888 				btrfs_tree_unlock(right);
1889 				free_extent_buffer(right);
1890 			}
1891 			return 0;
1892 		}
1893 		btrfs_tree_unlock(right);
1894 		free_extent_buffer(right);
1895 	}
1896 	return 1;
1897 }
1898 
1899 /*
1900  * readahead one full node of leaves, finding things that are close
1901  * to the block in 'slot', and triggering ra on them.
1902  */
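/*
 * The readahead here is deliberately bounded: it only fires for level 1
 * nodes (i.e. when the children are leaves), walks the parent in the
 * direction given by path->reada, skips blocks more than 64k away from
 * the target, and gives up after 64k worth of readahead or 32 scanned
 * slots, whichever comes first.
 */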
1903 static void reada_for_search(struct btrfs_root *root,
1904 			     struct btrfs_path *path,
1905 			     int level, int slot, u64 objectid)
1906 {
1907 	struct extent_buffer *node;
1908 	struct btrfs_disk_key disk_key;
1909 	u32 nritems;
1910 	u64 search;
1911 	u64 target;
1912 	u64 nread = 0;
1913 	u64 gen;
1914 	int direction = path->reada;
1915 	struct extent_buffer *eb;
1916 	u32 nr;
1917 	u32 blocksize;
1918 	u32 nscan = 0;
1919 
1920 	if (level != 1)
1921 		return;
1922 
1923 	if (!path->nodes[level])
1924 		return;
1925 
1926 	node = path->nodes[level];
1927 
1928 	search = btrfs_node_blockptr(node, slot);
1929 	blocksize = btrfs_level_size(root, level - 1);
1930 	eb = btrfs_find_tree_block(root, search, blocksize);
1931 	if (eb) {
1932 		free_extent_buffer(eb);
1933 		return;
1934 	}
1935 
1936 	target = search;
1937 
1938 	nritems = btrfs_header_nritems(node);
1939 	nr = slot;
1940 
1941 	while (1) {
1942 		if (direction < 0) {
1943 			if (nr == 0)
1944 				break;
1945 			nr--;
1946 		} else if (direction > 0) {
1947 			nr++;
1948 			if (nr >= nritems)
1949 				break;
1950 		}
1951 		if (path->reada < 0 && objectid) {
1952 			btrfs_node_key(node, &disk_key, nr);
1953 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
1954 				break;
1955 		}
1956 		search = btrfs_node_blockptr(node, nr);
1957 		if ((search <= target && target - search <= 65536) ||
1958 		    (search > target && search - target <= 65536)) {
1959 			gen = btrfs_node_ptr_generation(node, nr);
1960 			readahead_tree_block(root, search, blocksize, gen);
1961 			nread += blocksize;
1962 		}
1963 		nscan++;
1964 		if (nread > 65536 || nscan > 32)
1965 			break;
1966 	}
1967 }
1968 
1969 /*
1970  * returns -EAGAIN if it had to drop the path, or zero if everything was in
1971  * cache
1972  */
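/*
 * This pulls in the siblings on either side of path->slots[level], so a
 * following split_node() or balance_level() can use them without doing
 * synchronous reads while holding tree locks.
 */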
1973 static noinline int reada_for_balance(struct btrfs_root *root,
1974 				      struct btrfs_path *path, int level)
1975 {
1976 	int slot;
1977 	int nritems;
1978 	struct extent_buffer *parent;
1979 	struct extent_buffer *eb;
1980 	u64 gen;
1981 	u64 block1 = 0;
1982 	u64 block2 = 0;
1983 	int ret = 0;
1984 	int blocksize;
1985 
1986 	parent = path->nodes[level + 1];
1987 	if (!parent)
1988 		return 0;
1989 
1990 	nritems = btrfs_header_nritems(parent);
1991 	slot = path->slots[level + 1];
1992 	blocksize = btrfs_level_size(root, level);
1993 
1994 	if (slot > 0) {
1995 		block1 = btrfs_node_blockptr(parent, slot - 1);
1996 		gen = btrfs_node_ptr_generation(parent, slot - 1);
1997 		eb = btrfs_find_tree_block(root, block1, blocksize);
1998 		/*
1999 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2000 		 * don't want to return -EAGAIN here.  That will loop
2001 		 * forever
2002 		 */
2003 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2004 			block1 = 0;
2005 		free_extent_buffer(eb);
2006 	}
2007 	if (slot + 1 < nritems) {
2008 		block2 = btrfs_node_blockptr(parent, slot + 1);
2009 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2010 		eb = btrfs_find_tree_block(root, block2, blocksize);
2011 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2012 			block2 = 0;
2013 		free_extent_buffer(eb);
2014 	}
2015 	if (block1 || block2) {
2016 		ret = -EAGAIN;
2017 
2018 		/* release the whole path */
2019 		btrfs_release_path(path);
2020 
2021 		/* read the blocks */
2022 		if (block1)
2023 			readahead_tree_block(root, block1, blocksize, 0);
2024 		if (block2)
2025 			readahead_tree_block(root, block2, blocksize, 0);
2026 
2027 		if (block1) {
2028 			eb = read_tree_block(root, block1, blocksize, 0);
2029 			free_extent_buffer(eb);
2030 		}
2031 		if (block2) {
2032 			eb = read_tree_block(root, block2, blocksize, 0);
2033 			free_extent_buffer(eb);
2034 		}
2035 	}
2036 	return ret;
2037 }
2038 
2039 
2040 /*
2041  * when we walk down the tree, it is usually safe to unlock the higher layers
2042  * in the tree.  The exceptions are when our path goes through slot 0, because
2043  * operations on the tree might require changing key pointers higher up in the
2044  * tree.
2045  *
2046  * callers might also have set path->keep_locks, which tells this code to keep
2047  * the lock if the path points to the last slot in the block.  This is part of
2048  * walking through the tree, and selecting the next slot in the higher block.
2049  *
2050  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2051  * if lowest_unlock is 1, level 0 won't be unlocked
2052  */
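/*
 * As a side effect, whenever a lock at or below *write_lock_level is
 * dropped here, *write_lock_level is pulled down with it so it never
 * claims a lock that is no longer held.
 */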
2053 static noinline void unlock_up(struct btrfs_path *path, int level,
2054 			       int lowest_unlock, int min_write_lock_level,
2055 			       int *write_lock_level)
2056 {
2057 	int i;
2058 	int skip_level = level;
2059 	int no_skips = 0;
2060 	struct extent_buffer *t;
2061 
2062 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2063 		if (!path->nodes[i])
2064 			break;
2065 		if (!path->locks[i])
2066 			break;
2067 		if (!no_skips && path->slots[i] == 0) {
2068 			skip_level = i + 1;
2069 			continue;
2070 		}
2071 		if (!no_skips && path->keep_locks) {
2072 			u32 nritems;
2073 			t = path->nodes[i];
2074 			nritems = btrfs_header_nritems(t);
2075 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2076 				skip_level = i + 1;
2077 				continue;
2078 			}
2079 		}
2080 		if (skip_level < i && i >= lowest_unlock)
2081 			no_skips = 1;
2082 
2083 		t = path->nodes[i];
2084 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2085 			btrfs_tree_unlock_rw(t, path->locks[i]);
2086 			path->locks[i] = 0;
2087 			if (write_lock_level &&
2088 			    i > min_write_lock_level &&
2089 			    i <= *write_lock_level) {
2090 				*write_lock_level = i - 1;
2091 			}
2092 		}
2093 	}
2094 }
2095 
2096 /*
2097  * This releases any locks held in the path starting at level and
2098  * going all the way up to the root.
2099  *
2100  * btrfs_search_slot will keep the lock held on higher nodes in a few
2101  * corner cases, such as COW of the block at slot zero in the node.  This
2102  * ignores those rules, and it should only be called when there are no
2103  * more updates to be done higher up in the tree.
2104  */
2105 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2106 {
2107 	int i;
2108 
2109 	if (path->keep_locks)
2110 		return;
2111 
2112 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2113 		if (!path->nodes[i])
2114 			continue;
2115 		if (!path->locks[i])
2116 			continue;
2117 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2118 		path->locks[i] = 0;
2119 	}
2120 }
2121 
2122 /*
2123  * helper function for btrfs_search_slot.  The goal is to find a block
2124  * in cache without setting the path to blocking.  If we find the block
2125  * we return zero and the path is unchanged.
2126  *
2127  * If we can't find the block, we set the path blocking and do some
2128  * reada.  -EAGAIN is returned and the search must be repeated.
2129  */
2130 static int
2131 read_block_for_search(struct btrfs_trans_handle *trans,
2132 		       struct btrfs_root *root, struct btrfs_path *p,
2133 		       struct extent_buffer **eb_ret, int level, int slot,
2134 		       struct btrfs_key *key, u64 time_seq)
2135 {
2136 	u64 blocknr;
2137 	u64 gen;
2138 	u32 blocksize;
2139 	struct extent_buffer *b = *eb_ret;
2140 	struct extent_buffer *tmp;
2141 	int ret;
2142 
2143 	blocknr = btrfs_node_blockptr(b, slot);
2144 	gen = btrfs_node_ptr_generation(b, slot);
2145 	blocksize = btrfs_level_size(root, level - 1);
2146 
2147 	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2148 	if (tmp) {
2149 		/* first we do an atomic uptodate check */
2150 		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2151 			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2152 				/*
2153 				 * we found an up to date block without
2154 				 * sleeping, return
2155 				 * right away
2156 				 */
2157 				*eb_ret = tmp;
2158 				return 0;
2159 			}
2160 			/* the pages were up to date, but we failed
2161 			 * the generation number check.  Do a full
2162 			 * read for the generation number that is correct.
2163 			 * We must do this without dropping locks so
2164 			 * we can trust our generation number
2165 			 */
2166 			free_extent_buffer(tmp);
2167 			btrfs_set_path_blocking(p);
2168 
2169 			/* now we're allowed to do a blocking uptodate check */
2170 			tmp = read_tree_block(root, blocknr, blocksize, gen);
2171 			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
2172 				*eb_ret = tmp;
2173 				return 0;
2174 			}
2175 			free_extent_buffer(tmp);
2176 			btrfs_release_path(p);
2177 			return -EIO;
2178 		}
2179 	}
2180 
2181 	/*
2182 	 * reduce lock contention at high levels
2183 	 * of the btree by dropping locks before
2184 	 * we read.  Don't release the lock on the current
2185 	 * level because we need to walk this node to figure
2186 	 * out which blocks to read.
2187 	 */
2188 	btrfs_unlock_up_safe(p, level + 1);
2189 	btrfs_set_path_blocking(p);
2190 
2191 	free_extent_buffer(tmp);
2192 	if (p->reada)
2193 		reada_for_search(root, p, level, slot, key->objectid);
2194 
2195 	btrfs_release_path(p);
2196 
2197 	ret = -EAGAIN;
2198 	tmp = read_tree_block(root, blocknr, blocksize, 0);
2199 	if (tmp) {
2200 		/*
2201 		 * If the read above didn't mark this buffer up to date,
2202 		 * it will never end up being up to date.  Set ret to -EIO now
2203 		 * and give up so that our caller doesn't loop forever
2204 		 * on our EAGAINs.
2205 		 */
2206 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
2207 			ret = -EIO;
2208 		free_extent_buffer(tmp);
2209 	}
2210 	return ret;
2211 }
2212 
2213 /*
2214  * helper function for btrfs_search_slot.  This does all of the checks
2215  * for node-level blocks and does any balancing required based on
2216  * the ins_len.
2217  *
2218  * If no extra work was required, zero is returned.  If we had to
2219  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2220  * start over
2221  */
2222 static int
2223 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2224 		       struct btrfs_root *root, struct btrfs_path *p,
2225 		       struct extent_buffer *b, int level, int ins_len,
2226 		       int *write_lock_level)
2227 {
2228 	int ret;
2229 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2230 	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2231 		int sret;
2232 
2233 		if (*write_lock_level < level + 1) {
2234 			*write_lock_level = level + 1;
2235 			btrfs_release_path(p);
2236 			goto again;
2237 		}
2238 
2239 		sret = reada_for_balance(root, p, level);
2240 		if (sret)
2241 			goto again;
2242 
2243 		btrfs_set_path_blocking(p);
2244 		sret = split_node(trans, root, p, level);
2245 		btrfs_clear_path_blocking(p, NULL, 0);
2246 
2247 		BUG_ON(sret > 0);
2248 		if (sret) {
2249 			ret = sret;
2250 			goto done;
2251 		}
2252 		b = p->nodes[level];
2253 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2254 		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2255 		int sret;
2256 
2257 		if (*write_lock_level < level + 1) {
2258 			*write_lock_level = level + 1;
2259 			btrfs_release_path(p);
2260 			goto again;
2261 		}
2262 
2263 		sret = reada_for_balance(root, p, level);
2264 		if (sret)
2265 			goto again;
2266 
2267 		btrfs_set_path_blocking(p);
2268 		sret = balance_level(trans, root, p, level);
2269 		btrfs_clear_path_blocking(p, NULL, 0);
2270 
2271 		if (sret) {
2272 			ret = sret;
2273 			goto done;
2274 		}
2275 		b = p->nodes[level];
2276 		if (!b) {
2277 			btrfs_release_path(p);
2278 			goto again;
2279 		}
2280 		BUG_ON(btrfs_header_nritems(b) == 1);
2281 	}
2282 	return 0;
2283 
2284 again:
2285 	ret = -EAGAIN;
2286 done:
2287 	return ret;
2288 }
2289 
2290 /*
2291  * look for key in the tree.  path is filled in with nodes along the way
2292  * if key is found, we return zero and you can find the item in the leaf
2293  * level of the path (level 0)
2294  *
2295  * If the key isn't found, the path points to the slot where it should
2296  * be inserted, and 1 is returned.  If there are other errors during the
2297  * search a negative error number is returned.
2298  *
2299  * if ins_len > 0, nodes and leaves will be split as we walk down the
2300  * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
2301  * possible)
2302  */
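/*
 * A minimal caller sketch (illustrative only: the inode key and the
 * 'ino' variable are made up, and error handling is trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	if (ret == 0)
 *		the item is at path->nodes[0], path->slots[0]
 *	else if (ret == 1)
 *		the key is missing, the path points at the insert position
 *	else
 *		ret is a negative errno
 *	btrfs_free_path(path);
 */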
2303 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2304 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
2305 		      ins_len, int cow)
2306 {
2307 	struct extent_buffer *b;
2308 	int slot;
2309 	int ret;
2310 	int err;
2311 	int level;
2312 	int lowest_unlock = 1;
2313 	int root_lock;
2314 	/* everything at write_lock_level or lower must be write locked */
2315 	int write_lock_level = 0;
2316 	u8 lowest_level = 0;
2317 	int min_write_lock_level;
2318 
2319 	lowest_level = p->lowest_level;
2320 	WARN_ON(lowest_level && ins_len > 0);
2321 	WARN_ON(p->nodes[0] != NULL);
2322 
2323 	if (ins_len < 0) {
2324 		lowest_unlock = 2;
2325 
2326 		/* when we are removing items, we might have to go up to level
2327 		 * two as we update tree pointers.  Make sure we keep write
2328 		 * locks for those levels as well
2329 		 */
2330 		write_lock_level = 2;
2331 	} else if (ins_len > 0) {
2332 		/*
2333 		 * for inserting items, make sure we have a write lock on
2334 		 * level 1 so we can update keys
2335 		 */
2336 		write_lock_level = 1;
2337 	}
2338 
2339 	if (!cow)
2340 		write_lock_level = -1;
2341 
2342 	if (cow && (p->keep_locks || p->lowest_level))
2343 		write_lock_level = BTRFS_MAX_LEVEL;
2344 
2345 	min_write_lock_level = write_lock_level;
2346 
2347 again:
2348 	/*
2349 	 * we try very hard to do read locks on the root
2350 	 */
2351 	root_lock = BTRFS_READ_LOCK;
2352 	level = 0;
2353 	if (p->search_commit_root) {
2354 		/*
2355 		 * the commit roots are read only
2356 		 * so we always do read locks
2357 		 */
2358 		b = root->commit_root;
2359 		extent_buffer_get(b);
2360 		level = btrfs_header_level(b);
2361 		if (!p->skip_locking)
2362 			btrfs_tree_read_lock(b);
2363 	} else {
2364 		if (p->skip_locking) {
2365 			b = btrfs_root_node(root);
2366 			level = btrfs_header_level(b);
2367 		} else {
2368 			/* we don't know the level of the root node
2369 			 * until we actually have it read locked
2370 			 */
2371 			b = btrfs_read_lock_root_node(root);
2372 			level = btrfs_header_level(b);
2373 			if (level <= write_lock_level) {
2374 				/* whoops, must trade for write lock */
2375 				btrfs_tree_read_unlock(b);
2376 				free_extent_buffer(b);
2377 				b = btrfs_lock_root_node(root);
2378 				root_lock = BTRFS_WRITE_LOCK;
2379 
2380 				/* the level might have changed, check again */
2381 				level = btrfs_header_level(b);
2382 			}
2383 		}
2384 	}
2385 	p->nodes[level] = b;
2386 	if (!p->skip_locking)
2387 		p->locks[level] = root_lock;
2388 
2389 	while (b) {
2390 		level = btrfs_header_level(b);
2391 
2392 		/*
2393 		 * setup the path here so we can release it under lock
2394 		 * contention with the cow code
2395 		 */
2396 		if (cow) {
2397 			/*
2398 			 * if we don't really need to cow this block
2399 			 * then we don't want to set the path blocking,
2400 			 * so we test it here
2401 			 */
2402 			if (!should_cow_block(trans, root, b))
2403 				goto cow_done;
2404 
2405 			btrfs_set_path_blocking(p);
2406 
2407 			/*
2408 			 * must have write locks on this node and the
2409 			 * parent
2410 			 */
2411 			if (level + 1 > write_lock_level) {
2412 				write_lock_level = level + 1;
2413 				btrfs_release_path(p);
2414 				goto again;
2415 			}
2416 
2417 			err = btrfs_cow_block(trans, root, b,
2418 					      p->nodes[level + 1],
2419 					      p->slots[level + 1], &b);
2420 			if (err) {
2421 				ret = err;
2422 				goto done;
2423 			}
2424 		}
2425 cow_done:
2426 		BUG_ON(!cow && ins_len);
2427 
2428 		p->nodes[level] = b;
2429 		btrfs_clear_path_blocking(p, NULL, 0);
2430 
2431 		/*
2432 		 * we have a lock on b and as long as we aren't changing
2433 		 * the tree, there is no way for the items in b to change.
2434 		 * It is safe to drop the lock on our parent before we
2435 		 * go through the expensive btree search on b.
2436 		 *
2437 		 * If cow is true, then we might be changing slot zero,
2438 		 * which may require changing the parent.  So, we can't
2439 		 * drop the lock until after we know which slot we're
2440 		 * operating on.
2441 		 */
2442 		if (!cow)
2443 			btrfs_unlock_up_safe(p, level + 1);
2444 
2445 		ret = bin_search(b, key, level, &slot);
2446 
2447 		if (level != 0) {
2448 			int dec = 0;
2449 			if (ret && slot > 0) {
2450 				dec = 1;
2451 				slot -= 1;
2452 			}
2453 			p->slots[level] = slot;
2454 			err = setup_nodes_for_search(trans, root, p, b, level,
2455 					     ins_len, &write_lock_level);
2456 			if (err == -EAGAIN)
2457 				goto again;
2458 			if (err) {
2459 				ret = err;
2460 				goto done;
2461 			}
2462 			b = p->nodes[level];
2463 			slot = p->slots[level];
2464 
2465 			/*
2466 			 * slot 0 is special, if we change the key
2467 			 * we have to update the parent pointer
2468 			 * which means we must have a write lock
2469 			 * on the parent
2470 			 */
2471 			if (slot == 0 && cow &&
2472 			    write_lock_level < level + 1) {
2473 				write_lock_level = level + 1;
2474 				btrfs_release_path(p);
2475 				goto again;
2476 			}
2477 
2478 			unlock_up(p, level, lowest_unlock,
2479 				  min_write_lock_level, &write_lock_level);
2480 
2481 			if (level == lowest_level) {
2482 				if (dec)
2483 					p->slots[level]++;
2484 				goto done;
2485 			}
2486 
2487 			err = read_block_for_search(trans, root, p,
2488 						    &b, level, slot, key, 0);
2489 			if (err == -EAGAIN)
2490 				goto again;
2491 			if (err) {
2492 				ret = err;
2493 				goto done;
2494 			}
2495 
2496 			if (!p->skip_locking) {
2497 				level = btrfs_header_level(b);
2498 				if (level <= write_lock_level) {
2499 					err = btrfs_try_tree_write_lock(b);
2500 					if (!err) {
2501 						btrfs_set_path_blocking(p);
2502 						btrfs_tree_lock(b);
2503 						btrfs_clear_path_blocking(p, b,
2504 								  BTRFS_WRITE_LOCK);
2505 					}
2506 					p->locks[level] = BTRFS_WRITE_LOCK;
2507 				} else {
2508 					err = btrfs_try_tree_read_lock(b);
2509 					if (!err) {
2510 						btrfs_set_path_blocking(p);
2511 						btrfs_tree_read_lock(b);
2512 						btrfs_clear_path_blocking(p, b,
2513 								  BTRFS_READ_LOCK);
2514 					}
2515 					p->locks[level] = BTRFS_READ_LOCK;
2516 				}
2517 				p->nodes[level] = b;
2518 			}
2519 		} else {
2520 			p->slots[level] = slot;
2521 			if (ins_len > 0 &&
2522 			    btrfs_leaf_free_space(root, b) < ins_len) {
2523 				if (write_lock_level < 1) {
2524 					write_lock_level = 1;
2525 					btrfs_release_path(p);
2526 					goto again;
2527 				}
2528 
2529 				btrfs_set_path_blocking(p);
2530 				err = split_leaf(trans, root, key,
2531 						 p, ins_len, ret == 0);
2532 				btrfs_clear_path_blocking(p, NULL, 0);
2533 
2534 				BUG_ON(err > 0);
2535 				if (err) {
2536 					ret = err;
2537 					goto done;
2538 				}
2539 			}
2540 			if (!p->search_for_split)
2541 				unlock_up(p, level, lowest_unlock,
2542 					  min_write_lock_level, &write_lock_level);
2543 			goto done;
2544 		}
2545 	}
2546 	ret = 1;
2547 done:
2548 	/*
2549 	 * we don't really know what they plan on doing with the path
2550 	 * from here on, so for now just mark it as blocking
2551 	 */
2552 	if (!p->leave_spinning)
2553 		btrfs_set_path_blocking(p);
2554 	if (ret < 0)
2555 		btrfs_release_path(p);
2556 	return ret;
2557 }
2558 
2559 /*
2560  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2561  * current state of the tree together with the operations recorded in the tree
2562  * modification log to search for the key in a previous version of this tree, as
2563  * denoted by the time_seq parameter.
2564  *
2565  * Naturally, there is no support for insert, delete or cow operations.
2566  *
2567  * The resulting path and return value will be set up as if we called
2568  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2569  */
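/*
 * Because the rewound tree is never modified, only read locks are taken
 * on the way down.
 */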
2570 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2571 			  struct btrfs_path *p, u64 time_seq)
2572 {
2573 	struct extent_buffer *b;
2574 	int slot;
2575 	int ret;
2576 	int err;
2577 	int level;
2578 	int lowest_unlock = 1;
2579 	u8 lowest_level = 0;
2580 
2581 	lowest_level = p->lowest_level;
2582 	WARN_ON(p->nodes[0] != NULL);
2583 
2584 	if (p->search_commit_root) {
2585 		BUG_ON(time_seq);
2586 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2587 	}
2588 
2589 again:
2590 	level = 0;
2591 	b = get_old_root(root, time_seq);
2592 	extent_buffer_get(b);
2593 	level = btrfs_header_level(b);
2594 	btrfs_tree_read_lock(b);
2595 	p->locks[level] = BTRFS_READ_LOCK;
2596 
2597 	while (b) {
2598 		level = btrfs_header_level(b);
2599 		p->nodes[level] = b;
2600 		btrfs_clear_path_blocking(p, NULL, 0);
2601 
2602 		/*
2603 		 * we have a lock on b and as long as we aren't changing
2604 		 * the tree, there is no way for the items in b to change.
2605 		 * It is safe to drop the lock on our parent before we
2606 		 * go through the expensive btree search on b.
2607 		 */
2608 		btrfs_unlock_up_safe(p, level + 1);
2609 
2610 		ret = bin_search(b, key, level, &slot);
2611 
2612 		if (level != 0) {
2613 			int dec = 0;
2614 			if (ret && slot > 0) {
2615 				dec = 1;
2616 				slot -= 1;
2617 			}
2618 			p->slots[level] = slot;
2619 			unlock_up(p, level, lowest_unlock, 0, NULL);
2620 
2621 			if (level == lowest_level) {
2622 				if (dec)
2623 					p->slots[level]++;
2624 				goto done;
2625 			}
2626 
2627 			err = read_block_for_search(NULL, root, p, &b, level,
2628 						    slot, key, time_seq);
2629 			if (err == -EAGAIN)
2630 				goto again;
2631 			if (err) {
2632 				ret = err;
2633 				goto done;
2634 			}
2635 
2636 			level = btrfs_header_level(b);
2637 			err = btrfs_try_tree_read_lock(b);
2638 			if (!err) {
2639 				btrfs_set_path_blocking(p);
2640 				btrfs_tree_read_lock(b);
2641 				btrfs_clear_path_blocking(p, b,
2642 							  BTRFS_READ_LOCK);
2643 			}
2644 			p->locks[level] = BTRFS_READ_LOCK;
2645 			p->nodes[level] = b;
2646 			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2647 			if (b != p->nodes[level]) {
2648 				btrfs_tree_unlock_rw(p->nodes[level],
2649 						     p->locks[level]);
2650 				p->locks[level] = 0;
2651 				p->nodes[level] = b;
2652 			}
2653 		} else {
2654 			p->slots[level] = slot;
2655 			unlock_up(p, level, lowest_unlock, 0, NULL);
2656 			goto done;
2657 		}
2658 	}
2659 	ret = 1;
2660 done:
2661 	if (!p->leave_spinning)
2662 		btrfs_set_path_blocking(p);
2663 	if (ret < 0)
2664 		btrfs_release_path(p);
2665 
2666 	return ret;
2667 }
2668 
2669 /*
2670  * adjust the pointers going up the tree, starting at level
2671  * making sure the right key of each node points to 'key'.
2672  * This is used after shifting pointers to the left, so it stops
2673  * fixing up pointers when a given leaf/node is not in slot 0 of the
2674  * higher levels
2675  *
2676  */
2677 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2678 			   struct btrfs_root *root, struct btrfs_path *path,
2679 			   struct btrfs_disk_key *key, int level)
2680 {
2681 	int i;
2682 	struct extent_buffer *t;
2683 
2684 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2685 		int tslot = path->slots[i];
2686 		if (!path->nodes[i])
2687 			break;
2688 		t = path->nodes[i];
2689 		tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
2690 		btrfs_set_node_key(t, key, tslot);
2691 		btrfs_mark_buffer_dirty(path->nodes[i]);
2692 		if (tslot != 0)
2693 			break;
2694 	}
2695 }
2696 
2697 /*
2698  * update item key.
2699  *
2700  * This function isn't completely safe. It's the caller's responsibility
2701  * to make sure the new key won't break the order
2702  */
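/*
 * The BUG_ONs below only compare the new key against its immediate
 * neighbours inside this leaf; ordering with respect to adjacent leaves
 * remains the caller's problem.
 */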
2703 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2704 			     struct btrfs_root *root, struct btrfs_path *path,
2705 			     struct btrfs_key *new_key)
2706 {
2707 	struct btrfs_disk_key disk_key;
2708 	struct extent_buffer *eb;
2709 	int slot;
2710 
2711 	eb = path->nodes[0];
2712 	slot = path->slots[0];
2713 	if (slot > 0) {
2714 		btrfs_item_key(eb, &disk_key, slot - 1);
2715 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2716 	}
2717 	if (slot < btrfs_header_nritems(eb) - 1) {
2718 		btrfs_item_key(eb, &disk_key, slot + 1);
2719 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2720 	}
2721 
2722 	btrfs_cpu_key_to_disk(&disk_key, new_key);
2723 	btrfs_set_item_key(eb, &disk_key, slot);
2724 	btrfs_mark_buffer_dirty(eb);
2725 	if (slot == 0)
2726 		fixup_low_keys(trans, root, path, &disk_key, 1);
2727 }
2728 
2729 /*
2730  * try to push data from one node into the next node left in the
2731  * tree.
2732  *
2733  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2734  * error, and > 0 if there was no room in the left hand block.
2735  */
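/*
 * Rough worked example (assuming ~121 pointers per 4k node): if dst
 * holds 100 pointers and src holds 30, push_items starts at 121 - 100 =
 * 21; with empty == 0 it is capped at min(30 - 8, 21) = 21, leaving 9
 * pointers behind in src.
 */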
2736 static int push_node_left(struct btrfs_trans_handle *trans,
2737 			  struct btrfs_root *root, struct extent_buffer *dst,
2738 			  struct extent_buffer *src, int empty)
2739 {
2740 	int push_items = 0;
2741 	int src_nritems;
2742 	int dst_nritems;
2743 	int ret = 0;
2744 
2745 	src_nritems = btrfs_header_nritems(src);
2746 	dst_nritems = btrfs_header_nritems(dst);
2747 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2748 	WARN_ON(btrfs_header_generation(src) != trans->transid);
2749 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
2750 
2751 	if (!empty && src_nritems <= 8)
2752 		return 1;
2753 
2754 	if (push_items <= 0)
2755 		return 1;
2756 
2757 	if (empty) {
2758 		push_items = min(src_nritems, push_items);
2759 		if (push_items < src_nritems) {
2760 			/* leave at least 8 pointers in the node if
2761 			 * we aren't going to empty it
2762 			 */
2763 			if (src_nritems - push_items < 8) {
2764 				if (push_items <= 8)
2765 					return 1;
2766 				push_items -= 8;
2767 			}
2768 		}
2769 	} else
2770 		push_items = min(src_nritems - 8, push_items);
2771 
2772 	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
2773 			     push_items);
2774 	copy_extent_buffer(dst, src,
2775 			   btrfs_node_key_ptr_offset(dst_nritems),
2776 			   btrfs_node_key_ptr_offset(0),
2777 			   push_items * sizeof(struct btrfs_key_ptr));
2778 
2779 	if (push_items < src_nritems) {
2780 		tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
2781 				     src_nritems - push_items);
2782 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2783 				      btrfs_node_key_ptr_offset(push_items),
2784 				      (src_nritems - push_items) *
2785 				      sizeof(struct btrfs_key_ptr));
2786 	}
2787 	btrfs_set_header_nritems(src, src_nritems - push_items);
2788 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
2789 	btrfs_mark_buffer_dirty(src);
2790 	btrfs_mark_buffer_dirty(dst);
2791 
2792 	return ret;
2793 }
2794 
2795 /*
2796  * try to push data from one node into the next node right in the
2797  * tree.
2798  *
2799  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2800  * error, and > 0 if there was no room in the right hand block.
2801  *
2802  * this will only push up to 1/2 the contents of the left node over
2803  */
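/*
 * For example, with src holding 10 pointers, max_push is 10 / 2 + 1 = 6,
 * so at most 6 pointers move right and src keeps at least 4.
 */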
2804 static int balance_node_right(struct btrfs_trans_handle *trans,
2805 			      struct btrfs_root *root,
2806 			      struct extent_buffer *dst,
2807 			      struct extent_buffer *src)
2808 {
2809 	int push_items = 0;
2810 	int max_push;
2811 	int src_nritems;
2812 	int dst_nritems;
2813 	int ret = 0;
2814 
2815 	WARN_ON(btrfs_header_generation(src) != trans->transid);
2816 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
2817 
2818 	src_nritems = btrfs_header_nritems(src);
2819 	dst_nritems = btrfs_header_nritems(dst);
2820 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2821 	if (push_items <= 0)
2822 		return 1;
2823 
2824 	if (src_nritems < 4)
2825 		return 1;
2826 
2827 	max_push = src_nritems / 2 + 1;
2828 	/* don't try to empty the node */
2829 	if (max_push >= src_nritems)
2830 		return 1;
2831 
2832 	if (max_push < push_items)
2833 		push_items = max_push;
2834 
2835 	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
2836 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2837 				      btrfs_node_key_ptr_offset(0),
2838 				      (dst_nritems) *
2839 				      sizeof(struct btrfs_key_ptr));
2840 
2841 	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
2842 			     src_nritems - push_items, push_items);
2843 	copy_extent_buffer(dst, src,
2844 			   btrfs_node_key_ptr_offset(0),
2845 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
2846 			   push_items * sizeof(struct btrfs_key_ptr));
2847 
2848 	btrfs_set_header_nritems(src, src_nritems - push_items);
2849 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
2850 
2851 	btrfs_mark_buffer_dirty(src);
2852 	btrfs_mark_buffer_dirty(dst);
2853 
2854 	return ret;
2855 }
2856 
2857 /*
2858  * helper function to insert a new root level in the tree.
2859  * A new node is allocated, and a single item is inserted to
2860  * point to the existing root
2861  *
2862  * returns zero on success or < 0 on failure.
2863  */
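/*
 * This is how the tree grows in height: the new root starts out with a
 * single pointer to the old root, and the path gains an entry at the new
 * level so callers can keep walking as if nothing happened.
 */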
2864 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2865 			   struct btrfs_root *root,
2866 			   struct btrfs_path *path, int level)
2867 {
2868 	u64 lower_gen;
2869 	struct extent_buffer *lower;
2870 	struct extent_buffer *c;
2871 	struct extent_buffer *old;
2872 	struct btrfs_disk_key lower_key;
2873 
2874 	BUG_ON(path->nodes[level]);
2875 	BUG_ON(path->nodes[level-1] != root->node);
2876 
2877 	lower = path->nodes[level-1];
2878 	if (level == 1)
2879 		btrfs_item_key(lower, &lower_key, 0);
2880 	else
2881 		btrfs_node_key(lower, &lower_key, 0);
2882 
2883 	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2884 				   root->root_key.objectid, &lower_key,
2885 				   level, root->node->start, 0);
2886 	if (IS_ERR(c))
2887 		return PTR_ERR(c);
2888 
2889 	root_add_used(root, root->nodesize);
2890 
2891 	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2892 	btrfs_set_header_nritems(c, 1);
2893 	btrfs_set_header_level(c, level);
2894 	btrfs_set_header_bytenr(c, c->start);
2895 	btrfs_set_header_generation(c, trans->transid);
2896 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
2897 	btrfs_set_header_owner(c, root->root_key.objectid);
2898 
2899 	write_extent_buffer(c, root->fs_info->fsid,
2900 			    (unsigned long)btrfs_header_fsid(c),
2901 			    BTRFS_FSID_SIZE);
2902 
2903 	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2904 			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
2905 			    BTRFS_UUID_SIZE);
2906 
2907 	btrfs_set_node_key(c, &lower_key, 0);
2908 	btrfs_set_node_blockptr(c, 0, lower->start);
2909 	lower_gen = btrfs_header_generation(lower);
2910 	WARN_ON(lower_gen != trans->transid);
2911 
2912 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
2913 
2914 	btrfs_mark_buffer_dirty(c);
2915 
2916 	old = root->node;
2917 	tree_mod_log_set_root_pointer(root, c);
2918 	rcu_assign_pointer(root->node, c);
2919 
2920 	/* the super has an extra ref to root->node */
2921 	free_extent_buffer(old);
2922 
2923 	add_root_to_dirty_list(root);
2924 	extent_buffer_get(c);
2925 	path->nodes[level] = c;
2926 	path->locks[level] = BTRFS_WRITE_LOCK;
2927 	path->slots[level] = 0;
2928 	return 0;
2929 }
2930 
2931 /*
2932  * worker function to insert a single pointer in a node.
2933  * the node should have enough room for the pointer already
2934  *
2935  * slot and level indicate where you want the key to go, and
2936  * bytenr is the block the key points to.
2937  */
2938 static void insert_ptr(struct btrfs_trans_handle *trans,
2939 		       struct btrfs_root *root, struct btrfs_path *path,
2940 		       struct btrfs_disk_key *key, u64 bytenr,
2941 		       int slot, int level, int tree_mod_log)
2942 {
2943 	struct extent_buffer *lower;
2944 	int nritems;
2945 	int ret;
2946 
2947 	BUG_ON(!path->nodes[level]);
2948 	btrfs_assert_tree_locked(path->nodes[level]);
2949 	lower = path->nodes[level];
2950 	nritems = btrfs_header_nritems(lower);
2951 	BUG_ON(slot > nritems);
2952 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
2953 	if (slot != nritems) {
2954 		if (tree_mod_log && level)
2955 			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
2956 					     slot, nritems - slot);
2957 		memmove_extent_buffer(lower,
2958 			      btrfs_node_key_ptr_offset(slot + 1),
2959 			      btrfs_node_key_ptr_offset(slot),
2960 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
2961 	}
2962 	if (tree_mod_log && level) {
2963 		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
2964 					      MOD_LOG_KEY_ADD);
2965 		BUG_ON(ret < 0);
2966 	}
2967 	btrfs_set_node_key(lower, key, slot);
2968 	btrfs_set_node_blockptr(lower, slot, bytenr);
2969 	WARN_ON(trans->transid == 0);
2970 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2971 	btrfs_set_header_nritems(lower, nritems + 1);
2972 	btrfs_mark_buffer_dirty(lower);
2973 }
2974 
2975 /*
2976  * split the node at the specified level in path in two.
2977  * The path is corrected to point to the appropriate node after the split
2978  *
2979  * Before splitting this tries to make some room in the node by pushing
2980  * left and right, if either one works, it returns right away.
2981  *
2982  * returns 0 on success and < 0 on failure
2983  */
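/*
 * The mechanics: the upper half of the pointers (from mid = (nritems +
 * 1) / 2 onwards) are copied into a freshly allocated node, and a
 * pointer to that node is inserted in the parent right after the
 * original.
 */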
2984 static noinline int split_node(struct btrfs_trans_handle *trans,
2985 			       struct btrfs_root *root,
2986 			       struct btrfs_path *path, int level)
2987 {
2988 	struct extent_buffer *c;
2989 	struct extent_buffer *split;
2990 	struct btrfs_disk_key disk_key;
2991 	int mid;
2992 	int ret;
2993 	u32 c_nritems;
2994 
2995 	c = path->nodes[level];
2996 	WARN_ON(btrfs_header_generation(c) != trans->transid);
2997 	if (c == root->node) {
2998 		/* trying to split the root, lets make a new one */
2999 		ret = insert_new_root(trans, root, path, level + 1);
3000 		if (ret)
3001 			return ret;
3002 	} else {
3003 		ret = push_nodes_for_insert(trans, root, path, level);
3004 		c = path->nodes[level];
3005 		if (!ret && btrfs_header_nritems(c) <
3006 		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3007 			return 0;
3008 		if (ret < 0)
3009 			return ret;
3010 	}
3011 
3012 	c_nritems = btrfs_header_nritems(c);
3013 	mid = (c_nritems + 1) / 2;
3014 	btrfs_node_key(c, &disk_key, mid);
3015 
3016 	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3017 					root->root_key.objectid,
3018 					&disk_key, level, c->start, 0);
3019 	if (IS_ERR(split))
3020 		return PTR_ERR(split);
3021 
3022 	root_add_used(root, root->nodesize);
3023 
3024 	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3025 	btrfs_set_header_level(split, btrfs_header_level(c));
3026 	btrfs_set_header_bytenr(split, split->start);
3027 	btrfs_set_header_generation(split, trans->transid);
3028 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3029 	btrfs_set_header_owner(split, root->root_key.objectid);
3030 	write_extent_buffer(split, root->fs_info->fsid,
3031 			    (unsigned long)btrfs_header_fsid(split),
3032 			    BTRFS_FSID_SIZE);
3033 	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3034 			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
3035 			    BTRFS_UUID_SIZE);
3036 
3037 	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3038 	copy_extent_buffer(split, c,
3039 			   btrfs_node_key_ptr_offset(0),
3040 			   btrfs_node_key_ptr_offset(mid),
3041 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3042 	btrfs_set_header_nritems(split, c_nritems - mid);
3043 	btrfs_set_header_nritems(c, mid);
3044 	ret = 0;
3045 
3046 	btrfs_mark_buffer_dirty(c);
3047 	btrfs_mark_buffer_dirty(split);
3048 
3049 	insert_ptr(trans, root, path, &disk_key, split->start,
3050 		   path->slots[level + 1] + 1, level + 1, 1);
3051 
3052 	if (path->slots[level] >= mid) {
3053 		path->slots[level] -= mid;
3054 		btrfs_tree_unlock(c);
3055 		free_extent_buffer(c);
3056 		path->nodes[level] = split;
3057 		path->slots[level + 1] += 1;
3058 	} else {
3059 		btrfs_tree_unlock(split);
3060 		free_extent_buffer(split);
3061 	}
3062 	return ret;
3063 }
3064 
3065 /*
3066  * how many bytes are required to store the items in a leaf.  start
3067  * and nr indicate which items in the leaf to check.  This totals up the
3068  * space used both by the item structs and the item data
3069  */
3070 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3071 {
3072 	int data_len;
3073 	int nritems = btrfs_header_nritems(l);
3074 	int end = min(nritems, start + nr) - 1;
3075 
3076 	if (!nr)
3077 		return 0;
3078 	data_len = btrfs_item_end_nr(l, start);
3079 	data_len = data_len - btrfs_item_offset_nr(l, end);
3080 	data_len += sizeof(struct btrfs_item) * nr;
3081 	WARN_ON(data_len < 0);
3082 	return data_len;
3083 }
3084 
3085 /*
3086  * The space between the end of the leaf items and
3087  * the start of the leaf data.  IOW, how much room
3088  * the leaf has left for both items and data
3089  */
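/*
 * Back-of-the-envelope example (assuming 4k leaves, a ~101 byte header
 * and a 25 byte struct btrfs_item): a leaf holding 10 items with 100
 * data bytes each uses 10 * 25 + 1000 = 1250 bytes, leaving roughly
 * 3995 - 1250 = 2745 bytes free.
 */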
3090 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3091 				   struct extent_buffer *leaf)
3092 {
3093 	int nritems = btrfs_header_nritems(leaf);
3094 	int ret;
3095 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3096 	if (ret < 0) {
3097 		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3098 		       "used %d nritems %d\n",
3099 		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3100 		       leaf_space_used(leaf, 0, nritems), nritems);
3101 	}
3102 	return ret;
3103 }
3104 
3105 /*
3106  * min slot controls the lowest index we're willing to push to the
3107  * right.  We'll push up to and including min_slot, but no lower
3108  */
3109 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3110 				      struct btrfs_root *root,
3111 				      struct btrfs_path *path,
3112 				      int data_size, int empty,
3113 				      struct extent_buffer *right,
3114 				      int free_space, u32 left_nritems,
3115 				      u32 min_slot)
3116 {
3117 	struct extent_buffer *left = path->nodes[0];
3118 	struct extent_buffer *upper = path->nodes[1];
3119 	struct btrfs_map_token token;
3120 	struct btrfs_disk_key disk_key;
3121 	int slot;
3122 	u32 i;
3123 	int push_space = 0;
3124 	int push_items = 0;
3125 	struct btrfs_item *item;
3126 	u32 nr;
3127 	u32 right_nritems;
3128 	u32 data_end;
3129 	u32 this_item_size;
3130 
3131 	btrfs_init_map_token(&token);
3132 
3133 	if (empty)
3134 		nr = 0;
3135 	else
3136 		nr = max_t(u32, 1, min_slot);
3137 
3138 	if (path->slots[0] >= left_nritems)
3139 		push_space += data_size;
3140 
3141 	slot = path->slots[1];
3142 	i = left_nritems - 1;
3143 	while (i >= nr) {
3144 		item = btrfs_item_nr(left, i);
3145 
3146 		if (!empty && push_items > 0) {
3147 			if (path->slots[0] > i)
3148 				break;
3149 			if (path->slots[0] == i) {
3150 				int space = btrfs_leaf_free_space(root, left);
3151 				if (space + push_space * 2 > free_space)
3152 					break;
3153 			}
3154 		}
3155 
3156 		if (path->slots[0] == i)
3157 			push_space += data_size;
3158 
3159 		this_item_size = btrfs_item_size(left, item);
3160 		if (this_item_size + sizeof(*item) + push_space > free_space)
3161 			break;
3162 
3163 		push_items++;
3164 		push_space += this_item_size + sizeof(*item);
3165 		if (i == 0)
3166 			break;
3167 		i--;
3168 	}
3169 
3170 	if (push_items == 0)
3171 		goto out_unlock;
3172 
3173 	if (!empty && push_items == left_nritems)
3174 		WARN_ON(1);
3175 
3176 	/* push left to right */
3177 	right_nritems = btrfs_header_nritems(right);
3178 
3179 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3180 	push_space -= leaf_data_end(root, left);
3181 
3182 	/* make room in the right data area */
3183 	data_end = leaf_data_end(root, right);
3184 	memmove_extent_buffer(right,
3185 			      btrfs_leaf_data(right) + data_end - push_space,
3186 			      btrfs_leaf_data(right) + data_end,
3187 			      BTRFS_LEAF_DATA_SIZE(root) - data_end);
3188 
3189 	/* copy from the left data area */
3190 	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3191 		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
3192 		     btrfs_leaf_data(left) + leaf_data_end(root, left),
3193 		     push_space);
3194 
3195 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3196 			      btrfs_item_nr_offset(0),
3197 			      right_nritems * sizeof(struct btrfs_item));
3198 
3199 	/* copy the items from left to right */
3200 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3201 		   btrfs_item_nr_offset(left_nritems - push_items),
3202 		   push_items * sizeof(struct btrfs_item));
3203 
3204 	/* update the item pointers */
3205 	right_nritems += push_items;
3206 	btrfs_set_header_nritems(right, right_nritems);
3207 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3208 	for (i = 0; i < right_nritems; i++) {
3209 		item = btrfs_item_nr(right, i);
3210 		push_space -= btrfs_token_item_size(right, item, &token);
3211 		btrfs_set_token_item_offset(right, item, push_space, &token);
3212 	}
3213 
3214 	left_nritems -= push_items;
3215 	btrfs_set_header_nritems(left, left_nritems);
3216 
3217 	if (left_nritems)
3218 		btrfs_mark_buffer_dirty(left);
3219 	else
3220 		clean_tree_block(trans, root, left);
3221 
3222 	btrfs_mark_buffer_dirty(right);
3223 
3224 	btrfs_item_key(right, &disk_key, 0);
3225 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3226 	btrfs_mark_buffer_dirty(upper);
3227 
3228 	/* then fixup the leaf pointer in the path */
3229 	if (path->slots[0] >= left_nritems) {
3230 		path->slots[0] -= left_nritems;
3231 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3232 			clean_tree_block(trans, root, path->nodes[0]);
3233 		btrfs_tree_unlock(path->nodes[0]);
3234 		free_extent_buffer(path->nodes[0]);
3235 		path->nodes[0] = right;
3236 		path->slots[1] += 1;
3237 	} else {
3238 		btrfs_tree_unlock(right);
3239 		free_extent_buffer(right);
3240 	}
3241 	return 0;
3242 
3243 out_unlock:
3244 	btrfs_tree_unlock(right);
3245 	free_extent_buffer(right);
3246 	return 1;
3247 }
3248 
3249 /*
3250  * push some data in the path leaf to the right, trying to free up at
3251  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3252  *
3253  * returns 1 if the push failed because the other node didn't have enough
3254  * room, 0 if everything worked out and < 0 if there were major errors.
3255  *
3256  * this will push starting from min_slot to the end of the leaf.  It won't
3257  * push any slot lower than min_slot
3258  */
3259 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3260 			   *root, struct btrfs_path *path,
3261 			   int min_data_size, int data_size,
3262 			   int empty, u32 min_slot)
3263 {
3264 	struct extent_buffer *left = path->nodes[0];
3265 	struct extent_buffer *right;
3266 	struct extent_buffer *upper;
3267 	int slot;
3268 	int free_space;
3269 	u32 left_nritems;
3270 	int ret;
3271 
3272 	if (!path->nodes[1])
3273 		return 1;
3274 
3275 	slot = path->slots[1];
3276 	upper = path->nodes[1];
3277 	if (slot >= btrfs_header_nritems(upper) - 1)
3278 		return 1;
3279 
3280 	btrfs_assert_tree_locked(path->nodes[1]);
3281 
3282 	right = read_node_slot(root, upper, slot + 1);
3283 	if (right == NULL)
3284 		return 1;
3285 
3286 	btrfs_tree_lock(right);
3287 	btrfs_set_lock_blocking(right);
3288 
3289 	free_space = btrfs_leaf_free_space(root, right);
3290 	if (free_space < data_size)
3291 		goto out_unlock;
3292 
3293 	/* cow and double check */
3294 	ret = btrfs_cow_block(trans, root, right, upper,
3295 			      slot + 1, &right);
3296 	if (ret)
3297 		goto out_unlock;
3298 
3299 	free_space = btrfs_leaf_free_space(root, right);
3300 	if (free_space < data_size)
3301 		goto out_unlock;
3302 
3303 	left_nritems = btrfs_header_nritems(left);
3304 	if (left_nritems == 0)
3305 		goto out_unlock;
3306 
3307 	return __push_leaf_right(trans, root, path, min_data_size, empty,
3308 				right, free_space, left_nritems, min_slot);
3309 out_unlock:
3310 	btrfs_tree_unlock(right);
3311 	free_extent_buffer(right);
3312 	return 1;
3313 }
3314 
3315 /*
3316  * push some data in the path leaf to the left, trying to free up at
3317  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3318  *
3319  * max_slot can put a limit on how far into the leaf we'll push items.  The
3320  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3321  * items
3322  */
3323 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3324 				     struct btrfs_root *root,
3325 				     struct btrfs_path *path, int data_size,
3326 				     int empty, struct extent_buffer *left,
3327 				     int free_space, u32 right_nritems,
3328 				     u32 max_slot)
3329 {
3330 	struct btrfs_disk_key disk_key;
3331 	struct extent_buffer *right = path->nodes[0];
3332 	int i;
3333 	int push_space = 0;
3334 	int push_items = 0;
3335 	struct btrfs_item *item;
3336 	u32 old_left_nritems;
3337 	u32 nr;
3338 	int ret = 0;
3339 	u32 this_item_size;
3340 	u32 old_left_item_size;
3341 	struct btrfs_map_token token;
3342 
3343 	btrfs_init_map_token(&token);
3344 
3345 	if (empty)
3346 		nr = min(right_nritems, max_slot);
3347 	else
3348 		nr = min(right_nritems - 1, max_slot);
3349 
3350 	for (i = 0; i < nr; i++) {
3351 		item = btrfs_item_nr(right, i);
3352 
3353 		if (!empty && push_items > 0) {
3354 			if (path->slots[0] < i)
3355 				break;
3356 			if (path->slots[0] == i) {
3357 				int space = btrfs_leaf_free_space(root, right);
3358 				if (space + push_space * 2 > free_space)
3359 					break;
3360 			}
3361 		}
3362 
3363 		if (path->slots[0] == i)
3364 			push_space += data_size;
3365 
3366 		this_item_size = btrfs_item_size(right, item);
3367 		if (this_item_size + sizeof(*item) + push_space > free_space)
3368 			break;
3369 
3370 		push_items++;
3371 		push_space += this_item_size + sizeof(*item);
3372 	}
3373 
3374 	if (push_items == 0) {
3375 		ret = 1;
3376 		goto out;
3377 	}
3378 	if (!empty && push_items == btrfs_header_nritems(right))
3379 		WARN_ON(1);
3380 
3381 	/* push data from right to left */
3382 	copy_extent_buffer(left, right,
3383 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3384 			   btrfs_item_nr_offset(0),
3385 			   push_items * sizeof(struct btrfs_item));
3386 
3387 	push_space = BTRFS_LEAF_DATA_SIZE(root) -
3388 		     btrfs_item_offset_nr(right, push_items - 1);
3389 
3390 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3391 		     leaf_data_end(root, left) - push_space,
3392 		     btrfs_leaf_data(right) +
3393 		     btrfs_item_offset_nr(right, push_items - 1),
3394 		     push_space);
3395 	old_left_nritems = btrfs_header_nritems(left);
3396 	BUG_ON(old_left_nritems <= 0);
3397 
3398 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3399 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3400 		u32 ioff;
3401 
3402 		item = btrfs_item_nr(left, i);
3403 
3404 		ioff = btrfs_token_item_offset(left, item, &token);
3405 		btrfs_set_token_item_offset(left, item,
3406 		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3407 		      &token);
3408 	}
3409 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3410 
3411 	/* fixup right node */
3412 	if (push_items > right_nritems) {
3413 		printk(KERN_CRIT "push items %d nr %u\n", push_items,
3414 		       right_nritems);
3415 		WARN_ON(1);
3416 	}
3417 
3418 	if (push_items < right_nritems) {
3419 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3420 						  leaf_data_end(root, right);
3421 		memmove_extent_buffer(right, btrfs_leaf_data(right) +
3422 				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
3423 				      btrfs_leaf_data(right) +
3424 				      leaf_data_end(root, right), push_space);
3425 
3426 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3427 			      btrfs_item_nr_offset(push_items),
3428 			     (btrfs_header_nritems(right) - push_items) *
3429 			     sizeof(struct btrfs_item));
3430 	}
3431 	right_nritems -= push_items;
3432 	btrfs_set_header_nritems(right, right_nritems);
3433 	push_space = BTRFS_LEAF_DATA_SIZE(root);
3434 	for (i = 0; i < right_nritems; i++) {
3435 		item = btrfs_item_nr(right, i);
3436 
3437 		push_space = push_space - btrfs_token_item_size(right,
3438 								item, &token);
3439 		btrfs_set_token_item_offset(right, item, push_space, &token);
3440 	}
3441 
3442 	btrfs_mark_buffer_dirty(left);
3443 	if (right_nritems)
3444 		btrfs_mark_buffer_dirty(right);
3445 	else
3446 		clean_tree_block(trans, root, right);
3447 
3448 	btrfs_item_key(right, &disk_key, 0);
3449 	fixup_low_keys(trans, root, path, &disk_key, 1);
3450 
3451 	/* then fixup the leaf pointer in the path */
3452 	if (path->slots[0] < push_items) {
3453 		path->slots[0] += old_left_nritems;
3454 		btrfs_tree_unlock(path->nodes[0]);
3455 		free_extent_buffer(path->nodes[0]);
3456 		path->nodes[0] = left;
3457 		path->slots[1] -= 1;
3458 	} else {
3459 		btrfs_tree_unlock(left);
3460 		free_extent_buffer(left);
3461 		path->slots[0] -= push_items;
3462 	}
3463 	BUG_ON(path->slots[0] < 0);
3464 	return ret;
3465 out:
3466 	btrfs_tree_unlock(left);
3467 	free_extent_buffer(left);
3468 	return ret;
3469 }
3470 
3471 /*
3472  * push some data in the path leaf to the left, trying to free up at
3473  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3474  *
3475  * max_slot can put a limit on how far into the leaf we'll push items.  The
3476  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3477  * items
3478  */
3479 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3480 			  *root, struct btrfs_path *path, int min_data_size,
3481 			  int data_size, int empty, u32 max_slot)
3482 {
3483 	struct extent_buffer *right = path->nodes[0];
3484 	struct extent_buffer *left;
3485 	int slot;
3486 	int free_space;
3487 	u32 right_nritems;
3488 	int ret = 0;
3489 
3490 	slot = path->slots[1];
3491 	if (slot == 0)
3492 		return 1;
3493 	if (!path->nodes[1])
3494 		return 1;
3495 
3496 	right_nritems = btrfs_header_nritems(right);
3497 	if (right_nritems == 0)
3498 		return 1;
3499 
3500 	btrfs_assert_tree_locked(path->nodes[1]);
3501 
3502 	left = read_node_slot(root, path->nodes[1], slot - 1);
3503 	if (left == NULL)
3504 		return 1;
3505 
3506 	btrfs_tree_lock(left);
3507 	btrfs_set_lock_blocking(left);
3508 
3509 	free_space = btrfs_leaf_free_space(root, left);
3510 	if (free_space < data_size) {
3511 		ret = 1;
3512 		goto out;
3513 	}
3514 
3515 	/* cow and double check */
3516 	ret = btrfs_cow_block(trans, root, left,
3517 			      path->nodes[1], slot - 1, &left);
3518 	if (ret) {
3519 		/* we hit -ENOSPC, but it isn't fatal here */
3520 		if (ret == -ENOSPC)
3521 			ret = 1;
3522 		goto out;
3523 	}
3524 
3525 	free_space = btrfs_leaf_free_space(root, left);
3526 	if (free_space < data_size) {
3527 		ret = 1;
3528 		goto out;
3529 	}
3530 
3531 	return __push_leaf_left(trans, root, path, min_data_size,
3532 			       empty, left, free_space, right_nritems,
3533 			       max_slot);
3534 out:
3535 	btrfs_tree_unlock(left);
3536 	free_extent_buffer(left);
3537 	return ret;
3538 }
3539 
3540 /*
3541  * split the path's leaf in two, making sure there is at least data_size
3542  * available for the resulting leaf level of the path.
3543  */
3544 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3545 				    struct btrfs_root *root,
3546 				    struct btrfs_path *path,
3547 				    struct extent_buffer *l,
3548 				    struct extent_buffer *right,
3549 				    int slot, int mid, int nritems)
3550 {
3551 	int data_copy_size;
3552 	int rt_data_off;
3553 	int i;
3554 	struct btrfs_disk_key disk_key;
3555 	struct btrfs_map_token token;
3556 
3557 	btrfs_init_map_token(&token);
3558 
3559 	nritems = nritems - mid;
3560 	btrfs_set_header_nritems(right, nritems);
3561 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3562 
3563 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3564 			   btrfs_item_nr_offset(mid),
3565 			   nritems * sizeof(struct btrfs_item));
3566 
3567 	copy_extent_buffer(right, l,
3568 		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3569 		     data_copy_size, btrfs_leaf_data(l) +
3570 		     leaf_data_end(root, l), data_copy_size);
3571 
3572 	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3573 		      btrfs_item_end_nr(l, mid);
3574 
3575 	for (i = 0; i < nritems; i++) {
3576 		struct btrfs_item *item = btrfs_item_nr(right, i);
3577 		u32 ioff;
3578 
3579 		ioff = btrfs_token_item_offset(right, item, &token);
3580 		btrfs_set_token_item_offset(right, item,
3581 					    ioff + rt_data_off, &token);
3582 	}
3583 
3584 	btrfs_set_header_nritems(l, mid);
3585 	btrfs_item_key(right, &disk_key, 0);
3586 	insert_ptr(trans, root, path, &disk_key, right->start,
3587 		   path->slots[1] + 1, 1, 0);
3588 
3589 	btrfs_mark_buffer_dirty(right);
3590 	btrfs_mark_buffer_dirty(l);
3591 	BUG_ON(path->slots[0] != slot);
3592 
3593 	if (mid <= slot) {
3594 		btrfs_tree_unlock(path->nodes[0]);
3595 		free_extent_buffer(path->nodes[0]);
3596 		path->nodes[0] = right;
3597 		path->slots[0] -= mid;
3598 		path->slots[1] += 1;
3599 	} else {
3600 		btrfs_tree_unlock(right);
3601 		free_extent_buffer(right);
3602 	}
3603 
3604 	BUG_ON(path->slots[0] < 0);
3605 }
3606 
3607 /*
3608  * double splits happen when we need to insert a big item in the middle
3609  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
3610  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3611  *          A                 B                 C
3612  *
3613  * We avoid this by trying to push the items on either side of our target
3614  * into the adjacent leaves.  If all goes well we can avoid the double split
3615  * completely.
3616  */
3617 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3618 					  struct btrfs_root *root,
3619 					  struct btrfs_path *path,
3620 					  int data_size)
3621 {
3622 	int ret;
3623 	int progress = 0;
3624 	int slot;
3625 	u32 nritems;
3626 
3627 	slot = path->slots[0];
3628 
3629 	/*
3630 	 * try to push all the items after our slot into the
3631 	 * right leaf
3632 	 */
3633 	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3634 	if (ret < 0)
3635 		return ret;
3636 
3637 	if (ret == 0)
3638 		progress++;
3639 
3640 	nritems = btrfs_header_nritems(path->nodes[0]);
3641 	/*
3642 	 * our goal is to get our slot at the start or end of a leaf.  If
3643 	 * we've done so we're done
3644 	 */
3645 	if (path->slots[0] == 0 || path->slots[0] == nritems)
3646 		return 0;
3647 
3648 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3649 		return 0;
3650 
3651 	/* try to push all the items before our slot into the left leaf */
3652 	slot = path->slots[0];
3653 	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3654 	if (ret < 0)
3655 		return ret;
3656 
3657 	if (ret == 0)
3658 		progress++;
3659 
3660 	if (progress)
3661 		return 0;
3662 	return 1;
3663 }
3664 
3665 /*
3666  * split the path's leaf in two, making sure there is at least data_size
3667  * available for the resulting leaf level of the path.
3668  *
3669  * returns 0 if all went well and < 0 on failure.
3670  */
3671 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3672 			       struct btrfs_root *root,
3673 			       struct btrfs_key *ins_key,
3674 			       struct btrfs_path *path, int data_size,
3675 			       int extend)
3676 {
3677 	struct btrfs_disk_key disk_key;
3678 	struct extent_buffer *l;
3679 	u32 nritems;
3680 	int mid;
3681 	int slot;
3682 	struct extent_buffer *right;
3683 	int ret = 0;
3684 	int wret;
3685 	int split;
3686 	int num_doubles = 0;
3687 	int tried_avoid_double = 0;
3688 
3689 	l = path->nodes[0];
3690 	slot = path->slots[0];
3691 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
3692 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3693 		return -EOVERFLOW;
3694 
3695 	/* first try to make some room by pushing left and right */
3696 	if (data_size) {
3697 		wret = push_leaf_right(trans, root, path, data_size,
3698 				       data_size, 0, 0);
3699 		if (wret < 0)
3700 			return wret;
3701 		if (wret) {
3702 			wret = push_leaf_left(trans, root, path, data_size,
3703 					      data_size, 0, (u32)-1);
3704 			if (wret < 0)
3705 				return wret;
3706 		}
3707 		l = path->nodes[0];
3708 
3709 		/* did the pushes work? */
3710 		if (btrfs_leaf_free_space(root, l) >= data_size)
3711 			return 0;
3712 	}
3713 
3714 	if (!path->nodes[1]) {
3715 		ret = insert_new_root(trans, root, path, 1);
3716 		if (ret)
3717 			return ret;
3718 	}
3719 again:
3720 	split = 1;
3721 	l = path->nodes[0];
3722 	slot = path->slots[0];
3723 	nritems = btrfs_header_nritems(l);
3724 	mid = (nritems + 1) / 2;
3725 
3726 	if (mid <= slot) {
3727 		if (nritems == 1 ||
3728 		    leaf_space_used(l, mid, nritems - mid) + data_size >
3729 			BTRFS_LEAF_DATA_SIZE(root)) {
3730 			if (slot >= nritems) {
3731 				split = 0;
3732 			} else {
3733 				mid = slot;
3734 				if (mid != nritems &&
3735 				    leaf_space_used(l, mid, nritems - mid) +
3736 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3737 					if (data_size && !tried_avoid_double)
3738 						goto push_for_double;
3739 					split = 2;
3740 				}
3741 			}
3742 		}
3743 	} else {
3744 		if (leaf_space_used(l, 0, mid) + data_size >
3745 			BTRFS_LEAF_DATA_SIZE(root)) {
3746 			if (!extend && data_size && slot == 0) {
3747 				split = 0;
3748 			} else if ((extend || !data_size) && slot == 0) {
3749 				mid = 1;
3750 			} else {
3751 				mid = slot;
3752 				if (mid != nritems &&
3753 				    leaf_space_used(l, mid, nritems - mid) +
3754 				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3755 					if (data_size && !tried_avoid_double)
3756 						goto push_for_double;
3757 					split = 2;
3758 				}
3759 			}
3760 		}
3761 	}
3762 
3763 	if (split == 0)
3764 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
3765 	else
3766 		btrfs_item_key(l, &disk_key, mid);
3767 
3768 	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
3769 					root->root_key.objectid,
3770 					&disk_key, 0, l->start, 0);
3771 	if (IS_ERR(right))
3772 		return PTR_ERR(right);
3773 
3774 	root_add_used(root, root->leafsize);
3775 
3776 	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
3777 	btrfs_set_header_bytenr(right, right->start);
3778 	btrfs_set_header_generation(right, trans->transid);
3779 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
3780 	btrfs_set_header_owner(right, root->root_key.objectid);
3781 	btrfs_set_header_level(right, 0);
3782 	write_extent_buffer(right, root->fs_info->fsid,
3783 			    (unsigned long)btrfs_header_fsid(right),
3784 			    BTRFS_FSID_SIZE);
3785 
3786 	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3787 			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
3788 			    BTRFS_UUID_SIZE);
3789 
3790 	if (split == 0) {
3791 		if (mid <= slot) {
3792 			btrfs_set_header_nritems(right, 0);
3793 			insert_ptr(trans, root, path, &disk_key, right->start,
3794 				   path->slots[1] + 1, 1, 0);
3795 			btrfs_tree_unlock(path->nodes[0]);
3796 			free_extent_buffer(path->nodes[0]);
3797 			path->nodes[0] = right;
3798 			path->slots[0] = 0;
3799 			path->slots[1] += 1;
3800 		} else {
3801 			btrfs_set_header_nritems(right, 0);
3802 			insert_ptr(trans, root, path, &disk_key, right->start,
3803 					  path->slots[1], 1, 0);
3804 			btrfs_tree_unlock(path->nodes[0]);
3805 			free_extent_buffer(path->nodes[0]);
3806 			path->nodes[0] = right;
3807 			path->slots[0] = 0;
3808 			if (path->slots[1] == 0)
3809 				fixup_low_keys(trans, root, path,
3810 					       &disk_key, 1);
3811 		}
3812 		btrfs_mark_buffer_dirty(right);
3813 		return ret;
3814 	}
3815 
3816 	copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3817 
3818 	if (split == 2) {
3819 		BUG_ON(num_doubles != 0);
3820 		num_doubles++;
3821 		goto again;
3822 	}
3823 
3824 	return 0;
3825 
3826 push_for_double:
3827 	push_for_double_split(trans, root, path, data_size);
3828 	tried_avoid_double = 1;
3829 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3830 		return 0;
3831 	goto again;
3832 }
3833 
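/*
 * For reference, the 'split' values chosen by the logic above:
 *
 *   split == 1: the common case, move everything from 'mid' onward into
 *               the newly allocated right leaf.
 *   split == 0: the target slot sits at the very edge of the leaf, so no
 *               items are moved at all; the new right leaf starts empty
 *               and simply receives the pending insertion.
 *   split == 2: even after splitting at the target slot, one half still
 *               can't hold data_size, so a second split is required (a
 *               double split, allowed at most once, see num_doubles).
 */
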
3834 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3835 					 struct btrfs_root *root,
3836 					 struct btrfs_path *path, int ins_len)
3837 {
3838 	struct btrfs_key key;
3839 	struct extent_buffer *leaf;
3840 	struct btrfs_file_extent_item *fi;
3841 	u64 extent_len = 0;
3842 	u32 item_size;
3843 	int ret;
3844 
3845 	leaf = path->nodes[0];
3846 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3847 
3848 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3849 	       key.type != BTRFS_EXTENT_CSUM_KEY);
3850 
3851 	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3852 		return 0;
3853 
3854 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3855 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
3856 		fi = btrfs_item_ptr(leaf, path->slots[0],
3857 				    struct btrfs_file_extent_item);
3858 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3859 	}
3860 	btrfs_release_path(path);
3861 
3862 	path->keep_locks = 1;
3863 	path->search_for_split = 1;
3864 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3865 	path->search_for_split = 0;
3866 	if (ret < 0)
3867 		goto err;
3868 
3869 	ret = -EAGAIN;
3870 	leaf = path->nodes[0];
3871 	/* if our item isn't there or got smaller, return -EAGAIN now */
3872 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3873 		goto err;
3874 
3875 	/* the leaf has changed, it now has room.  return now */
3876 	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3877 		goto err;
3878 
3879 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
3880 		fi = btrfs_item_ptr(leaf, path->slots[0],
3881 				    struct btrfs_file_extent_item);
3882 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3883 			goto err;
3884 	}
3885 
3886 	btrfs_set_path_blocking(path);
3887 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
3888 	if (ret)
3889 		goto err;
3890 
3891 	path->keep_locks = 0;
3892 	btrfs_unlock_up_safe(path, 1);
3893 	return 0;
3894 err:
3895 	path->keep_locks = 0;
3896 	return ret;
3897 }
3898 
3899 static noinline int split_item(struct btrfs_trans_handle *trans,
3900 			       struct btrfs_root *root,
3901 			       struct btrfs_path *path,
3902 			       struct btrfs_key *new_key,
3903 			       unsigned long split_offset)
3904 {
3905 	struct extent_buffer *leaf;
3906 	struct btrfs_item *item;
3907 	struct btrfs_item *new_item;
3908 	int slot;
3909 	char *buf;
3910 	u32 nritems;
3911 	u32 item_size;
3912 	u32 orig_offset;
3913 	struct btrfs_disk_key disk_key;
3914 
3915 	leaf = path->nodes[0];
3916 	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3917 
3918 	btrfs_set_path_blocking(path);
3919 
3920 	item = btrfs_item_nr(leaf, path->slots[0]);
3921 	orig_offset = btrfs_item_offset(leaf, item);
3922 	item_size = btrfs_item_size(leaf, item);
3923 
3924 	buf = kmalloc(item_size, GFP_NOFS);
3925 	if (!buf)
3926 		return -ENOMEM;
3927 
3928 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3929 			    path->slots[0]), item_size);
3930 
3931 	slot = path->slots[0] + 1;
3932 	nritems = btrfs_header_nritems(leaf);
3933 	if (slot != nritems) {
3934 		/* shift the items */
3935 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3936 				btrfs_item_nr_offset(slot),
3937 				(nritems - slot) * sizeof(struct btrfs_item));
3938 	}
3939 
3940 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3941 	btrfs_set_item_key(leaf, &disk_key, slot);
3942 
3943 	new_item = btrfs_item_nr(leaf, slot);
3944 
3945 	btrfs_set_item_offset(leaf, new_item, orig_offset);
3946 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3947 
3948 	btrfs_set_item_offset(leaf, item,
3949 			      orig_offset + item_size - split_offset);
3950 	btrfs_set_item_size(leaf, item, split_offset);
3951 
3952 	btrfs_set_header_nritems(leaf, nritems + 1);
3953 
3954 	/* write the data for the start of the original item */
3955 	write_extent_buffer(leaf, buf,
3956 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
3957 			    split_offset);
3958 
3959 	/* write the data for the new item */
3960 	write_extent_buffer(leaf, buf + split_offset,
3961 			    btrfs_item_ptr_offset(leaf, slot),
3962 			    item_size - split_offset);
3963 	btrfs_mark_buffer_dirty(leaf);
3964 
3965 	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3966 	kfree(buf);
3967 	return 0;
3968 }
3969 
3970 /*
3971  * This function splits a single item into two items,
3972  * giving 'new_key' to the new item and splitting the
3973  * old one at split_offset (from the start of the item).
3974  *
3975  * The path may be released by this operation.  After
3976  * the split, the path is pointing to the old item.  The
3977  * new item is going to be in the same node as the old one.
3978  *
3979  * Note, the item being split must be smaller enough to live alone on
3980  * a tree block with room for one extra struct btrfs_item
3981  *
3982  * This allows us to split the item in place, keeping a lock on the
3983  * leaf the entire time.
3984  */
3985 int btrfs_split_item(struct btrfs_trans_handle *trans,
3986 		     struct btrfs_root *root,
3987 		     struct btrfs_path *path,
3988 		     struct btrfs_key *new_key,
3989 		     unsigned long split_offset)
3990 {
3991 	int ret;
3992 	ret = setup_leaf_for_split(trans, root, path,
3993 				   sizeof(struct btrfs_item));
3994 	if (ret)
3995 		return ret;
3996 
3997 	ret = split_item(trans, root, path, new_key, split_offset);
3998 	return ret;
3999 }
4000 
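/*
 * Illustrative sketch of a caller, with made-up values: split the item
 * at path->slots[0] after its first 16 bytes, giving the tail half a key
 * offset 16 higher.  -EAGAIN from setup_leaf_for_split means the item
 * moved or changed size while the path was dropped, and the caller
 * should search again and retry.
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset += 16;
 *	ret = btrfs_split_item(trans, root, path, &new_key, 16);
 */
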
4001 /*
4002  * This function duplicates an item, giving 'new_key' to the new item.
4003  * It guarantees both items live in the same tree leaf and the new item
4004  * is contiguous with the original item.
4005  *
4006  * This allows us to split a file extent in place, keeping a lock on the
4007  * leaf the entire time.
4008  */
4009 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4010 			 struct btrfs_root *root,
4011 			 struct btrfs_path *path,
4012 			 struct btrfs_key *new_key)
4013 {
4014 	struct extent_buffer *leaf;
4015 	int ret;
4016 	u32 item_size;
4017 
4018 	leaf = path->nodes[0];
4019 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4020 	ret = setup_leaf_for_split(trans, root, path,
4021 				   item_size + sizeof(struct btrfs_item));
4022 	if (ret)
4023 		return ret;
4024 
4025 	path->slots[0]++;
4026 	setup_items_for_insert(trans, root, path, new_key, &item_size,
4027 			       item_size, item_size +
4028 			       sizeof(struct btrfs_item), 1);
4029 	leaf = path->nodes[0];
4030 	memcpy_extent_buffer(leaf,
4031 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4032 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4033 			     item_size);
4034 	return 0;
4035 }
4036 
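/*
 * Sketch with a hypothetical 'new_offset': duplicate the file extent
 * item the path points at under a new key.  On return the path points
 * at the copy, which sits immediately after the original in the leaf.
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset = new_offset;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 */
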
4037 /*
4038  * make the item pointed to by the path smaller.  new_size indicates
4039  * how small to make it, and from_end tells us if we just chop bytes
4040  * off the end of the item or if we shift the item to chop bytes off
4041  * the front.
4042  */
4043 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4044 			 struct btrfs_root *root,
4045 			 struct btrfs_path *path,
4046 			 u32 new_size, int from_end)
4047 {
4048 	int slot;
4049 	struct extent_buffer *leaf;
4050 	struct btrfs_item *item;
4051 	u32 nritems;
4052 	unsigned int data_end;
4053 	unsigned int old_data_start;
4054 	unsigned int old_size;
4055 	unsigned int size_diff;
4056 	int i;
4057 	struct btrfs_map_token token;
4058 
4059 	btrfs_init_map_token(&token);
4060 
4061 	leaf = path->nodes[0];
4062 	slot = path->slots[0];
4063 
4064 	old_size = btrfs_item_size_nr(leaf, slot);
4065 	if (old_size == new_size)
4066 		return;
4067 
4068 	nritems = btrfs_header_nritems(leaf);
4069 	data_end = leaf_data_end(root, leaf);
4070 
4071 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4072 
4073 	size_diff = old_size - new_size;
4074 
4075 	BUG_ON(slot < 0);
4076 	BUG_ON(slot >= nritems);
4077 
4078 	/*
4079 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4080 	 */
4081 	/* first correct the data pointers */
4082 	for (i = slot; i < nritems; i++) {
4083 		u32 ioff;
4084 		item = btrfs_item_nr(leaf, i);
4085 
4086 		ioff = btrfs_token_item_offset(leaf, item, &token);
4087 		btrfs_set_token_item_offset(leaf, item,
4088 					    ioff + size_diff, &token);
4089 	}
4090 
4091 	/* shift the data */
4092 	if (from_end) {
4093 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4094 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4095 			      data_end, old_data_start + new_size - data_end);
4096 	} else {
4097 		struct btrfs_disk_key disk_key;
4098 		u64 offset;
4099 
4100 		btrfs_item_key(leaf, &disk_key, slot);
4101 
4102 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4103 			unsigned long ptr;
4104 			struct btrfs_file_extent_item *fi;
4105 
4106 			fi = btrfs_item_ptr(leaf, slot,
4107 					    struct btrfs_file_extent_item);
4108 			fi = (struct btrfs_file_extent_item *)(
4109 			     (unsigned long)fi - size_diff);
4110 
4111 			if (btrfs_file_extent_type(leaf, fi) ==
4112 			    BTRFS_FILE_EXTENT_INLINE) {
4113 				ptr = btrfs_item_ptr_offset(leaf, slot);
4114 				memmove_extent_buffer(leaf, ptr,
4115 				      (unsigned long)fi,
4116 				      offsetof(struct btrfs_file_extent_item,
4117 						 disk_bytenr));
4118 			}
4119 		}
4120 
4121 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4122 			      data_end + size_diff, btrfs_leaf_data(leaf) +
4123 			      data_end, old_data_start - data_end);
4124 
4125 		offset = btrfs_disk_key_offset(&disk_key);
4126 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4127 		btrfs_set_item_key(leaf, &disk_key, slot);
4128 		if (slot == 0)
4129 			fixup_low_keys(trans, root, path, &disk_key, 1);
4130 	}
4131 
4132 	item = btrfs_item_nr(leaf, slot);
4133 	btrfs_set_item_size(leaf, item, new_size);
4134 	btrfs_mark_buffer_dirty(leaf);
4135 
4136 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4137 		btrfs_print_leaf(root, leaf);
4138 		BUG();
4139 	}
4140 }
4141 
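/*
 * Example with hypothetical sizes: for an item that is currently 100
 * bytes,
 *
 *	btrfs_truncate_item(trans, root, path, 60, 1);
 *
 * chops 40 bytes off the end of the item data, while
 *
 *	btrfs_truncate_item(trans, root, path, 60, 0);
 *
 * discards the first 40 bytes instead and, as the from_end == 0 branch
 * above shows, bumps the item's key offset by the 40 discarded bytes.
 */
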
4142 /*
4143  * make the item pointed to by the path bigger, data_size is the added size.
4144  */
4145 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4146 		       struct btrfs_root *root, struct btrfs_path *path,
4147 		       u32 data_size)
4148 {
4149 	int slot;
4150 	struct extent_buffer *leaf;
4151 	struct btrfs_item *item;
4152 	u32 nritems;
4153 	unsigned int data_end;
4154 	unsigned int old_data;
4155 	unsigned int old_size;
4156 	int i;
4157 	struct btrfs_map_token token;
4158 
4159 	btrfs_init_map_token(&token);
4160 
4161 	leaf = path->nodes[0];
4162 
4163 	nritems = btrfs_header_nritems(leaf);
4164 	data_end = leaf_data_end(root, leaf);
4165 
4166 	if (btrfs_leaf_free_space(root, leaf) < data_size) {
4167 		btrfs_print_leaf(root, leaf);
4168 		BUG();
4169 	}
4170 	slot = path->slots[0];
4171 	old_data = btrfs_item_end_nr(leaf, slot);
4172 
4173 	BUG_ON(slot < 0);
4174 	if (slot >= nritems) {
4175 		btrfs_print_leaf(root, leaf);
4176 		printk(KERN_CRIT "slot %d too large, nritems %d\n",
4177 		       slot, nritems);
4178 		BUG_ON(1);
4179 	}
4180 
4181 	/*
4182 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4183 	 */
4184 	/* first correct the data pointers */
4185 	for (i = slot; i < nritems; i++) {
4186 		u32 ioff;
4187 		item = btrfs_item_nr(leaf, i);
4188 
4189 		ioff = btrfs_token_item_offset(leaf, item, &token);
4190 		btrfs_set_token_item_offset(leaf, item,
4191 					    ioff - data_size, &token);
4192 	}
4193 
4194 	/* shift the data */
4195 	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4196 		      data_end - data_size, btrfs_leaf_data(leaf) +
4197 		      data_end, old_data - data_end);
4198 
4199 	data_end = old_data;
4200 	old_size = btrfs_item_size_nr(leaf, slot);
4201 	item = btrfs_item_nr(leaf, slot);
4202 	btrfs_set_item_size(leaf, item, old_size + data_size);
4203 	btrfs_mark_buffer_dirty(leaf);
4204 
4205 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4206 		btrfs_print_leaf(root, leaf);
4207 		BUG();
4208 	}
4209 }
4210 
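/*
 * Sketch with a hypothetical size: grow the current item by 8 bytes.
 * The caller must check for room first, since this function BUGs rather
 * than failing gracefully when the leaf is too full.
 *
 *	if (btrfs_leaf_free_space(root, path->nodes[0]) >= 8)
 *		btrfs_extend_item(trans, root, path, 8);
 */
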
4211 /*
4212  * Given a key and some data, insert items into the tree.
4213  * This does all the path init required, making room in the tree if needed.
4214  * Returns the number of keys that were inserted.
4215  */
4216 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
4217 			    struct btrfs_root *root,
4218 			    struct btrfs_path *path,
4219 			    struct btrfs_key *cpu_key, u32 *data_size,
4220 			    int nr)
4221 {
4222 	struct extent_buffer *leaf;
4223 	struct btrfs_item *item;
4224 	int ret = 0;
4225 	int slot;
4226 	int i;
4227 	u32 nritems;
4228 	u32 total_data = 0;
4229 	u32 total_size = 0;
4230 	unsigned int data_end;
4231 	struct btrfs_disk_key disk_key;
4232 	struct btrfs_key found_key;
4233 	struct btrfs_map_token token;
4234 
4235 	btrfs_init_map_token(&token);
4236 
4237 	for (i = 0; i < nr; i++) {
4238 		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
4239 		    BTRFS_LEAF_DATA_SIZE(root)) {
4240 			nr = i;
4241 			break;
4242 		}
4243 		total_data += data_size[i];
4244 		total_size += data_size[i] + sizeof(struct btrfs_item);
4245 	}
4246 	BUG_ON(nr == 0);
4247 
4248 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4249 	if (ret == 0)
4250 		return -EEXIST;
4251 	if (ret < 0)
4252 		goto out;
4253 
4254 	leaf = path->nodes[0];
4255 
4256 	nritems = btrfs_header_nritems(leaf);
4257 	data_end = leaf_data_end(root, leaf);
4258 
4259 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
4260 		for (i = nr - 1; i >= 0; i--) {
4261 			total_data -= data_size[i];
4262 			total_size -= data_size[i] + sizeof(struct btrfs_item);
4263 			if (total_size < btrfs_leaf_free_space(root, leaf))
4264 				break;
4265 		}
4266 		nr = i;
4267 	}
4268 
4269 	slot = path->slots[0];
4270 	BUG_ON(slot < 0);
4271 
4272 	if (slot != nritems) {
4273 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4274 
4275 		item = btrfs_item_nr(leaf, slot);
4276 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4277 
4278 		/* figure out how many keys we can insert in here */
4279 		total_data = data_size[0];
4280 		for (i = 1; i < nr; i++) {
4281 			if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
4282 				break;
4283 			total_data += data_size[i];
4284 		}
4285 		nr = i;
4286 
4287 		if (old_data < data_end) {
4288 			btrfs_print_leaf(root, leaf);
4289 			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4290 			       slot, old_data, data_end);
4291 			BUG_ON(1);
4292 		}
4293 		/*
4294 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4295 		 */
4296 		/* first correct the data pointers */
4297 		for (i = slot; i < nritems; i++) {
4298 			u32 ioff;
4299 
4300 			item = btrfs_item_nr(leaf, i);
4301 			ioff = btrfs_token_item_offset(leaf, item, &token);
4302 			btrfs_set_token_item_offset(leaf, item,
4303 						    ioff - total_data, &token);
4304 		}
4305 		/* shift the items */
4306 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4307 			      btrfs_item_nr_offset(slot),
4308 			      (nritems - slot) * sizeof(struct btrfs_item));
4309 
4310 		/* shift the data */
4311 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4312 			      data_end - total_data, btrfs_leaf_data(leaf) +
4313 			      data_end, old_data - data_end);
4314 		data_end = old_data;
4315 	} else {
4316 		/*
4317 		 * this sucks but it has to be done: if we are inserting at
4318 		 * the end of the leaf, only insert 1 of the items, since we
4319 		 * have no way of knowing what's on the next leaf and we'd
4320 		 * have to drop our current locks to figure it out
4321 		 */
4322 		nr = 1;
4323 	}
4324 
4325 	/* setup the item for the new data */
4326 	for (i = 0; i < nr; i++) {
4327 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4328 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4329 		item = btrfs_item_nr(leaf, slot + i);
4330 		btrfs_set_token_item_offset(leaf, item,
4331 					    data_end - data_size[i], &token);
4332 		data_end -= data_size[i];
4333 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4334 	}
4335 	btrfs_set_header_nritems(leaf, nritems + nr);
4336 	btrfs_mark_buffer_dirty(leaf);
4337 
4338 	ret = 0;
4339 	if (slot == 0) {
4340 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4341 		fixup_low_keys(trans, root, path, &disk_key, 1);
4342 	}
4343 
4344 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4345 		btrfs_print_leaf(root, leaf);
4346 		BUG();
4347 	}
4348 out:
4349 	if (!ret)
4350 		ret = nr;
4351 	return ret;
4352 }
4353 
4354 /*
4355  * this is a helper for btrfs_insert_empty_items, the main goal here is
4356  * to save stack depth by doing the bulk of the work in a function
4357  * that doesn't call btrfs_search_slot
4358  */
4359 void setup_items_for_insert(struct btrfs_trans_handle *trans,
4360 			    struct btrfs_root *root, struct btrfs_path *path,
4361 			    struct btrfs_key *cpu_key, u32 *data_size,
4362 			    u32 total_data, u32 total_size, int nr)
4363 {
4364 	struct btrfs_item *item;
4365 	int i;
4366 	u32 nritems;
4367 	unsigned int data_end;
4368 	struct btrfs_disk_key disk_key;
4369 	struct extent_buffer *leaf;
4370 	int slot;
4371 	struct btrfs_map_token token;
4372 
4373 	btrfs_init_map_token(&token);
4374 
4375 	leaf = path->nodes[0];
4376 	slot = path->slots[0];
4377 
4378 	nritems = btrfs_header_nritems(leaf);
4379 	data_end = leaf_data_end(root, leaf);
4380 
4381 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
4382 		btrfs_print_leaf(root, leaf);
4383 		printk(KERN_CRIT "not enough freespace need %u have %d\n",
4384 		       total_size, btrfs_leaf_free_space(root, leaf));
4385 		BUG();
4386 	}
4387 
4388 	if (slot != nritems) {
4389 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4390 
4391 		if (old_data < data_end) {
4392 			btrfs_print_leaf(root, leaf);
4393 			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4394 			       slot, old_data, data_end);
4395 			BUG_ON(1);
4396 		}
4397 		/*
4398 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4399 		 */
4400 		/* first correct the data pointers */
4401 		for (i = slot; i < nritems; i++) {
4402 			u32 ioff;
4403 
4404 			item = btrfs_item_nr(leaf, i);
4405 			ioff = btrfs_token_item_offset(leaf, item, &token);
4406 			btrfs_set_token_item_offset(leaf, item,
4407 						    ioff - total_data, &token);
4408 		}
4409 		/* shift the items */
4410 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4411 			      btrfs_item_nr_offset(slot),
4412 			      (nritems - slot) * sizeof(struct btrfs_item));
4413 
4414 		/* shift the data */
4415 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4416 			      data_end - total_data, btrfs_leaf_data(leaf) +
4417 			      data_end, old_data - data_end);
4418 		data_end = old_data;
4419 	}
4420 
4421 	/* setup the item for the new data */
4422 	for (i = 0; i < nr; i++) {
4423 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4424 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4425 		item = btrfs_item_nr(leaf, slot + i);
4426 		btrfs_set_token_item_offset(leaf, item,
4427 					    data_end - data_size[i], &token);
4428 		data_end -= data_size[i];
4429 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4430 	}
4431 
4432 	btrfs_set_header_nritems(leaf, nritems + nr);
4433 
4434 	if (slot == 0) {
4435 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4436 		fixup_low_keys(trans, root, path, &disk_key, 1);
4437 	}
4438 	btrfs_unlock_up_safe(path, 1);
4439 	btrfs_mark_buffer_dirty(leaf);
4440 
4441 	if (btrfs_leaf_free_space(root, leaf) < 0) {
4442 		btrfs_print_leaf(root, leaf);
4443 		BUG();
4444 	}
4445 }
4446 
4447 /*
4448  * Given a key and some data, insert items into the tree.
4449  * This does all the path init required, making room in the tree if needed.
4450  */
4451 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4452 			    struct btrfs_root *root,
4453 			    struct btrfs_path *path,
4454 			    struct btrfs_key *cpu_key, u32 *data_size,
4455 			    int nr)
4456 {
4457 	int ret = 0;
4458 	int slot;
4459 	int i;
4460 	u32 total_size = 0;
4461 	u32 total_data = 0;
4462 
4463 	for (i = 0; i < nr; i++)
4464 		total_data += data_size[i];
4465 
4466 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4467 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4468 	if (ret == 0)
4469 		return -EEXIST;
4470 	if (ret < 0)
4471 		return ret;
4472 
4473 	slot = path->slots[0];
4474 	BUG_ON(slot < 0);
4475 
4476 	setup_items_for_insert(trans, root, path, cpu_key, data_size,
4477 			       total_data, total_size, nr);
4478 	return 0;
4479 }
4480 
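/*
 * Sketch of a typical caller, with made-up keys, sizes and buffers:
 * reserve room for two adjacent items in one search, then fill in the
 * payloads by hand.  The keys must be given in sorted order.
 *
 *	struct btrfs_key keys[2];	// caller fills these in
 *	u32 sizes[2] = { 8, 16 };
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *	if (!ret) {
 *		leaf = path->nodes[0];
 *		write_extent_buffer(leaf, data0,
 *			btrfs_item_ptr_offset(leaf, path->slots[0]), 8);
 *		write_extent_buffer(leaf, data1,
 *			btrfs_item_ptr_offset(leaf, path->slots[0] + 1), 16);
 *		btrfs_mark_buffer_dirty(leaf);
 *	}
 *	btrfs_release_path(path);
 */
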
4481 /*
4482  * Given a key and some data, insert an item into the tree.
4483  * This does all the path init required, making room in the tree if needed.
4484  */
4485 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4486 		      *root, struct btrfs_key *cpu_key, void *data, u32
4487 		      data_size)
4488 {
4489 	int ret = 0;
4490 	struct btrfs_path *path;
4491 	struct extent_buffer *leaf;
4492 	unsigned long ptr;
4493 
4494 	path = btrfs_alloc_path();
4495 	if (!path)
4496 		return -ENOMEM;
4497 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4498 	if (!ret) {
4499 		leaf = path->nodes[0];
4500 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4501 		write_extent_buffer(leaf, data, ptr, data_size);
4502 		btrfs_mark_buffer_dirty(leaf);
4503 	}
4504 	btrfs_free_path(path);
4505 	return ret;
4506 }
4507 
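/*
 * Minimal usage sketch; the key fields and buffer are hypothetical:
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_XATTR_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, data, data_len);
 */
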
4508 /*
4509  * delete the pointer from a given node.
4510  *
4511  * the tree should have been previously balanced so the deletion does not
4512  * empty a node.
4513  */
4514 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4515 		    struct btrfs_path *path, int level, int slot,
4516 		    int tree_mod_log)
4517 {
4518 	struct extent_buffer *parent = path->nodes[level];
4519 	u32 nritems;
4520 	int ret;
4521 
4522 	nritems = btrfs_header_nritems(parent);
4523 	if (slot != nritems - 1) {
4524 		if (tree_mod_log && level)
4525 			tree_mod_log_eb_move(root->fs_info, parent, slot,
4526 					     slot + 1, nritems - slot - 1);
4527 		memmove_extent_buffer(parent,
4528 			      btrfs_node_key_ptr_offset(slot),
4529 			      btrfs_node_key_ptr_offset(slot + 1),
4530 			      sizeof(struct btrfs_key_ptr) *
4531 			      (nritems - slot - 1));
4532 	}
4533 
4534 	if (tree_mod_log && level) {
4535 		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4536 					      MOD_LOG_KEY_REMOVE);
4537 		BUG_ON(ret < 0);
4538 	}
4539 
4540 	nritems--;
4541 	btrfs_set_header_nritems(parent, nritems);
4542 	if (nritems == 0 && parent == root->node) {
4543 		BUG_ON(btrfs_header_level(root->node) != 1);
4544 		/* just turn the root into a leaf and break */
4545 		btrfs_set_header_level(root->node, 0);
4546 	} else if (slot == 0) {
4547 		struct btrfs_disk_key disk_key;
4548 
4549 		btrfs_node_key(parent, &disk_key, 0);
4550 		fixup_low_keys(trans, root, path, &disk_key, level + 1);
4551 	}
4552 	btrfs_mark_buffer_dirty(parent);
4553 }
4554 
4555 /*
4556  * a helper function to delete the leaf pointed to by path->slots[1] and
4557  * path->nodes[1].
4558  *
4559  * This deletes the pointer in path->nodes[1] and frees the leaf
4560  * block extent.
4561  *
4562  * The path must have already been setup for deleting the leaf, including
4563  * all the proper balancing.  path->nodes[1] must be locked.
4564  */
4565 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4566 				    struct btrfs_root *root,
4567 				    struct btrfs_path *path,
4568 				    struct extent_buffer *leaf)
4569 {
4570 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4571 	del_ptr(trans, root, path, 1, path->slots[1], 1);
4572 
4573 	/*
4574 	 * btrfs_free_extent is expensive, we want to make sure we
4575 	 * aren't holding any locks when we call it
4576 	 */
4577 	btrfs_unlock_up_safe(path, 0);
4578 
4579 	root_sub_used(root, leaf->len);
4580 
4581 	extent_buffer_get(leaf);
4582 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4583 	free_extent_buffer_stale(leaf);
4584 }
4585 /*
4586  * delete the item at the leaf level in path.  If that empties
4587  * the leaf, remove it from the tree
4588  */
4589 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4590 		    struct btrfs_path *path, int slot, int nr)
4591 {
4592 	struct extent_buffer *leaf;
4593 	struct btrfs_item *item;
4594 	int last_off;
4595 	int dsize = 0;
4596 	int ret = 0;
4597 	int wret;
4598 	int i;
4599 	u32 nritems;
4600 	struct btrfs_map_token token;
4601 
4602 	btrfs_init_map_token(&token);
4603 
4604 	leaf = path->nodes[0];
4605 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4606 
4607 	for (i = 0; i < nr; i++)
4608 		dsize += btrfs_item_size_nr(leaf, slot + i);
4609 
4610 	nritems = btrfs_header_nritems(leaf);
4611 
4612 	if (slot + nr != nritems) {
4613 		int data_end = leaf_data_end(root, leaf);
4614 
4615 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4616 			      data_end + dsize,
4617 			      btrfs_leaf_data(leaf) + data_end,
4618 			      last_off - data_end);
4619 
4620 		for (i = slot + nr; i < nritems; i++) {
4621 			u32 ioff;
4622 
4623 			item = btrfs_item_nr(leaf, i);
4624 			ioff = btrfs_token_item_offset(leaf, item, &token);
4625 			btrfs_set_token_item_offset(leaf, item,
4626 						    ioff + dsize, &token);
4627 		}
4628 
4629 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4630 			      btrfs_item_nr_offset(slot + nr),
4631 			      sizeof(struct btrfs_item) *
4632 			      (nritems - slot - nr));
4633 	}
4634 	btrfs_set_header_nritems(leaf, nritems - nr);
4635 	nritems -= nr;
4636 
4637 	/* delete the leaf if we've emptied it */
4638 	if (nritems == 0) {
4639 		if (leaf == root->node) {
4640 			btrfs_set_header_level(leaf, 0);
4641 		} else {
4642 			btrfs_set_path_blocking(path);
4643 			clean_tree_block(trans, root, leaf);
4644 			btrfs_del_leaf(trans, root, path, leaf);
4645 		}
4646 	} else {
4647 		int used = leaf_space_used(leaf, 0, nritems);
4648 		if (slot == 0) {
4649 			struct btrfs_disk_key disk_key;
4650 
4651 			btrfs_item_key(leaf, &disk_key, 0);
4652 			fixup_low_keys(trans, root, path, &disk_key, 1);
4653 		}
4654 
4655 		/* delete the leaf if it is mostly empty */
4656 		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4657 			/* push_leaf_left fixes the path.
4658 			 * make sure the path still points to our leaf
4659 			 * for possible call to del_ptr below
4660 			 */
4661 			slot = path->slots[1];
4662 			extent_buffer_get(leaf);
4663 
4664 			btrfs_set_path_blocking(path);
4665 			wret = push_leaf_left(trans, root, path, 1, 1,
4666 					      1, (u32)-1);
4667 			if (wret < 0 && wret != -ENOSPC)
4668 				ret = wret;
4669 
4670 			if (path->nodes[0] == leaf &&
4671 			    btrfs_header_nritems(leaf)) {
4672 				wret = push_leaf_right(trans, root, path, 1,
4673 						       1, 1, 0);
4674 				if (wret < 0 && wret != -ENOSPC)
4675 					ret = wret;
4676 			}
4677 
4678 			if (btrfs_header_nritems(leaf) == 0) {
4679 				path->slots[1] = slot;
4680 				btrfs_del_leaf(trans, root, path, leaf);
4681 				free_extent_buffer(leaf);
4682 				ret = 0;
4683 			} else {
4684 				/* if we're still in the path, make sure
4685 				 * we're dirty.  Otherwise, one of the
4686 				 * push_leaf functions must have already
4687 				 * dirtied this buffer
4688 				 */
4689 				if (path->nodes[0] == leaf)
4690 					btrfs_mark_buffer_dirty(leaf);
4691 				free_extent_buffer(leaf);
4692 			}
4693 		} else {
4694 			btrfs_mark_buffer_dirty(leaf);
4695 		}
4696 	}
4697 	return ret;
4698 }
4699 
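/*
 * Sketch: delete just the item the path currently points at, which is
 * what the btrfs_del_item() wrapper in ctree.h boils down to:
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 */
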
4700 /*
4701  * search the tree again to find a leaf with lesser keys
4702  * returns 0 if it found something or 1 if there are no lesser leaves.
4703  * returns < 0 on io errors.
4704  *
4705  * This may release the path, and so you may lose any locks held at the
4706  * time you call it.
4707  */
4708 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4709 {
4710 	struct btrfs_key key;
4711 	struct btrfs_disk_key found_key;
4712 	int ret;
4713 
4714 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4715 
4716 	if (key.offset > 0)
4717 		key.offset--;
4718 	else if (key.type > 0)
4719 		key.type--;
4720 	else if (key.objectid > 0)
4721 		key.objectid--;
4722 	else
4723 		return 1;
4724 
4725 	btrfs_release_path(path);
4726 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4727 	if (ret < 0)
4728 		return ret;
4729 	btrfs_item_key(path->nodes[0], &found_key, 0);
4730 	ret = comp_keys(&found_key, &key);
4731 	if (ret < 0)
4732 		return 0;
4733 	return 1;
4734 }
4735 
4736 /*
4737  * A helper function to walk down the tree starting at min_key, looking
4738  * for nodes or leaves that are either in cache or have a minimum
4739  * transaction id.  This is used by the btree defrag code and by tree logging.
4740  *
4741  * This does not cow, but it does stuff the starting key it finds back
4742  * into min_key, so you can call btrfs_search_slot with cow=1 on the
4743  * key and get a writable path.
4744  *
4745  * This does lock as it descends, and path->keep_locks should be set
4746  * to 1 by the caller.
4747  *
4748  * This honors path->lowest_level to prevent descent past a given level
4749  * of the tree.
4750  *
4751  * min_trans indicates the oldest transaction that you are interested
4752  * in walking through.  Any nodes or leaves older than min_trans are
4753  * skipped over (without reading them).
4754  *
4755  * returns zero if something useful was found, < 0 on error and 1 if there
4756  * was nothing in the tree that matched the search criteria.
4757  */
4758 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4759 			 struct btrfs_key *max_key,
4760 			 struct btrfs_path *path, int cache_only,
4761 			 u64 min_trans)
4762 {
4763 	struct extent_buffer *cur;
4764 	struct btrfs_key found_key;
4765 	int slot;
4766 	int sret;
4767 	u32 nritems;
4768 	int level;
4769 	int ret = 1;
4770 
4771 	WARN_ON(!path->keep_locks);
4772 again:
4773 	cur = btrfs_read_lock_root_node(root);
4774 	level = btrfs_header_level(cur);
4775 	WARN_ON(path->nodes[level]);
4776 	path->nodes[level] = cur;
4777 	path->locks[level] = BTRFS_READ_LOCK;
4778 
4779 	if (btrfs_header_generation(cur) < min_trans) {
4780 		ret = 1;
4781 		goto out;
4782 	}
4783 	while (1) {
4784 		nritems = btrfs_header_nritems(cur);
4785 		level = btrfs_header_level(cur);
4786 		sret = bin_search(cur, min_key, level, &slot);
4787 
4788 		/* at the lowest level, we're done, setup the path and exit */
4789 		if (level == path->lowest_level) {
4790 			if (slot >= nritems)
4791 				goto find_next_key;
4792 			ret = 0;
4793 			path->slots[level] = slot;
4794 			btrfs_item_key_to_cpu(cur, &found_key, slot);
4795 			goto out;
4796 		}
4797 		if (sret && slot > 0)
4798 			slot--;
4799 		/*
4800 		 * check this node pointer against the cache_only and
4801 		 * min_trans parameters.  If it isn't in cache or is too
4802 		 * old, skip to the next one.
4803 		 */
4804 		while (slot < nritems) {
4805 			u64 blockptr;
4806 			u64 gen;
4807 			struct extent_buffer *tmp;
4808 			struct btrfs_disk_key disk_key;
4809 
4810 			blockptr = btrfs_node_blockptr(cur, slot);
4811 			gen = btrfs_node_ptr_generation(cur, slot);
4812 			if (gen < min_trans) {
4813 				slot++;
4814 				continue;
4815 			}
4816 			if (!cache_only)
4817 				break;
4818 
4819 			if (max_key) {
4820 				btrfs_node_key(cur, &disk_key, slot);
4821 				if (comp_keys(&disk_key, max_key) >= 0) {
4822 					ret = 1;
4823 					goto out;
4824 				}
4825 			}
4826 
4827 			tmp = btrfs_find_tree_block(root, blockptr,
4828 					    btrfs_level_size(root, level - 1));
4829 
4830 			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
4831 				free_extent_buffer(tmp);
4832 				break;
4833 			}
4834 			if (tmp)
4835 				free_extent_buffer(tmp);
4836 			slot++;
4837 		}
4838 find_next_key:
4839 		/*
4840 		 * we didn't find a candidate key in this node, walk forward
4841 		 * and find another one
4842 		 */
4843 		if (slot >= nritems) {
4844 			path->slots[level] = slot;
4845 			btrfs_set_path_blocking(path);
4846 			sret = btrfs_find_next_key(root, path, min_key, level,
4847 						  cache_only, min_trans);
4848 			if (sret == 0) {
4849 				btrfs_release_path(path);
4850 				goto again;
4851 			} else {
4852 				goto out;
4853 			}
4854 		}
4855 		/* save our key for returning back */
4856 		btrfs_node_key_to_cpu(cur, &found_key, slot);
4857 		path->slots[level] = slot;
4858 		if (level == path->lowest_level) {
4859 			ret = 0;
4860 			unlock_up(path, level, 1, 0, NULL);
4861 			goto out;
4862 		}
4863 		btrfs_set_path_blocking(path);
4864 		cur = read_node_slot(root, cur, slot);
4865 		BUG_ON(!cur); /* -ENOMEM */
4866 
4867 		btrfs_tree_read_lock(cur);
4868 
4869 		path->locks[level - 1] = BTRFS_READ_LOCK;
4870 		path->nodes[level - 1] = cur;
4871 		unlock_up(path, level, 1, 0, NULL);
4872 		btrfs_clear_path_blocking(path, NULL, 0);
4873 	}
4874 out:
4875 	if (ret == 0)
4876 		memcpy(min_key, &found_key, sizeof(found_key));
4877 	btrfs_set_path_blocking(path);
4878 	return ret;
4879 }
4880 
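/*
 * Sketch of a caller scanning everything newer than min_trans; the
 * starting key and the way it is advanced are simplified (real callers
 * are careful about key overflow when stepping forward):
 *
 *	min_key.objectid = 0;
 *	min_key.type = 0;
 *	min_key.offset = 0;
 *	path->keep_locks = 1;
 *	while (btrfs_search_forward(root, &min_key, NULL, path,
 *				    0, min_trans) == 0) {
 *		// process the item at path->slots[0], then step past it
 *		btrfs_release_path(path);
 *		min_key.offset++;
 *	}
 */
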
4881 /*
4882  * this is similar to btrfs_next_leaf, but does not try to preserve
4883  * and fixup the path.  It looks for and returns the next key in the
4884  * tree based on the current path and the cache_only and min_trans
4885  * parameters.
4886  *
4887  * 0 is returned if another key is found, < 0 if there are any errors
4888  * and 1 is returned if there are no higher keys in the tree
4889  *
4890  * path->keep_locks should be set to 1 on the search made before
4891  * calling this function.
4892  */
4893 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4894 			struct btrfs_key *key, int level,
4895 			int cache_only, u64 min_trans)
4896 {
4897 	int slot;
4898 	struct extent_buffer *c;
4899 
4900 	WARN_ON(!path->keep_locks);
4901 	while (level < BTRFS_MAX_LEVEL) {
4902 		if (!path->nodes[level])
4903 			return 1;
4904 
4905 		slot = path->slots[level] + 1;
4906 		c = path->nodes[level];
4907 next:
4908 		if (slot >= btrfs_header_nritems(c)) {
4909 			int ret;
4910 			int orig_lowest;
4911 			struct btrfs_key cur_key;
4912 			if (level + 1 >= BTRFS_MAX_LEVEL ||
4913 			    !path->nodes[level + 1])
4914 				return 1;
4915 
4916 			if (path->locks[level + 1]) {
4917 				level++;
4918 				continue;
4919 			}
4920 
4921 			slot = btrfs_header_nritems(c) - 1;
4922 			if (level == 0)
4923 				btrfs_item_key_to_cpu(c, &cur_key, slot);
4924 			else
4925 				btrfs_node_key_to_cpu(c, &cur_key, slot);
4926 
4927 			orig_lowest = path->lowest_level;
4928 			btrfs_release_path(path);
4929 			path->lowest_level = level;
4930 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
4931 						0, 0);
4932 			path->lowest_level = orig_lowest;
4933 			if (ret < 0)
4934 				return ret;
4935 
4936 			c = path->nodes[level];
4937 			slot = path->slots[level];
4938 			if (ret == 0)
4939 				slot++;
4940 			goto next;
4941 		}
4942 
4943 		if (level == 0)
4944 			btrfs_item_key_to_cpu(c, key, slot);
4945 		else {
4946 			u64 blockptr = btrfs_node_blockptr(c, slot);
4947 			u64 gen = btrfs_node_ptr_generation(c, slot);
4948 
4949 			if (cache_only) {
4950 				struct extent_buffer *cur;
4951 				cur = btrfs_find_tree_block(root, blockptr,
4952 					    btrfs_level_size(root, level - 1));
4953 				if (!cur ||
4954 				    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
4955 					slot++;
4956 					if (cur)
4957 						free_extent_buffer(cur);
4958 					goto next;
4959 				}
4960 				free_extent_buffer(cur);
4961 			}
4962 			if (gen < min_trans) {
4963 				slot++;
4964 				goto next;
4965 			}
4966 			btrfs_node_key_to_cpu(c, key, slot);
4967 		}
4968 		return 0;
4969 	}
4970 	return 1;
4971 }
4972 
4973 /*
4974  * search the tree again to find a leaf with greater keys
4975  * returns 0 if it found something or 1 if there are no greater leaves.
4976  * returns < 0 on io errors.
4977  */
4978 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4979 {
4980 	int slot;
4981 	int level;
4982 	struct extent_buffer *c;
4983 	struct extent_buffer *next;
4984 	struct btrfs_key key;
4985 	u32 nritems;
4986 	int ret;
4987 	int old_spinning = path->leave_spinning;
4988 	int next_rw_lock = 0;
4989 
4990 	nritems = btrfs_header_nritems(path->nodes[0]);
4991 	if (nritems == 0)
4992 		return 1;
4993 
4994 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4995 again:
4996 	level = 1;
4997 	next = NULL;
4998 	next_rw_lock = 0;
4999 	btrfs_release_path(path);
5000 
5001 	path->keep_locks = 1;
5002 	path->leave_spinning = 1;
5003 
5004 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5005 	path->keep_locks = 0;
5006 
5007 	if (ret < 0)
5008 		return ret;
5009 
5010 	nritems = btrfs_header_nritems(path->nodes[0]);
5011 	/*
5012 	 * by releasing the path above we dropped all our locks.  A balance
5013 	 * could have added more items next to the key that used to be
5014 	 * at the very end of the block.  So, check again here and
5015 	 * advance the path if there are now more items available.
5016 	 */
5017 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5018 		if (ret == 0)
5019 			path->slots[0]++;
5020 		ret = 0;
5021 		goto done;
5022 	}
5023 
5024 	while (level < BTRFS_MAX_LEVEL) {
5025 		if (!path->nodes[level]) {
5026 			ret = 1;
5027 			goto done;
5028 		}
5029 
5030 		slot = path->slots[level] + 1;
5031 		c = path->nodes[level];
5032 		if (slot >= btrfs_header_nritems(c)) {
5033 			level++;
5034 			if (level == BTRFS_MAX_LEVEL) {
5035 				ret = 1;
5036 				goto done;
5037 			}
5038 			continue;
5039 		}
5040 
5041 		if (next) {
5042 			btrfs_tree_unlock_rw(next, next_rw_lock);
5043 			free_extent_buffer(next);
5044 		}
5045 
5046 		next = c;
5047 		next_rw_lock = path->locks[level];
5048 		ret = read_block_for_search(NULL, root, path, &next, level,
5049 					    slot, &key, 0);
5050 		if (ret == -EAGAIN)
5051 			goto again;
5052 
5053 		if (ret < 0) {
5054 			btrfs_release_path(path);
5055 			goto done;
5056 		}
5057 
5058 		if (!path->skip_locking) {
5059 			ret = btrfs_try_tree_read_lock(next);
5060 			if (!ret) {
5061 				btrfs_set_path_blocking(path);
5062 				btrfs_tree_read_lock(next);
5063 				btrfs_clear_path_blocking(path, next,
5064 							  BTRFS_READ_LOCK);
5065 			}
5066 			next_rw_lock = BTRFS_READ_LOCK;
5067 		}
5068 		break;
5069 	}
5070 	path->slots[level] = slot;
5071 	while (1) {
5072 		level--;
5073 		c = path->nodes[level];
5074 		if (path->locks[level])
5075 			btrfs_tree_unlock_rw(c, path->locks[level]);
5076 
5077 		free_extent_buffer(c);
5078 		path->nodes[level] = next;
5079 		path->slots[level] = 0;
5080 		if (!path->skip_locking)
5081 			path->locks[level] = next_rw_lock;
5082 		if (!level)
5083 			break;
5084 
5085 		ret = read_block_for_search(NULL, root, path, &next, level,
5086 					    0, &key, 0);
5087 		if (ret == -EAGAIN)
5088 			goto again;
5089 
5090 		if (ret < 0) {
5091 			btrfs_release_path(path);
5092 			goto done;
5093 		}
5094 
5095 		if (!path->skip_locking) {
5096 			ret = btrfs_try_tree_read_lock(next);
5097 			if (!ret) {
5098 				btrfs_set_path_blocking(path);
5099 				btrfs_tree_read_lock(next);
5100 				btrfs_clear_path_blocking(path, next,
5101 							  BTRFS_READ_LOCK);
5102 			}
5103 			next_rw_lock = BTRFS_READ_LOCK;
5104 		}
5105 	}
5106 	ret = 0;
5107 done:
5108 	unlock_up(path, 0, 1, 0, NULL);
5109 	path->leave_spinning = old_spinning;
5110 	if (!old_spinning)
5111 		btrfs_set_path_blocking(path);
5112 
5113 	return ret;
5114 }
5115 
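/*
 * The usual iteration pattern built on this helper (sketch; the search
 * key and item processing are caller specific):
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;	// 1: no more leaves, < 0: error
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 *		// ...process the item...
 *		path->slots[0]++;
 *	}
 */
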
5116 /*
5117  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5118  * searching until it gets past min_objectid or finds an item of 'type'
5119  *
5120  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5121  */
5122 int btrfs_previous_item(struct btrfs_root *root,
5123 			struct btrfs_path *path, u64 min_objectid,
5124 			int type)
5125 {
5126 	struct btrfs_key found_key;
5127 	struct extent_buffer *leaf;
5128 	u32 nritems;
5129 	int ret;
5130 
5131 	while (1) {
5132 		if (path->slots[0] == 0) {
5133 			btrfs_set_path_blocking(path);
5134 			ret = btrfs_prev_leaf(root, path);
5135 			if (ret != 0)
5136 				return ret;
5137 		} else {
5138 			path->slots[0]--;
5139 		}
5140 		leaf = path->nodes[0];
5141 		nritems = btrfs_header_nritems(leaf);
5142 		if (nritems == 0)
5143 			return 1;
5144 		if (path->slots[0] == nritems)
5145 			path->slots[0]--;
5146 
5147 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5148 		if (found_key.objectid < min_objectid)
5149 			break;
5150 		if (found_key.type == type)
5151 			return 0;
5152 		if (found_key.objectid == min_objectid &&
5153 		    found_key.type < type)
5154 			break;
5155 	}
5156 	return 1;
5157 }
5158