xref: /openbmc/linux/fs/btrfs/extent-tree.c (revision 65cf840f)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include "compat.h"
27 #include "hash.h"
28 #include "ctree.h"
29 #include "disk-io.h"
30 #include "print-tree.h"
31 #include "transaction.h"
32 #include "volumes.h"
33 #include "locking.h"
34 #include "free-space-cache.h"
35 
36 static int update_block_group(struct btrfs_trans_handle *trans,
37 			      struct btrfs_root *root,
38 			      u64 bytenr, u64 num_bytes, int alloc);
39 static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
40 				 u64 num_bytes, int reserve, int sinfo);
41 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
42 				struct btrfs_root *root,
43 				u64 bytenr, u64 num_bytes, u64 parent,
44 				u64 root_objectid, u64 owner_objectid,
45 				u64 owner_offset, int refs_to_drop,
46 				struct btrfs_delayed_extent_op *extra_op);
47 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
48 				    struct extent_buffer *leaf,
49 				    struct btrfs_extent_item *ei);
50 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
51 				      struct btrfs_root *root,
52 				      u64 parent, u64 root_objectid,
53 				      u64 flags, u64 owner, u64 offset,
54 				      struct btrfs_key *ins, int ref_mod);
55 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
56 				     struct btrfs_root *root,
57 				     u64 parent, u64 root_objectid,
58 				     u64 flags, struct btrfs_disk_key *key,
59 				     int level, struct btrfs_key *ins);
60 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
61 			  struct btrfs_root *extent_root, u64 alloc_bytes,
62 			  u64 flags, int force);
63 static int find_next_key(struct btrfs_path *path, int level,
64 			 struct btrfs_key *key);
65 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
66 			    int dump_block_groups);
67 
68 static noinline int
69 block_group_cache_done(struct btrfs_block_group_cache *cache)
70 {
71 	smp_mb();
72 	return cache->cached == BTRFS_CACHE_FINISHED;
73 }
74 
75 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
76 {
77 	return (cache->flags & bits) == bits;
78 }
79 
80 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
81 {
82 	atomic_inc(&cache->count);
83 }
84 
85 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
86 {
87 	if (atomic_dec_and_test(&cache->count)) {
88 		WARN_ON(cache->pinned > 0);
89 		WARN_ON(cache->reserved > 0);
90 		WARN_ON(cache->reserved_pinned > 0);
91 		kfree(cache);
92 	}
93 }
94 
95 /*
96  * this adds the block group to the fs_info rb tree for the block group
97  * cache
98  */
99 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
100 				struct btrfs_block_group_cache *block_group)
101 {
102 	struct rb_node **p;
103 	struct rb_node *parent = NULL;
104 	struct btrfs_block_group_cache *cache;
105 
106 	spin_lock(&info->block_group_cache_lock);
107 	p = &info->block_group_cache_tree.rb_node;
108 
109 	while (*p) {
110 		parent = *p;
111 		cache = rb_entry(parent, struct btrfs_block_group_cache,
112 				 cache_node);
113 		if (block_group->key.objectid < cache->key.objectid) {
114 			p = &(*p)->rb_left;
115 		} else if (block_group->key.objectid > cache->key.objectid) {
116 			p = &(*p)->rb_right;
117 		} else {
118 			spin_unlock(&info->block_group_cache_lock);
119 			return -EEXIST;
120 		}
121 	}
122 
123 	rb_link_node(&block_group->cache_node, parent, p);
124 	rb_insert_color(&block_group->cache_node,
125 			&info->block_group_cache_tree);
126 	spin_unlock(&info->block_group_cache_lock);
127 
128 	return 0;
129 }
130 
131 /*
132  * This will return the block group at or after bytenr if contains is 0, else
133  * it will return the block group that contains the bytenr
134  */
135 static struct btrfs_block_group_cache *
136 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
137 			      int contains)
138 {
139 	struct btrfs_block_group_cache *cache, *ret = NULL;
140 	struct rb_node *n;
141 	u64 end, start;
142 
143 	spin_lock(&info->block_group_cache_lock);
144 	n = info->block_group_cache_tree.rb_node;
145 
146 	while (n) {
147 		cache = rb_entry(n, struct btrfs_block_group_cache,
148 				 cache_node);
149 		end = cache->key.objectid + cache->key.offset - 1;
150 		start = cache->key.objectid;
151 
152 		if (bytenr < start) {
153 			if (!contains && (!ret || start < ret->key.objectid))
154 				ret = cache;
155 			n = n->rb_left;
156 		} else if (bytenr > start) {
157 			if (contains && bytenr <= end) {
158 				ret = cache;
159 				break;
160 			}
161 			n = n->rb_right;
162 		} else {
163 			ret = cache;
164 			break;
165 		}
166 	}
167 	if (ret)
168 		btrfs_get_block_group(ret);
169 	spin_unlock(&info->block_group_cache_lock);
170 
171 	return ret;
172 }
173 
174 static int add_excluded_extent(struct btrfs_root *root,
175 			       u64 start, u64 num_bytes)
176 {
177 	u64 end = start + num_bytes - 1;
178 	set_extent_bits(&root->fs_info->freed_extents[0],
179 			start, end, EXTENT_UPTODATE, GFP_NOFS);
180 	set_extent_bits(&root->fs_info->freed_extents[1],
181 			start, end, EXTENT_UPTODATE, GFP_NOFS);
182 	return 0;
183 }
184 
185 static void free_excluded_extents(struct btrfs_root *root,
186 				  struct btrfs_block_group_cache *cache)
187 {
188 	u64 start, end;
189 
190 	start = cache->key.objectid;
191 	end = start + cache->key.offset - 1;
192 
193 	clear_extent_bits(&root->fs_info->freed_extents[0],
194 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
195 	clear_extent_bits(&root->fs_info->freed_extents[1],
196 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
197 }
198 
199 static int exclude_super_stripes(struct btrfs_root *root,
200 				 struct btrfs_block_group_cache *cache)
201 {
202 	u64 bytenr;
203 	u64 *logical;
204 	int stripe_len;
205 	int i, nr, ret;
206 
207 	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
208 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
209 		cache->bytes_super += stripe_len;
210 		ret = add_excluded_extent(root, cache->key.objectid,
211 					  stripe_len);
212 		BUG_ON(ret);
213 	}
214 
215 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
216 		bytenr = btrfs_sb_offset(i);
217 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
218 				       cache->key.objectid, bytenr,
219 				       0, &logical, &nr, &stripe_len);
220 		BUG_ON(ret);
221 
222 		while (nr--) {
223 			cache->bytes_super += stripe_len;
224 			ret = add_excluded_extent(root, logical[nr],
225 						  stripe_len);
226 			BUG_ON(ret);
227 		}
228 
229 		kfree(logical);
230 	}
231 	return 0;
232 }
233 
234 static struct btrfs_caching_control *
235 get_caching_control(struct btrfs_block_group_cache *cache)
236 {
237 	struct btrfs_caching_control *ctl;
238 
239 	spin_lock(&cache->lock);
240 	if (cache->cached != BTRFS_CACHE_STARTED) {
241 		spin_unlock(&cache->lock);
242 		return NULL;
243 	}
244 
245 	ctl = cache->caching_ctl;
246 	atomic_inc(&ctl->count);
247 	spin_unlock(&cache->lock);
248 	return ctl;
249 }
250 
251 static void put_caching_control(struct btrfs_caching_control *ctl)
252 {
253 	if (atomic_dec_and_test(&ctl->count))
254 		kfree(ctl);
255 }
256 
257 /*
258  * this is only called by cache_block_group, since we could have freed extents
259  * we need to check the pinned_extents for any extents that can't be used yet
260  * since their free space will be released as soon as the transaction commits.
261  */
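/*
 * [Editorial worked example, not part of revision 65cf840f]
 * For a block group covering [0, 100) with pinned/uptodate extents at
 * [10, 20) and [50, 60), the loop below adds the free ranges [0, 10)
 * and [20, 50), and the tail adds [60, 100); the function returns
 * total_added == 80.
 */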
262 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
263 			      struct btrfs_fs_info *info, u64 start, u64 end)
264 {
265 	u64 extent_start, extent_end, size, total_added = 0;
266 	int ret;
267 
268 	while (start < end) {
269 		ret = find_first_extent_bit(info->pinned_extents, start,
270 					    &extent_start, &extent_end,
271 					    EXTENT_DIRTY | EXTENT_UPTODATE);
272 		if (ret)
273 			break;
274 
275 		if (extent_start <= start) {
276 			start = extent_end + 1;
277 		} else if (extent_start > start && extent_start < end) {
278 			size = extent_start - start;
279 			total_added += size;
280 			ret = btrfs_add_free_space(block_group, start,
281 						   size);
282 			BUG_ON(ret);
283 			start = extent_end + 1;
284 		} else {
285 			break;
286 		}
287 	}
288 
289 	if (start < end) {
290 		size = end - start;
291 		total_added += size;
292 		ret = btrfs_add_free_space(block_group, start, size);
293 		BUG_ON(ret);
294 	}
295 
296 	return total_added;
297 }
298 
299 static int caching_kthread(void *data)
300 {
301 	struct btrfs_block_group_cache *block_group = data;
302 	struct btrfs_fs_info *fs_info = block_group->fs_info;
303 	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
304 	struct btrfs_root *extent_root = fs_info->extent_root;
305 	struct btrfs_path *path;
306 	struct extent_buffer *leaf;
307 	struct btrfs_key key;
308 	u64 total_found = 0;
309 	u64 last = 0;
310 	u32 nritems;
311 	int ret = 0;
312 
313 	path = btrfs_alloc_path();
314 	if (!path)
315 		return -ENOMEM;
316 
317 	exclude_super_stripes(extent_root, block_group);
318 	spin_lock(&block_group->space_info->lock);
319 	block_group->space_info->bytes_readonly += block_group->bytes_super;
320 	spin_unlock(&block_group->space_info->lock);
321 
322 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
323 
324 	/*
325 	 * We don't want to deadlock with somebody trying to allocate a new
326 	 * extent for the extent root while also trying to search the extent
327 	 * root to add free space.  So we skip locking and search the commit
328 	 * root, since its read-only
329 	 */
330 	path->skip_locking = 1;
331 	path->search_commit_root = 1;
332 	path->reada = 2;
333 
334 	key.objectid = last;
335 	key.offset = 0;
336 	key.type = BTRFS_EXTENT_ITEM_KEY;
337 again:
338 	mutex_lock(&caching_ctl->mutex);
339 	/* need to make sure the commit_root doesn't disappear */
340 	down_read(&fs_info->extent_commit_sem);
341 
342 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
343 	if (ret < 0)
344 		goto err;
345 
346 	leaf = path->nodes[0];
347 	nritems = btrfs_header_nritems(leaf);
348 
349 	while (1) {
350 		smp_mb();
351 		if (fs_info->closing > 1) {
352 			last = (u64)-1;
353 			break;
354 		}
355 
356 		if (path->slots[0] < nritems) {
357 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
358 		} else {
359 			ret = find_next_key(path, 0, &key);
360 			if (ret)
361 				break;
362 
363 			caching_ctl->progress = last;
364 			btrfs_release_path(extent_root, path);
365 			up_read(&fs_info->extent_commit_sem);
366 			mutex_unlock(&caching_ctl->mutex);
367 			if (btrfs_transaction_in_commit(fs_info))
368 				schedule_timeout(1);
369 			else
370 				cond_resched();
371 			goto again;
372 		}
373 
374 		if (key.objectid < block_group->key.objectid) {
375 			path->slots[0]++;
376 			continue;
377 		}
378 
379 		if (key.objectid >= block_group->key.objectid +
380 		    block_group->key.offset)
381 			break;
382 
383 		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
384 			total_found += add_new_free_space(block_group,
385 							  fs_info, last,
386 							  key.objectid);
387 			last = key.objectid + key.offset;
388 
389 			if (total_found > (1024 * 1024 * 2)) {
390 				total_found = 0;
391 				wake_up(&caching_ctl->wait);
392 			}
393 		}
394 		path->slots[0]++;
395 	}
396 	ret = 0;
397 
398 	total_found += add_new_free_space(block_group, fs_info, last,
399 					  block_group->key.objectid +
400 					  block_group->key.offset);
401 	caching_ctl->progress = (u64)-1;
402 
403 	spin_lock(&block_group->lock);
404 	block_group->caching_ctl = NULL;
405 	block_group->cached = BTRFS_CACHE_FINISHED;
406 	spin_unlock(&block_group->lock);
407 
408 err:
409 	btrfs_free_path(path);
410 	up_read(&fs_info->extent_commit_sem);
411 
412 	free_excluded_extents(extent_root, block_group);
413 
414 	mutex_unlock(&caching_ctl->mutex);
415 	wake_up(&caching_ctl->wait);
416 
417 	put_caching_control(caching_ctl);
418 	atomic_dec(&block_group->space_info->caching_threads);
419 	btrfs_put_block_group(block_group);
420 
421 	return 0;
422 }
423 
424 static int cache_block_group(struct btrfs_block_group_cache *cache)
425 {
426 	struct btrfs_fs_info *fs_info = cache->fs_info;
427 	struct btrfs_caching_control *caching_ctl;
428 	struct task_struct *tsk;
429 	int ret = 0;
430 
431 	smp_mb();
432 	if (cache->cached != BTRFS_CACHE_NO)
433 		return 0;
434 
435 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
436 	BUG_ON(!caching_ctl);
437 
438 	INIT_LIST_HEAD(&caching_ctl->list);
439 	mutex_init(&caching_ctl->mutex);
440 	init_waitqueue_head(&caching_ctl->wait);
441 	caching_ctl->block_group = cache;
442 	caching_ctl->progress = cache->key.objectid;
443 	/* one for caching kthread, one for caching block group list */
444 	atomic_set(&caching_ctl->count, 2);
445 
446 	spin_lock(&cache->lock);
447 	if (cache->cached != BTRFS_CACHE_NO) {
448 		spin_unlock(&cache->lock);
449 		kfree(caching_ctl);
450 		return 0;
451 	}
452 	cache->caching_ctl = caching_ctl;
453 	cache->cached = BTRFS_CACHE_STARTED;
454 	spin_unlock(&cache->lock);
455 
456 	down_write(&fs_info->extent_commit_sem);
457 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
458 	up_write(&fs_info->extent_commit_sem);
459 
460 	atomic_inc(&cache->space_info->caching_threads);
461 	btrfs_get_block_group(cache);
462 
463 	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
464 			  cache->key.objectid);
465 	if (IS_ERR(tsk)) {
466 		ret = PTR_ERR(tsk);
467 		printk(KERN_ERR "error running thread %d\n", ret);
468 		BUG();
469 	}
470 
471 	return ret;
472 }
473 
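/*
 * [Editorial sketch, not part of revision 65cf840f]  How a caller pairs
 * cache_block_group() with the caching control to wait for the free space
 * cache of one block group to be fully populated.  The helper name is
 * hypothetical; the full file provides similar wait helpers further down.
 */
static int __maybe_unused example_wait_cache_done(
				struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)	/* already finished, or caching never started */
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
	put_caching_control(caching_ctl);
	return 0;
}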
474 /*
475  * return the block group that starts at or after bytenr
476  */
477 static struct btrfs_block_group_cache *
478 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
479 {
480 	struct btrfs_block_group_cache *cache;
481 
482 	cache = block_group_cache_tree_search(info, bytenr, 0);
483 
484 	return cache;
485 }
486 
487 /*
488  * return the block group that contains the given bytenr
489  */
490 struct btrfs_block_group_cache *btrfs_lookup_block_group(
491 						 struct btrfs_fs_info *info,
492 						 u64 bytenr)
493 {
494 	struct btrfs_block_group_cache *cache;
495 
496 	cache = block_group_cache_tree_search(info, bytenr, 1);
497 
498 	return cache;
499 }
500 
501 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
502 						  u64 flags)
503 {
504 	struct list_head *head = &info->space_info;
505 	struct btrfs_space_info *found;
506 
507 	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
508 		 BTRFS_BLOCK_GROUP_METADATA;
509 
510 	rcu_read_lock();
511 	list_for_each_entry_rcu(found, head, list) {
512 		if (found->flags == flags) {
513 			rcu_read_unlock();
514 			return found;
515 		}
516 	}
517 	rcu_read_unlock();
518 	return NULL;
519 }
520 
521 /*
522  * after adding space to the filesystem, we need to clear the full flags
523  * on all the space infos.
524  */
525 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
526 {
527 	struct list_head *head = &info->space_info;
528 	struct btrfs_space_info *found;
529 
530 	rcu_read_lock();
531 	list_for_each_entry_rcu(found, head, list)
532 		found->full = 0;
533 	rcu_read_unlock();
534 }
535 
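/* returns num * factor / 10, i.e. "factor" tenths of num (10 means 100%) */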
536 static u64 div_factor(u64 num, int factor)
537 {
538 	if (factor == 10)
539 		return num;
540 	num *= factor;
541 	do_div(num, 10);
542 	return num;
543 }
544 
545 u64 btrfs_find_block_group(struct btrfs_root *root,
546 			   u64 search_start, u64 search_hint, int owner)
547 {
548 	struct btrfs_block_group_cache *cache;
549 	u64 used;
550 	u64 last = max(search_hint, search_start);
551 	u64 group_start = 0;
552 	int full_search = 0;
553 	int factor = 9;
554 	int wrapped = 0;
555 again:
556 	while (1) {
557 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
558 		if (!cache)
559 			break;
560 
561 		spin_lock(&cache->lock);
562 		last = cache->key.objectid + cache->key.offset;
563 		used = btrfs_block_group_used(&cache->item);
564 
565 		if ((full_search || !cache->ro) &&
566 		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
567 			if (used + cache->pinned + cache->reserved <
568 			    div_factor(cache->key.offset, factor)) {
569 				group_start = cache->key.objectid;
570 				spin_unlock(&cache->lock);
571 				btrfs_put_block_group(cache);
572 				goto found;
573 			}
574 		}
575 		spin_unlock(&cache->lock);
576 		btrfs_put_block_group(cache);
577 		cond_resched();
578 	}
579 	if (!wrapped) {
580 		last = search_start;
581 		wrapped = 1;
582 		goto again;
583 	}
584 	if (!full_search && factor < 10) {
585 		last = search_start;
586 		full_search = 1;
587 		factor = 10;
588 		goto again;
589 	}
590 found:
591 	return group_start;
592 }
593 
594 /* simple helper to search for an existing extent at a given offset */
595 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
596 {
597 	int ret;
598 	struct btrfs_key key;
599 	struct btrfs_path *path;
600 
601 	path = btrfs_alloc_path();
602 	BUG_ON(!path);
603 	key.objectid = start;
604 	key.offset = len;
605 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
606 	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
607 				0, 0);
608 	btrfs_free_path(path);
609 	return ret;
610 }
611 
612 /*
613  * helper function to look up the reference count and flags of an extent.
614  *
615  * the head node for a delayed ref is used to store the sum of all the
616  * reference count modifications queued up in the rbtree. the head
617  * node may also store the extent flags to set. this way you can check
618  * to see what the reference count and extent flags will be once all of
619  * the queued delayed refs have been processed.
620  */
621 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
622 			     struct btrfs_root *root, u64 bytenr,
623 			     u64 num_bytes, u64 *refs, u64 *flags)
624 {
625 	struct btrfs_delayed_ref_head *head;
626 	struct btrfs_delayed_ref_root *delayed_refs;
627 	struct btrfs_path *path;
628 	struct btrfs_extent_item *ei;
629 	struct extent_buffer *leaf;
630 	struct btrfs_key key;
631 	u32 item_size;
632 	u64 num_refs;
633 	u64 extent_flags;
634 	int ret;
635 
636 	path = btrfs_alloc_path();
637 	if (!path)
638 		return -ENOMEM;
639 
640 	key.objectid = bytenr;
641 	key.type = BTRFS_EXTENT_ITEM_KEY;
642 	key.offset = num_bytes;
643 	if (!trans) {
644 		path->skip_locking = 1;
645 		path->search_commit_root = 1;
646 	}
647 again:
648 	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
649 				&key, path, 0, 0);
650 	if (ret < 0)
651 		goto out_free;
652 
653 	if (ret == 0) {
654 		leaf = path->nodes[0];
655 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
656 		if (item_size >= sizeof(*ei)) {
657 			ei = btrfs_item_ptr(leaf, path->slots[0],
658 					    struct btrfs_extent_item);
659 			num_refs = btrfs_extent_refs(leaf, ei);
660 			extent_flags = btrfs_extent_flags(leaf, ei);
661 		} else {
662 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
663 			struct btrfs_extent_item_v0 *ei0;
664 			BUG_ON(item_size != sizeof(*ei0));
665 			ei0 = btrfs_item_ptr(leaf, path->slots[0],
666 					     struct btrfs_extent_item_v0);
667 			num_refs = btrfs_extent_refs_v0(leaf, ei0);
668 			/* FIXME: this isn't correct for data */
669 			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
670 #else
671 			BUG();
672 #endif
673 		}
674 		BUG_ON(num_refs == 0);
675 	} else {
676 		num_refs = 0;
677 		extent_flags = 0;
678 		ret = 0;
679 	}
680 
681 	if (!trans)
682 		goto out;
683 
684 	delayed_refs = &trans->transaction->delayed_refs;
685 	spin_lock(&delayed_refs->lock);
686 	head = btrfs_find_delayed_ref_head(trans, bytenr);
687 	if (head) {
688 		if (!mutex_trylock(&head->mutex)) {
689 			atomic_inc(&head->node.refs);
690 			spin_unlock(&delayed_refs->lock);
691 
692 			btrfs_release_path(root->fs_info->extent_root, path);
693 
694 			mutex_lock(&head->mutex);
695 			mutex_unlock(&head->mutex);
696 			btrfs_put_delayed_ref(&head->node);
697 			goto again;
698 		}
699 		if (head->extent_op && head->extent_op->update_flags)
700 			extent_flags |= head->extent_op->flags_to_set;
701 		else
702 			BUG_ON(num_refs == 0);
703 
704 		num_refs += head->node.ref_mod;
705 		mutex_unlock(&head->mutex);
706 	}
707 	spin_unlock(&delayed_refs->lock);
708 out:
709 	WARN_ON(num_refs == 0);
710 	if (refs)
711 		*refs = num_refs;
712 	if (flags)
713 		*flags = extent_flags;
714 out_free:
715 	btrfs_free_path(path);
716 	return ret;
717 }
718 
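/*
 * [Editorial sketch, not part of revision 65cf840f]  Typical use of
 * btrfs_lookup_extent_info(): decide whether an extent is shared before
 * choosing how to COW it.  The helper name is hypothetical.
 */
static int __maybe_unused example_extent_is_shared(
				struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes)
{
	u64 refs = 0;
	u64 flags = 0;
	int ret;

	ret = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
				       &refs, &flags);
	if (ret < 0)
		return ret;
	return refs > 1;	/* more than one holder, so it is shared */
}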
719 /*
720  * Back reference rules.  Back refs have three main goals:
721  *
722  * 1) differentiate between all holders of references to an extent so that
723  *    when a reference is dropped we can make sure it was a valid reference
724  *    before freeing the extent.
725  *
726  * 2) Provide enough information to quickly find the holders of an extent
727  *    if we notice a given block is corrupted or bad.
728  *
729  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
730  *    maintenance.  This is actually the same as #2, but with a slightly
731  *    different use case.
732  *
733  * There are two kinds of back refs. Implicit back refs are optimized
734  * for pointers in non-shared tree blocks. For a given pointer in a block,
735  * back refs of this kind provide information about the block's owner tree
736  * and the pointer's key. This information allows us to find the block by
737  * b-tree searching. Full back refs are for pointers in tree blocks not
738  * referenced by their owner trees. The location of the tree block is
739  * recorded in the back refs. Full back refs are actually generic and can
740  * be used in all cases where implicit back refs are used. Their major
741  * shortcoming is overhead: every time a tree block gets COWed, we have
742  * to update the back ref entries for all pointers in it.
743  *
744  * For a newly allocated tree block, we use implicit back refs for
745  * pointers in it. This means most tree related operations only involve
746  * implicit back refs. For a tree block created in an old transaction, the
747  * only way to drop a reference to it is to COW it. So we can detect the
748  * event that a tree block loses its owner tree's reference and do the
749  * back refs conversion.
750  *
751  * When a tree block is COW'd through a tree, there are four cases:
752  *
753  * The reference count of the block is one and the tree is the block's
754  * owner tree. Nothing to do in this case.
755  *
756  * The reference count of the block is one and the tree is not the
757  * block's owner tree. In this case, full back refs are used for pointers
758  * in the block. Remove these full back refs and add implicit back refs
759  * for every pointer in the new block.
760  *
761  * The reference count of the block is greater than one and the tree is
762  * the block's owner tree. In this case, implicit back refs are used for
763  * pointers in the block. Add full back refs for every pointer in the
764  * block and increase the lower level extents' reference counts. The
765  * original implicit back refs are inherited by the new block.
766  *
767  * The reference count of the block is greater than one and the tree is
768  * not the block's owner tree. Add implicit back refs for every pointer in
769  * the new block and increase the lower level extents' reference counts.
770  *
771  * Back Reference Key composition:
772  *
773  * The key objectid corresponds to the first byte in the extent.
774  * The key type is used to differentiate between types of back refs.
775  * There are different meanings of the key offset for different types
776  * of back refs.
777  *
778  * File extents can be referenced by:
779  *
780  * - multiple snapshots, subvolumes, or different generations in one subvol
781  * - different files inside a single subvolume
782  * - different offsets inside a file (bookend extents in file.c)
783  *
784  * The extent ref structure for the implicit back refs has fields for:
785  *
786  * - Objectid of the subvolume root
787  * - objectid of the file holding the reference
788  * - original offset in the file
789  * - how many bookend extents
790  *
791  * The key offset for the implicit back refs is the hash of the first
792  * three fields.
793  *
794  * The extent ref structure for the full back refs has a field for:
795  *
796  * - number of pointers in the tree leaf
797  *
798  * The key offset for the full back refs is the first byte of
799  * the tree leaf.
800  *
801  * When a file extent is allocated, the implicit back refs are used.
802  * The fields are filled in as:
803  *
804  *     (root_key.objectid, inode objectid, offset in file, 1)
805  *
806  * When a file extent is removed during file truncation, we find the
807  * corresponding implicit back refs and check the following fields:
808  *
809  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
810  *
811  * Btree extents can be referenced by:
812  *
813  * - Different subvolumes
814  *
815  * Both the implicit back refs and the full back refs for tree blocks
816  * consist of a key only. The key offset for the implicit back refs is
817  * the objectid of the block's owner tree. The key offset for the full
818  * back refs is the first byte of the parent block.
819  *
820  * When implicit back refs are used, information about the lowest key
821  * and level of the tree block is required. This information is stored
822  * in the tree block info structure.
823  */
824 
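/*
 * [Editorial illustration, not part of revision 65cf840f]  Concrete keys
 * for the rules above, with made-up byte numbers: a file extent at bytenr
 * 12582912, referenced by inode 257 at file offset 0 in subvolume 5, gets
 * the implicit back ref key
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared leaf at bytenr
 * 30408704 gets the full back ref key
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */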
825 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
826 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
827 				  struct btrfs_root *root,
828 				  struct btrfs_path *path,
829 				  u64 owner, u32 extra_size)
830 {
831 	struct btrfs_extent_item *item;
832 	struct btrfs_extent_item_v0 *ei0;
833 	struct btrfs_extent_ref_v0 *ref0;
834 	struct btrfs_tree_block_info *bi;
835 	struct extent_buffer *leaf;
836 	struct btrfs_key key;
837 	struct btrfs_key found_key;
838 	u32 new_size = sizeof(*item);
839 	u64 refs;
840 	int ret;
841 
842 	leaf = path->nodes[0];
843 	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
844 
845 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
846 	ei0 = btrfs_item_ptr(leaf, path->slots[0],
847 			     struct btrfs_extent_item_v0);
848 	refs = btrfs_extent_refs_v0(leaf, ei0);
849 
850 	if (owner == (u64)-1) {
851 		while (1) {
852 			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
853 				ret = btrfs_next_leaf(root, path);
854 				if (ret < 0)
855 					return ret;
856 				BUG_ON(ret > 0);
857 				leaf = path->nodes[0];
858 			}
859 			btrfs_item_key_to_cpu(leaf, &found_key,
860 					      path->slots[0]);
861 			BUG_ON(key.objectid != found_key.objectid);
862 			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
863 				path->slots[0]++;
864 				continue;
865 			}
866 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
867 					      struct btrfs_extent_ref_v0);
868 			owner = btrfs_ref_objectid_v0(leaf, ref0);
869 			break;
870 		}
871 	}
872 	btrfs_release_path(root, path);
873 
874 	if (owner < BTRFS_FIRST_FREE_OBJECTID)
875 		new_size += sizeof(*bi);
876 
877 	new_size -= sizeof(*ei0);
878 	ret = btrfs_search_slot(trans, root, &key, path,
879 				new_size + extra_size, 1);
880 	if (ret < 0)
881 		return ret;
882 	BUG_ON(ret);
883 
884 	ret = btrfs_extend_item(trans, root, path, new_size);
885 	BUG_ON(ret);
886 
887 	leaf = path->nodes[0];
888 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
889 	btrfs_set_extent_refs(leaf, item, refs);
890 	/* FIXME: get real generation */
891 	btrfs_set_extent_generation(leaf, item, 0);
892 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
893 		btrfs_set_extent_flags(leaf, item,
894 				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
895 				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
896 		bi = (struct btrfs_tree_block_info *)(item + 1);
897 		/* FIXME: get first key of the block */
898 		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
899 		btrfs_set_tree_block_level(leaf, bi, (int)owner);
900 	} else {
901 		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
902 	}
903 	btrfs_mark_buffer_dirty(leaf);
904 	return 0;
905 }
906 #endif
907 
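/*
 * Note: the high crc is shifted by 31, not 32.  The result feeds key
 * offsets stored on disk, so the hash cannot be changed without breaking
 * existing filesystems.
 */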
908 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
909 {
910 	u32 high_crc = ~(u32)0;
911 	u32 low_crc = ~(u32)0;
912 	__le64 lenum;
913 
914 	lenum = cpu_to_le64(root_objectid);
915 	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
916 	lenum = cpu_to_le64(owner);
917 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
918 	lenum = cpu_to_le64(offset);
919 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
920 
921 	return ((u64)high_crc << 31) ^ (u64)low_crc;
922 }
923 
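/*
 * [Editorial sketch, not part of revision 65cf840f]  Composing the search
 * key for an implicit data back ref with the hash above, as the lookup and
 * insert helpers below do.  The helper name is hypothetical.
 */
static void __maybe_unused example_data_ref_key(struct btrfs_key *key,
						u64 bytenr, u64 root_objectid,
						u64 owner, u64 offset)
{
	key->objectid = bytenr;		/* first byte of the extent */
	key->type = BTRFS_EXTENT_DATA_REF_KEY;
	key->offset = hash_extent_data_ref(root_objectid, owner, offset);
}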
924 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
925 				     struct btrfs_extent_data_ref *ref)
926 {
927 	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
928 				    btrfs_extent_data_ref_objectid(leaf, ref),
929 				    btrfs_extent_data_ref_offset(leaf, ref));
930 }
931 
932 static int match_extent_data_ref(struct extent_buffer *leaf,
933 				 struct btrfs_extent_data_ref *ref,
934 				 u64 root_objectid, u64 owner, u64 offset)
935 {
936 	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
937 	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
938 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
939 		return 0;
940 	return 1;
941 }
942 
943 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
944 					   struct btrfs_root *root,
945 					   struct btrfs_path *path,
946 					   u64 bytenr, u64 parent,
947 					   u64 root_objectid,
948 					   u64 owner, u64 offset)
949 {
950 	struct btrfs_key key;
951 	struct btrfs_extent_data_ref *ref;
952 	struct extent_buffer *leaf;
953 	u32 nritems;
954 	int ret;
955 	int recow;
956 	int err = -ENOENT;
957 
958 	key.objectid = bytenr;
959 	if (parent) {
960 		key.type = BTRFS_SHARED_DATA_REF_KEY;
961 		key.offset = parent;
962 	} else {
963 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
964 		key.offset = hash_extent_data_ref(root_objectid,
965 						  owner, offset);
966 	}
967 again:
968 	recow = 0;
969 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
970 	if (ret < 0) {
971 		err = ret;
972 		goto fail;
973 	}
974 
975 	if (parent) {
976 		if (!ret)
977 			return 0;
978 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
979 		key.type = BTRFS_EXTENT_REF_V0_KEY;
980 		btrfs_release_path(root, path);
981 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
982 		if (ret < 0) {
983 			err = ret;
984 			goto fail;
985 		}
986 		if (!ret)
987 			return 0;
988 #endif
989 		goto fail;
990 	}
991 
992 	leaf = path->nodes[0];
993 	nritems = btrfs_header_nritems(leaf);
994 	while (1) {
995 		if (path->slots[0] >= nritems) {
996 			ret = btrfs_next_leaf(root, path);
997 			if (ret < 0)
998 				err = ret;
999 			if (ret)
1000 				goto fail;
1001 
1002 			leaf = path->nodes[0];
1003 			nritems = btrfs_header_nritems(leaf);
1004 			recow = 1;
1005 		}
1006 
1007 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1008 		if (key.objectid != bytenr ||
1009 		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
1010 			goto fail;
1011 
1012 		ref = btrfs_item_ptr(leaf, path->slots[0],
1013 				     struct btrfs_extent_data_ref);
1014 
1015 		if (match_extent_data_ref(leaf, ref, root_objectid,
1016 					  owner, offset)) {
1017 			if (recow) {
1018 				btrfs_release_path(root, path);
1019 				goto again;
1020 			}
1021 			err = 0;
1022 			break;
1023 		}
1024 		path->slots[0]++;
1025 	}
1026 fail:
1027 	return err;
1028 }
1029 
1030 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1031 					   struct btrfs_root *root,
1032 					   struct btrfs_path *path,
1033 					   u64 bytenr, u64 parent,
1034 					   u64 root_objectid, u64 owner,
1035 					   u64 offset, int refs_to_add)
1036 {
1037 	struct btrfs_key key;
1038 	struct extent_buffer *leaf;
1039 	u32 size;
1040 	u32 num_refs;
1041 	int ret;
1042 
1043 	key.objectid = bytenr;
1044 	if (parent) {
1045 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1046 		key.offset = parent;
1047 		size = sizeof(struct btrfs_shared_data_ref);
1048 	} else {
1049 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1050 		key.offset = hash_extent_data_ref(root_objectid,
1051 						  owner, offset);
1052 		size = sizeof(struct btrfs_extent_data_ref);
1053 	}
1054 
1055 	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1056 	if (ret && ret != -EEXIST)
1057 		goto fail;
1058 
1059 	leaf = path->nodes[0];
1060 	if (parent) {
1061 		struct btrfs_shared_data_ref *ref;
1062 		ref = btrfs_item_ptr(leaf, path->slots[0],
1063 				     struct btrfs_shared_data_ref);
1064 		if (ret == 0) {
1065 			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1066 		} else {
1067 			num_refs = btrfs_shared_data_ref_count(leaf, ref);
1068 			num_refs += refs_to_add;
1069 			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1070 		}
1071 	} else {
1072 		struct btrfs_extent_data_ref *ref;
1073 		while (ret == -EEXIST) {
1074 			ref = btrfs_item_ptr(leaf, path->slots[0],
1075 					     struct btrfs_extent_data_ref);
1076 			if (match_extent_data_ref(leaf, ref, root_objectid,
1077 						  owner, offset))
1078 				break;
1079 			btrfs_release_path(root, path);
1080 			key.offset++;
1081 			ret = btrfs_insert_empty_item(trans, root, path, &key,
1082 						      size);
1083 			if (ret && ret != -EEXIST)
1084 				goto fail;
1085 
1086 			leaf = path->nodes[0];
1087 		}
1088 		ref = btrfs_item_ptr(leaf, path->slots[0],
1089 				     struct btrfs_extent_data_ref);
1090 		if (ret == 0) {
1091 			btrfs_set_extent_data_ref_root(leaf, ref,
1092 						       root_objectid);
1093 			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1094 			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1095 			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1096 		} else {
1097 			num_refs = btrfs_extent_data_ref_count(leaf, ref);
1098 			num_refs += refs_to_add;
1099 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1100 		}
1101 	}
1102 	btrfs_mark_buffer_dirty(leaf);
1103 	ret = 0;
1104 fail:
1105 	btrfs_release_path(root, path);
1106 	return ret;
1107 }
1108 
1109 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1110 					   struct btrfs_root *root,
1111 					   struct btrfs_path *path,
1112 					   int refs_to_drop)
1113 {
1114 	struct btrfs_key key;
1115 	struct btrfs_extent_data_ref *ref1 = NULL;
1116 	struct btrfs_shared_data_ref *ref2 = NULL;
1117 	struct extent_buffer *leaf;
1118 	u32 num_refs = 0;
1119 	int ret = 0;
1120 
1121 	leaf = path->nodes[0];
1122 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1123 
1124 	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1125 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1126 				      struct btrfs_extent_data_ref);
1127 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1128 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1129 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1130 				      struct btrfs_shared_data_ref);
1131 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1132 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1133 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1134 		struct btrfs_extent_ref_v0 *ref0;
1135 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1136 				      struct btrfs_extent_ref_v0);
1137 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1138 #endif
1139 	} else {
1140 		BUG();
1141 	}
1142 
1143 	BUG_ON(num_refs < refs_to_drop);
1144 	num_refs -= refs_to_drop;
1145 
1146 	if (num_refs == 0) {
1147 		ret = btrfs_del_item(trans, root, path);
1148 	} else {
1149 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1150 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1151 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1152 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1153 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1154 		else {
1155 			struct btrfs_extent_ref_v0 *ref0;
1156 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1157 					struct btrfs_extent_ref_v0);
1158 			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1159 		}
1160 #endif
1161 		btrfs_mark_buffer_dirty(leaf);
1162 	}
1163 	return ret;
1164 }
1165 
1166 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1167 					  struct btrfs_path *path,
1168 					  struct btrfs_extent_inline_ref *iref)
1169 {
1170 	struct btrfs_key key;
1171 	struct extent_buffer *leaf;
1172 	struct btrfs_extent_data_ref *ref1;
1173 	struct btrfs_shared_data_ref *ref2;
1174 	u32 num_refs = 0;
1175 
1176 	leaf = path->nodes[0];
1177 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1178 	if (iref) {
1179 		if (btrfs_extent_inline_ref_type(leaf, iref) ==
1180 		    BTRFS_EXTENT_DATA_REF_KEY) {
1181 			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1182 			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1183 		} else {
1184 			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1185 			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1186 		}
1187 	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1188 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1189 				      struct btrfs_extent_data_ref);
1190 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1191 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1192 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1193 				      struct btrfs_shared_data_ref);
1194 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1195 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1196 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1197 		struct btrfs_extent_ref_v0 *ref0;
1198 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1199 				      struct btrfs_extent_ref_v0);
1200 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1201 #endif
1202 	} else {
1203 		WARN_ON(1);
1204 	}
1205 	return num_refs;
1206 }
1207 
1208 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1209 					  struct btrfs_root *root,
1210 					  struct btrfs_path *path,
1211 					  u64 bytenr, u64 parent,
1212 					  u64 root_objectid)
1213 {
1214 	struct btrfs_key key;
1215 	int ret;
1216 
1217 	key.objectid = bytenr;
1218 	if (parent) {
1219 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1220 		key.offset = parent;
1221 	} else {
1222 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1223 		key.offset = root_objectid;
1224 	}
1225 
1226 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1227 	if (ret > 0)
1228 		ret = -ENOENT;
1229 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1230 	if (ret == -ENOENT && parent) {
1231 		btrfs_release_path(root, path);
1232 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1233 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1234 		if (ret > 0)
1235 			ret = -ENOENT;
1236 	}
1237 #endif
1238 	return ret;
1239 }
1240 
1241 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1242 					  struct btrfs_root *root,
1243 					  struct btrfs_path *path,
1244 					  u64 bytenr, u64 parent,
1245 					  u64 root_objectid)
1246 {
1247 	struct btrfs_key key;
1248 	int ret;
1249 
1250 	key.objectid = bytenr;
1251 	if (parent) {
1252 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1253 		key.offset = parent;
1254 	} else {
1255 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1256 		key.offset = root_objectid;
1257 	}
1258 
1259 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1260 	btrfs_release_path(root, path);
1261 	return ret;
1262 }
1263 
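/*
 * pick the back ref key type: shared variants when the extent is reached
 * through a parent block, tree vs. data refs by owner objectid
 */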
1264 static inline int extent_ref_type(u64 parent, u64 owner)
1265 {
1266 	int type;
1267 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1268 		if (parent > 0)
1269 			type = BTRFS_SHARED_BLOCK_REF_KEY;
1270 		else
1271 			type = BTRFS_TREE_BLOCK_REF_KEY;
1272 	} else {
1273 		if (parent > 0)
1274 			type = BTRFS_SHARED_DATA_REF_KEY;
1275 		else
1276 			type = BTRFS_EXTENT_DATA_REF_KEY;
1277 	}
1278 	return type;
1279 }
1280 
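/*
 * find the key that follows the current path position without moving the
 * path itself; returns 0 and fills @key, or 1 when already at the end
 */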
1281 static int find_next_key(struct btrfs_path *path, int level,
1282 			 struct btrfs_key *key)
1284 {
1285 	for (; level < BTRFS_MAX_LEVEL; level++) {
1286 		if (!path->nodes[level])
1287 			break;
1288 		if (path->slots[level] + 1 >=
1289 		    btrfs_header_nritems(path->nodes[level]))
1290 			continue;
1291 		if (level == 0)
1292 			btrfs_item_key_to_cpu(path->nodes[level], key,
1293 					      path->slots[level] + 1);
1294 		else
1295 			btrfs_node_key_to_cpu(path->nodes[level], key,
1296 					      path->slots[level] + 1);
1297 		return 0;
1298 	}
1299 	return 1;
1300 }
1301 
1302 /*
1303  * look for an inline back ref. if the back ref is found, *ref_ret is
1304  * set to the address of the inline back ref, and 0 is returned.
1305  *
1306  * if the back ref isn't found, *ref_ret is set to the address where it
1307  * should be inserted, and -ENOENT is returned.
1308  *
1309  * if insert is true and there are too many inline back refs, the path
1310  * points to the extent item, and -EAGAIN is returned.
1311  *
1312  * NOTE: inline back refs are ordered in the same way that back ref
1313  *	 items in the tree are ordered.
1314  */
1315 static noinline_for_stack
1316 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1317 				 struct btrfs_root *root,
1318 				 struct btrfs_path *path,
1319 				 struct btrfs_extent_inline_ref **ref_ret,
1320 				 u64 bytenr, u64 num_bytes,
1321 				 u64 parent, u64 root_objectid,
1322 				 u64 owner, u64 offset, int insert)
1323 {
1324 	struct btrfs_key key;
1325 	struct extent_buffer *leaf;
1326 	struct btrfs_extent_item *ei;
1327 	struct btrfs_extent_inline_ref *iref;
1328 	u64 flags;
1329 	u64 item_size;
1330 	unsigned long ptr;
1331 	unsigned long end;
1332 	int extra_size;
1333 	int type;
1334 	int want;
1335 	int ret;
1336 	int err = 0;
1337 
1338 	key.objectid = bytenr;
1339 	key.type = BTRFS_EXTENT_ITEM_KEY;
1340 	key.offset = num_bytes;
1341 
1342 	want = extent_ref_type(parent, owner);
1343 	if (insert) {
1344 		extra_size = btrfs_extent_inline_ref_size(want);
1345 		path->keep_locks = 1;
1346 	} else
1347 		extra_size = -1;
1348 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1349 	if (ret < 0) {
1350 		err = ret;
1351 		goto out;
1352 	}
1353 	BUG_ON(ret);
1354 
1355 	leaf = path->nodes[0];
1356 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1357 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1358 	if (item_size < sizeof(*ei)) {
1359 		if (!insert) {
1360 			err = -ENOENT;
1361 			goto out;
1362 		}
1363 		ret = convert_extent_item_v0(trans, root, path, owner,
1364 					     extra_size);
1365 		if (ret < 0) {
1366 			err = ret;
1367 			goto out;
1368 		}
1369 		leaf = path->nodes[0];
1370 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1371 	}
1372 #endif
1373 	BUG_ON(item_size < sizeof(*ei));
1374 
1375 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1376 	flags = btrfs_extent_flags(leaf, ei);
1377 
1378 	ptr = (unsigned long)(ei + 1);
1379 	end = (unsigned long)ei + item_size;
1380 
1381 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1382 		ptr += sizeof(struct btrfs_tree_block_info);
1383 		BUG_ON(ptr > end);
1384 	} else {
1385 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1386 	}
1387 
1388 	err = -ENOENT;
1389 	while (1) {
1390 		if (ptr >= end) {
1391 			WARN_ON(ptr > end);
1392 			break;
1393 		}
1394 		iref = (struct btrfs_extent_inline_ref *)ptr;
1395 		type = btrfs_extent_inline_ref_type(leaf, iref);
1396 		if (want < type)
1397 			break;
1398 		if (want > type) {
1399 			ptr += btrfs_extent_inline_ref_size(type);
1400 			continue;
1401 		}
1402 
1403 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1404 			struct btrfs_extent_data_ref *dref;
1405 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1406 			if (match_extent_data_ref(leaf, dref, root_objectid,
1407 						  owner, offset)) {
1408 				err = 0;
1409 				break;
1410 			}
1411 			if (hash_extent_data_ref_item(leaf, dref) <
1412 			    hash_extent_data_ref(root_objectid, owner, offset))
1413 				break;
1414 		} else {
1415 			u64 ref_offset;
1416 			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1417 			if (parent > 0) {
1418 				if (parent == ref_offset) {
1419 					err = 0;
1420 					break;
1421 				}
1422 				if (ref_offset < parent)
1423 					break;
1424 			} else {
1425 				if (root_objectid == ref_offset) {
1426 					err = 0;
1427 					break;
1428 				}
1429 				if (ref_offset < root_objectid)
1430 					break;
1431 			}
1432 		}
1433 		ptr += btrfs_extent_inline_ref_size(type);
1434 	}
1435 	if (err == -ENOENT && insert) {
1436 		if (item_size + extra_size >=
1437 		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1438 			err = -EAGAIN;
1439 			goto out;
1440 		}
1441 		/*
1442 		 * To add a new inline back ref, we have to make sure
1443 		 * there is no corresponding back ref item.
1444 		 * For simplicity, we just do not add a new inline back
1445 		 * ref if there is any kind of item for this block.
1446 		 */
1447 		if (find_next_key(path, 0, &key) == 0 &&
1448 		    key.objectid == bytenr &&
1449 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1450 			err = -EAGAIN;
1451 			goto out;
1452 		}
1453 	}
1454 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1455 out:
1456 	if (insert) {
1457 		path->keep_locks = 0;
1458 		btrfs_unlock_up_safe(path, 1);
1459 	}
1460 	return err;
1461 }
1462 
1463 /*
1464  * helper to add new inline back ref
1465  */
1466 static noinline_for_stack
1467 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1468 				struct btrfs_root *root,
1469 				struct btrfs_path *path,
1470 				struct btrfs_extent_inline_ref *iref,
1471 				u64 parent, u64 root_objectid,
1472 				u64 owner, u64 offset, int refs_to_add,
1473 				struct btrfs_delayed_extent_op *extent_op)
1474 {
1475 	struct extent_buffer *leaf;
1476 	struct btrfs_extent_item *ei;
1477 	unsigned long ptr;
1478 	unsigned long end;
1479 	unsigned long item_offset;
1480 	u64 refs;
1481 	int size;
1482 	int type;
1483 	int ret;
1484 
1485 	leaf = path->nodes[0];
1486 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1487 	item_offset = (unsigned long)iref - (unsigned long)ei;
1488 
1489 	type = extent_ref_type(parent, owner);
1490 	size = btrfs_extent_inline_ref_size(type);
1491 
1492 	ret = btrfs_extend_item(trans, root, path, size);
1493 	BUG_ON(ret);
1494 
1495 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1496 	refs = btrfs_extent_refs(leaf, ei);
1497 	refs += refs_to_add;
1498 	btrfs_set_extent_refs(leaf, ei, refs);
1499 	if (extent_op)
1500 		__run_delayed_extent_op(extent_op, leaf, ei);
1501 
1502 	ptr = (unsigned long)ei + item_offset;
1503 	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1504 	if (ptr < end - size)
1505 		memmove_extent_buffer(leaf, ptr + size, ptr,
1506 				      end - size - ptr);
1507 
1508 	iref = (struct btrfs_extent_inline_ref *)ptr;
1509 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1510 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1511 		struct btrfs_extent_data_ref *dref;
1512 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1513 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1514 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1515 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1516 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1517 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1518 		struct btrfs_shared_data_ref *sref;
1519 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1520 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1521 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1522 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1523 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1524 	} else {
1525 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1526 	}
1527 	btrfs_mark_buffer_dirty(leaf);
1528 	return 0;
1529 }
1530 
1531 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1532 				 struct btrfs_root *root,
1533 				 struct btrfs_path *path,
1534 				 struct btrfs_extent_inline_ref **ref_ret,
1535 				 u64 bytenr, u64 num_bytes, u64 parent,
1536 				 u64 root_objectid, u64 owner, u64 offset)
1537 {
1538 	int ret;
1539 
1540 	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1541 					   bytenr, num_bytes, parent,
1542 					   root_objectid, owner, offset, 0);
1543 	if (ret != -ENOENT)
1544 		return ret;
1545 
1546 	btrfs_release_path(root, path);
1547 	*ref_ret = NULL;
1548 
1549 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1550 		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1551 					    root_objectid);
1552 	} else {
1553 		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1554 					     root_objectid, owner, offset);
1555 	}
1556 	return ret;
1557 }
1558 
1559 /*
1560  * helper to update/remove inline back ref
1561  */
1562 static noinline_for_stack
1563 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1564 				 struct btrfs_root *root,
1565 				 struct btrfs_path *path,
1566 				 struct btrfs_extent_inline_ref *iref,
1567 				 int refs_to_mod,
1568 				 struct btrfs_delayed_extent_op *extent_op)
1569 {
1570 	struct extent_buffer *leaf;
1571 	struct btrfs_extent_item *ei;
1572 	struct btrfs_extent_data_ref *dref = NULL;
1573 	struct btrfs_shared_data_ref *sref = NULL;
1574 	unsigned long ptr;
1575 	unsigned long end;
1576 	u32 item_size;
1577 	int size;
1578 	int type;
1579 	int ret;
1580 	u64 refs;
1581 
1582 	leaf = path->nodes[0];
1583 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1584 	refs = btrfs_extent_refs(leaf, ei);
1585 	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1586 	refs += refs_to_mod;
1587 	btrfs_set_extent_refs(leaf, ei, refs);
1588 	if (extent_op)
1589 		__run_delayed_extent_op(extent_op, leaf, ei);
1590 
1591 	type = btrfs_extent_inline_ref_type(leaf, iref);
1592 
1593 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1594 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1595 		refs = btrfs_extent_data_ref_count(leaf, dref);
1596 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1597 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1598 		refs = btrfs_shared_data_ref_count(leaf, sref);
1599 	} else {
1600 		refs = 1;
1601 		BUG_ON(refs_to_mod != -1);
1602 	}
1603 
1604 	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1605 	refs += refs_to_mod;
1606 
1607 	if (refs > 0) {
1608 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1609 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1610 		else
1611 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1612 	} else {
1613 		size = btrfs_extent_inline_ref_size(type);
1614 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1615 		ptr = (unsigned long)iref;
1616 		end = (unsigned long)ei + item_size;
1617 		if (ptr + size < end)
1618 			memmove_extent_buffer(leaf, ptr, ptr + size,
1619 					      end - ptr - size);
1620 		item_size -= size;
1621 		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1622 		BUG_ON(ret);
1623 	}
1624 	btrfs_mark_buffer_dirty(leaf);
1625 	return 0;
1626 }
1627 
1628 static noinline_for_stack
1629 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1630 				 struct btrfs_root *root,
1631 				 struct btrfs_path *path,
1632 				 u64 bytenr, u64 num_bytes, u64 parent,
1633 				 u64 root_objectid, u64 owner,
1634 				 u64 offset, int refs_to_add,
1635 				 struct btrfs_delayed_extent_op *extent_op)
1636 {
1637 	struct btrfs_extent_inline_ref *iref;
1638 	int ret;
1639 
1640 	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1641 					   bytenr, num_bytes, parent,
1642 					   root_objectid, owner, offset, 1);
1643 	if (ret == 0) {
1644 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1645 		ret = update_inline_extent_backref(trans, root, path, iref,
1646 						   refs_to_add, extent_op);
1647 	} else if (ret == -ENOENT) {
1648 		ret = setup_inline_extent_backref(trans, root, path, iref,
1649 						  parent, root_objectid,
1650 						  owner, offset, refs_to_add,
1651 						  extent_op);
1652 	}
1653 	return ret;
1654 }
1655 
1656 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1657 				 struct btrfs_root *root,
1658 				 struct btrfs_path *path,
1659 				 u64 bytenr, u64 parent, u64 root_objectid,
1660 				 u64 owner, u64 offset, int refs_to_add)
1661 {
1662 	int ret;
1663 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1664 		BUG_ON(refs_to_add != 1);
1665 		ret = insert_tree_block_ref(trans, root, path, bytenr,
1666 					    parent, root_objectid);
1667 	} else {
1668 		ret = insert_extent_data_ref(trans, root, path, bytenr,
1669 					     parent, root_objectid,
1670 					     owner, offset, refs_to_add);
1671 	}
1672 	return ret;
1673 }
1674 
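/*
 * drop back refs previously located by lookup_extent_backref(): inline refs
 * are updated in place, keyed data refs go through remove_extent_data_ref(),
 * and keyed tree block refs are deleted outright
 */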
1675 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1676 				 struct btrfs_root *root,
1677 				 struct btrfs_path *path,
1678 				 struct btrfs_extent_inline_ref *iref,
1679 				 int refs_to_drop, int is_data)
1680 {
1681 	int ret;
1682 
1683 	BUG_ON(!is_data && refs_to_drop != 1);
1684 	if (iref) {
1685 		ret = update_inline_extent_backref(trans, root, path, iref,
1686 						   -refs_to_drop, NULL);
1687 	} else if (is_data) {
1688 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1689 	} else {
1690 		ret = btrfs_del_item(trans, root, path);
1691 	}
1692 	return ret;
1693 }
1694 
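/* the block layer takes 512-byte sectors, hence the >> 9 conversions */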
1695 static void btrfs_issue_discard(struct block_device *bdev,
1696 				u64 start, u64 len)
1697 {
1698 	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
1699 			BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
1700 }
1701 
1702 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1703 				u64 num_bytes)
1704 {
1705 	int ret;
1706 	u64 map_length = num_bytes;
1707 	struct btrfs_multi_bio *multi = NULL;
1708 
1709 	if (!btrfs_test_opt(root, DISCARD))
1710 		return 0;
1711 
1712 	/* Tell the block device(s) that the sectors can be discarded */
1713 	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1714 			      bytenr, &map_length, &multi, 0);
1715 	if (!ret) {
1716 		struct btrfs_bio_stripe *stripe = multi->stripes;
1717 		int i;
1718 
1719 		if (map_length > num_bytes)
1720 			map_length = num_bytes;
1721 
1722 		for (i = 0; i < multi->num_stripes; i++, stripe++) {
1723 			btrfs_issue_discard(stripe->dev->bdev,
1724 					    stripe->physical,
1725 					    map_length);
1726 		}
1727 		kfree(multi);
1728 	}
1729 
1730 	return ret;
1731 }
1732 
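/*
 * queue a delayed ref that adds one reference to an extent: a tree ref for
 * owners below BTRFS_FIRST_FREE_OBJECTID, a data ref otherwise
 */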
1733 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1734 			 struct btrfs_root *root,
1735 			 u64 bytenr, u64 num_bytes, u64 parent,
1736 			 u64 root_objectid, u64 owner, u64 offset)
1737 {
1738 	int ret;
1739 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1740 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
1741 
1742 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1743 		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1744 					parent, root_objectid, (int)owner,
1745 					BTRFS_ADD_DELAYED_REF, NULL);
1746 	} else {
1747 		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1748 					parent, root_objectid, owner, offset,
1749 					BTRFS_ADD_DELAYED_REF, NULL);
1750 	}
1751 	return ret;
1752 }
1753 
1754 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1755 				  struct btrfs_root *root,
1756 				  u64 bytenr, u64 num_bytes,
1757 				  u64 parent, u64 root_objectid,
1758 				  u64 owner, u64 offset, int refs_to_add,
1759 				  struct btrfs_delayed_extent_op *extent_op)
1760 {
1761 	struct btrfs_path *path;
1762 	struct extent_buffer *leaf;
1763 	struct btrfs_extent_item *item;
1764 	u64 refs;
1765 	int ret;
1766 	int err = 0;
1767 
1768 	path = btrfs_alloc_path();
1769 	if (!path)
1770 		return -ENOMEM;
1771 
1772 	path->reada = 1;
1773 	path->leave_spinning = 1;
1774 	/* this will set up the path even if it fails to insert the back ref */
1775 	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1776 					   path, bytenr, num_bytes, parent,
1777 					   root_objectid, owner, offset,
1778 					   refs_to_add, extent_op);
1779 	if (ret == 0)
1780 		goto out;
1781 
1782 	if (ret != -EAGAIN) {
1783 		err = ret;
1784 		goto out;
1785 	}
1786 
1787 	leaf = path->nodes[0];
1788 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1789 	refs = btrfs_extent_refs(leaf, item);
1790 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1791 	if (extent_op)
1792 		__run_delayed_extent_op(extent_op, leaf, item);
1793 
1794 	btrfs_mark_buffer_dirty(leaf);
1795 	btrfs_release_path(root->fs_info->extent_root, path);
1796 
1797 	path->reada = 1;
1798 	path->leave_spinning = 1;
1799 
1800 	/* now insert the actual backref */
1801 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
1802 				    path, bytenr, parent, root_objectid,
1803 				    owner, offset, refs_to_add);
1804 	BUG_ON(ret);
1805 out:
1806 	btrfs_free_path(path);
1807 	return err;
1808 }
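
/*
 * Editorial note: insert_inline_extent_backref() returning -EAGAIN is the
 * "extent item found, but the new backref would not fit inline" case.  The
 * code above then bumps the ref count on the extent item the failed insert
 * left in the path, applies any pending extent op, and falls back to
 * insert_extent_backref() to add a separate keyed backref item.  A failure
 * of that second insert is fatal (BUG_ON) because the ref count update has
 * already been written into the leaf.
 */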
1809 
1810 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1811 				struct btrfs_root *root,
1812 				struct btrfs_delayed_ref_node *node,
1813 				struct btrfs_delayed_extent_op *extent_op,
1814 				int insert_reserved)
1815 {
1816 	int ret = 0;
1817 	struct btrfs_delayed_data_ref *ref;
1818 	struct btrfs_key ins;
1819 	u64 parent = 0;
1820 	u64 ref_root = 0;
1821 	u64 flags = 0;
1822 
1823 	ins.objectid = node->bytenr;
1824 	ins.offset = node->num_bytes;
1825 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1826 
1827 	ref = btrfs_delayed_node_to_data_ref(node);
1828 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1829 		parent = ref->parent;
1830 	else
1831 		ref_root = ref->root;
1832 
1833 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1834 		if (extent_op) {
1835 			BUG_ON(extent_op->update_key);
1836 			flags |= extent_op->flags_to_set;
1837 		}
1838 		ret = alloc_reserved_file_extent(trans, root,
1839 						 parent, ref_root, flags,
1840 						 ref->objectid, ref->offset,
1841 						 &ins, node->ref_mod);
1842 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1843 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1844 					     node->num_bytes, parent,
1845 					     ref_root, ref->objectid,
1846 					     ref->offset, node->ref_mod,
1847 					     extent_op);
1848 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1849 		ret = __btrfs_free_extent(trans, root, node->bytenr,
1850 					  node->num_bytes, parent,
1851 					  ref_root, ref->objectid,
1852 					  ref->offset, node->ref_mod,
1853 					  extent_op);
1854 	} else {
1855 		BUG();
1856 	}
1857 	return ret;
1858 }
1859 
1860 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1861 				    struct extent_buffer *leaf,
1862 				    struct btrfs_extent_item *ei)
1863 {
1864 	u64 flags = btrfs_extent_flags(leaf, ei);
1865 	if (extent_op->update_flags) {
1866 		flags |= extent_op->flags_to_set;
1867 		btrfs_set_extent_flags(leaf, ei, flags);
1868 	}
1869 
1870 	if (extent_op->update_key) {
1871 		struct btrfs_tree_block_info *bi;
1872 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1873 		bi = (struct btrfs_tree_block_info *)(ei + 1);
1874 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1875 	}
1876 }
1877 
1878 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1879 				 struct btrfs_root *root,
1880 				 struct btrfs_delayed_ref_node *node,
1881 				 struct btrfs_delayed_extent_op *extent_op)
1882 {
1883 	struct btrfs_key key;
1884 	struct btrfs_path *path;
1885 	struct btrfs_extent_item *ei;
1886 	struct extent_buffer *leaf;
1887 	u32 item_size;
1888 	int ret;
1889 	int err = 0;
1890 
1891 	path = btrfs_alloc_path();
1892 	if (!path)
1893 		return -ENOMEM;
1894 
1895 	key.objectid = node->bytenr;
1896 	key.type = BTRFS_EXTENT_ITEM_KEY;
1897 	key.offset = node->num_bytes;
1898 
1899 	path->reada = 1;
1900 	path->leave_spinning = 1;
1901 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1902 				path, 0, 1);
1903 	if (ret < 0) {
1904 		err = ret;
1905 		goto out;
1906 	}
1907 	if (ret > 0) {
1908 		err = -EIO;
1909 		goto out;
1910 	}
1911 
1912 	leaf = path->nodes[0];
1913 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1914 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1915 	if (item_size < sizeof(*ei)) {
1916 		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1917 					     path, (u64)-1, 0);
1918 		if (ret < 0) {
1919 			err = ret;
1920 			goto out;
1921 		}
1922 		leaf = path->nodes[0];
1923 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1924 	}
1925 #endif
1926 	BUG_ON(item_size < sizeof(*ei));
1927 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1928 	__run_delayed_extent_op(extent_op, leaf, ei);
1929 
1930 	btrfs_mark_buffer_dirty(leaf);
1931 out:
1932 	btrfs_free_path(path);
1933 	return err;
1934 }
1935 
1936 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1937 				struct btrfs_root *root,
1938 				struct btrfs_delayed_ref_node *node,
1939 				struct btrfs_delayed_extent_op *extent_op,
1940 				int insert_reserved)
1941 {
1942 	int ret = 0;
1943 	struct btrfs_delayed_tree_ref *ref;
1944 	struct btrfs_key ins;
1945 	u64 parent = 0;
1946 	u64 ref_root = 0;
1947 
1948 	ins.objectid = node->bytenr;
1949 	ins.offset = node->num_bytes;
1950 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1951 
1952 	ref = btrfs_delayed_node_to_tree_ref(node);
1953 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1954 		parent = ref->parent;
1955 	else
1956 		ref_root = ref->root;
1957 
1958 	BUG_ON(node->ref_mod != 1);
1959 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1960 		BUG_ON(!extent_op || !extent_op->update_flags ||
1961 		       !extent_op->update_key);
1962 		ret = alloc_reserved_tree_block(trans, root,
1963 						parent, ref_root,
1964 						extent_op->flags_to_set,
1965 						&extent_op->key,
1966 						ref->level, &ins);
1967 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1968 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1969 					     node->num_bytes, parent, ref_root,
1970 					     ref->level, 0, 1, extent_op);
1971 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1972 		ret = __btrfs_free_extent(trans, root, node->bytenr,
1973 					  node->num_bytes, parent, ref_root,
1974 					  ref->level, 0, 1, extent_op);
1975 	} else {
1976 		BUG();
1977 	}
1978 	return ret;
1979 }
1980 
1981 /* helper function to actually process a single delayed ref entry */
1982 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1983 			       struct btrfs_root *root,
1984 			       struct btrfs_delayed_ref_node *node,
1985 			       struct btrfs_delayed_extent_op *extent_op,
1986 			       int insert_reserved)
1987 {
1988 	int ret;
1989 	if (btrfs_delayed_ref_is_head(node)) {
1990 		struct btrfs_delayed_ref_head *head;
1991 		/*
1992 		 * we've hit the end of the chain and we were supposed
1993 		 * to insert this extent into the tree.  But it got
1994 		 * deleted before we ever needed to insert it, so all
1995 		 * we have to do is clean up the accounting
1996 		 */
1997 		BUG_ON(extent_op);
1998 		head = btrfs_delayed_node_to_head(node);
1999 		if (insert_reserved) {
2000 			btrfs_pin_extent(root, node->bytenr,
2001 					 node->num_bytes, 1);
2002 			if (head->is_data) {
2003 				ret = btrfs_del_csums(trans, root,
2004 						      node->bytenr,
2005 						      node->num_bytes);
2006 				BUG_ON(ret);
2007 			}
2008 		}
2009 		mutex_unlock(&head->mutex);
2010 		return 0;
2011 	}
2012 
2013 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2014 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2015 		ret = run_delayed_tree_ref(trans, root, node, extent_op,
2016 					   insert_reserved);
2017 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2018 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2019 		ret = run_delayed_data_ref(trans, root, node, extent_op,
2020 					   insert_reserved);
2021 	else
2022 		BUG();
2023 	return ret;
2024 }
2025 
2026 static noinline struct btrfs_delayed_ref_node *
2027 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2028 {
2029 	struct rb_node *node;
2030 	struct btrfs_delayed_ref_node *ref;
2031 	int action = BTRFS_ADD_DELAYED_REF;
2032 again:
2033 	/*
2034 	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2035 	 * this prevents ref count from going down to zero when
2036 	 * there still are pending delayed ref.
2037 	 */
2038 	node = rb_prev(&head->node.rb_node);
2039 	while (1) {
2040 		if (!node)
2041 			break;
2042 		ref = rb_entry(node, struct btrfs_delayed_ref_node,
2043 				rb_node);
2044 		if (ref->bytenr != head->node.bytenr)
2045 			break;
2046 		if (ref->action == action)
2047 			return ref;
2048 		node = rb_prev(node);
2049 	}
2050 	if (action == BTRFS_ADD_DELAYED_REF) {
2051 		action = BTRFS_DROP_DELAYED_REF;
2052 		goto again;
2053 	}
2054 	return NULL;
2055 }
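
/*
 * Editorial sketch: given a head at bytenr X with pending refs
 * [DROP, ADD, DROP], select_delayed_ref() returns the ADD first and the
 * DROPs only on later calls (after each processed ref has been erased
 * from the rbtree), so the on-disk ref count of X never transiently
 * drops to zero and frees an extent that a still-queued ADD expects to
 * exist.
 */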
2056 
2057 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2058 				       struct btrfs_root *root,
2059 				       struct list_head *cluster)
2060 {
2061 	struct btrfs_delayed_ref_root *delayed_refs;
2062 	struct btrfs_delayed_ref_node *ref;
2063 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2064 	struct btrfs_delayed_extent_op *extent_op;
2065 	int ret;
2066 	int count = 0;
2067 	int must_insert_reserved = 0;
2068 
2069 	delayed_refs = &trans->transaction->delayed_refs;
2070 	while (1) {
2071 		if (!locked_ref) {
2072 			/* pick a new head ref from the cluster list */
2073 			if (list_empty(cluster))
2074 				break;
2075 
2076 			locked_ref = list_entry(cluster->next,
2077 				     struct btrfs_delayed_ref_head, cluster);
2078 
2079 			/* grab the lock that says we are going to process
2080 			 * all the refs for this head */
2081 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2082 
2083 			/*
2084 			 * we may have dropped the spin lock to get the head
2085 			 * mutex lock, and that might have given someone else
2086 			 * time to free the head.  If that's true, it has been
2087 			 * removed from our list and we can move on.
2088 			 */
2089 			if (ret == -EAGAIN) {
2090 				locked_ref = NULL;
2091 				count++;
2092 				continue;
2093 			}
2094 		}
2095 
2096 		/*
2097 		 * record the must insert reserved flag before we
2098 		 * drop the spin lock.
2099 		 */
2100 		must_insert_reserved = locked_ref->must_insert_reserved;
2101 		locked_ref->must_insert_reserved = 0;
2102 
2103 		extent_op = locked_ref->extent_op;
2104 		locked_ref->extent_op = NULL;
2105 
2106 		/*
2107 		 * locked_ref is the head node, so we have to go one
2108 		 * node back for any delayed ref updates
2109 		 */
2110 		ref = select_delayed_ref(locked_ref);
2111 		if (!ref) {
2112 			/* All delayed refs have been processed; go ahead
2113 			 * and send the head node to run_one_delayed_ref,
2114 			 * so that any accounting fixes can happen.
2115 			 */
2116 			ref = &locked_ref->node;
2117 
2118 			if (extent_op && must_insert_reserved) {
2119 				kfree(extent_op);
2120 				extent_op = NULL;
2121 			}
2122 
2123 			if (extent_op) {
2124 				spin_unlock(&delayed_refs->lock);
2125 
2126 				ret = run_delayed_extent_op(trans, root,
2127 							    ref, extent_op);
2128 				BUG_ON(ret);
2129 				kfree(extent_op);
2130 
2131 				cond_resched();
2132 				spin_lock(&delayed_refs->lock);
2133 				continue;
2134 			}
2135 
2136 			list_del_init(&locked_ref->cluster);
2137 			locked_ref = NULL;
2138 		}
2139 
2140 		ref->in_tree = 0;
2141 		rb_erase(&ref->rb_node, &delayed_refs->root);
2142 		delayed_refs->num_entries--;
2143 
2144 		spin_unlock(&delayed_refs->lock);
2145 
2146 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
2147 					  must_insert_reserved);
2148 		BUG_ON(ret);
2149 
2150 		btrfs_put_delayed_ref(ref);
2151 		kfree(extent_op);
2152 		count++;
2153 
2154 		cond_resched();
2155 		spin_lock(&delayed_refs->lock);
2156 	}
2157 	return count;
2158 }
2159 
2160 /*
2161  * this starts processing the delayed reference count updates and
2162  * extent insertions we have queued up so far.  count can be
2163  * 0, which means to process everything in the tree at the start
2164  * of the run (but not newly added entries), or it can be some target
2165  * number you'd like to process.
2166  */
2167 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2168 			   struct btrfs_root *root, unsigned long count)
2169 {
2170 	struct rb_node *node;
2171 	struct btrfs_delayed_ref_root *delayed_refs;
2172 	struct btrfs_delayed_ref_node *ref;
2173 	struct list_head cluster;
2174 	int ret;
2175 	int run_all = count == (unsigned long)-1;
2176 	int run_most = 0;
2177 
2178 	if (root == root->fs_info->extent_root)
2179 		root = root->fs_info->tree_root;
2180 
2181 	delayed_refs = &trans->transaction->delayed_refs;
2182 	INIT_LIST_HEAD(&cluster);
2183 again:
2184 	spin_lock(&delayed_refs->lock);
2185 	if (count == 0) {
2186 		count = delayed_refs->num_entries * 2;
2187 		run_most = 1;
2188 	}
2189 	while (1) {
2190 		if (!(run_all || run_most) &&
2191 		    delayed_refs->num_heads_ready < 64)
2192 			break;
2193 
2194 		/*
2195 		 * go find something we can process in the rbtree.  We start at
2196 		 * the beginning of the tree, and then build a cluster
2197 		 * of refs to process starting at the first one we are able to
2198 		 * lock.
2199 		 */
2200 		ret = btrfs_find_ref_cluster(trans, &cluster,
2201 					     delayed_refs->run_delayed_start);
2202 		if (ret)
2203 			break;
2204 
2205 		ret = run_clustered_refs(trans, root, &cluster);
2206 		BUG_ON(ret < 0);
2207 
2208 		count -= min_t(unsigned long, ret, count);
2209 
2210 		if (count == 0)
2211 			break;
2212 	}
2213 
2214 	if (run_all) {
2215 		node = rb_first(&delayed_refs->root);
2216 		if (!node)
2217 			goto out;
2218 		count = (unsigned long)-1;
2219 
2220 		while (node) {
2221 			ref = rb_entry(node, struct btrfs_delayed_ref_node,
2222 				       rb_node);
2223 			if (btrfs_delayed_ref_is_head(ref)) {
2224 				struct btrfs_delayed_ref_head *head;
2225 
2226 				head = btrfs_delayed_node_to_head(ref);
2227 				atomic_inc(&ref->refs);
2228 
2229 				spin_unlock(&delayed_refs->lock);
2230 				mutex_lock(&head->mutex);
2231 				mutex_unlock(&head->mutex);
2232 
2233 				btrfs_put_delayed_ref(ref);
2234 				cond_resched();
2235 				goto again;
2236 			}
2237 			node = rb_next(node);
2238 		}
2239 		spin_unlock(&delayed_refs->lock);
2240 		schedule_timeout(1);
2241 		goto again;
2242 	}
2243 out:
2244 	spin_unlock(&delayed_refs->lock);
2245 	return 0;
2246 }
2247 
2248 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2249 				struct btrfs_root *root,
2250 				u64 bytenr, u64 num_bytes, u64 flags,
2251 				int is_data)
2252 {
2253 	struct btrfs_delayed_extent_op *extent_op;
2254 	int ret;
2255 
2256 	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2257 	if (!extent_op)
2258 		return -ENOMEM;
2259 
2260 	extent_op->flags_to_set = flags;
2261 	extent_op->update_flags = 1;
2262 	extent_op->update_key = 0;
2263 	extent_op->is_data = is_data ? 1 : 0;
2264 
2265 	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2266 	if (ret)
2267 		kfree(extent_op);
2268 	return ret;
2269 }
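
/*
 * Editorial sketch: a hypothetical caller that wants to mark a tree
 * block's extent with the FULL_BACKREF flag, without touching its key,
 * might do:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start,
 *					  buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *					  0);
 *
 * where buf is an extent_buffer for the block.  The update is queued as
 * a delayed extent op and applied by run_delayed_extent_op() the next
 * time the delayed refs are run.
 */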
2270 
2271 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2272 				      struct btrfs_root *root,
2273 				      struct btrfs_path *path,
2274 				      u64 objectid, u64 offset, u64 bytenr)
2275 {
2276 	struct btrfs_delayed_ref_head *head;
2277 	struct btrfs_delayed_ref_node *ref;
2278 	struct btrfs_delayed_data_ref *data_ref;
2279 	struct btrfs_delayed_ref_root *delayed_refs;
2280 	struct rb_node *node;
2281 	int ret = 0;
2282 
2283 	ret = -ENOENT;
2284 	delayed_refs = &trans->transaction->delayed_refs;
2285 	spin_lock(&delayed_refs->lock);
2286 	head = btrfs_find_delayed_ref_head(trans, bytenr);
2287 	if (!head)
2288 		goto out;
2289 
2290 	if (!mutex_trylock(&head->mutex)) {
2291 		atomic_inc(&head->node.refs);
2292 		spin_unlock(&delayed_refs->lock);
2293 
2294 		btrfs_release_path(root->fs_info->extent_root, path);
2295 
2296 		mutex_lock(&head->mutex);
2297 		mutex_unlock(&head->mutex);
2298 		btrfs_put_delayed_ref(&head->node);
2299 		return -EAGAIN;
2300 	}
2301 
2302 	node = rb_prev(&head->node.rb_node);
2303 	if (!node)
2304 		goto out_unlock;
2305 
2306 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2307 
2308 	if (ref->bytenr != bytenr)
2309 		goto out_unlock;
2310 
2311 	ret = 1;
2312 	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2313 		goto out_unlock;
2314 
2315 	data_ref = btrfs_delayed_node_to_data_ref(ref);
2316 
2317 	node = rb_prev(node);
2318 	if (node) {
2319 		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2320 		if (ref->bytenr == bytenr)
2321 			goto out_unlock;
2322 	}
2323 
2324 	if (data_ref->root != root->root_key.objectid ||
2325 	    data_ref->objectid != objectid || data_ref->offset != offset)
2326 		goto out_unlock;
2327 
2328 	ret = 0;
2329 out_unlock:
2330 	mutex_unlock(&head->mutex);
2331 out:
2332 	spin_unlock(&delayed_refs->lock);
2333 	return ret;
2334 }
2335 
2336 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2337 					struct btrfs_root *root,
2338 					struct btrfs_path *path,
2339 					u64 objectid, u64 offset, u64 bytenr)
2340 {
2341 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2342 	struct extent_buffer *leaf;
2343 	struct btrfs_extent_data_ref *ref;
2344 	struct btrfs_extent_inline_ref *iref;
2345 	struct btrfs_extent_item *ei;
2346 	struct btrfs_key key;
2347 	u32 item_size;
2348 	int ret;
2349 
2350 	key.objectid = bytenr;
2351 	key.offset = (u64)-1;
2352 	key.type = BTRFS_EXTENT_ITEM_KEY;
2353 
2354 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2355 	if (ret < 0)
2356 		goto out;
2357 	BUG_ON(ret == 0);
2358 
2359 	ret = -ENOENT;
2360 	if (path->slots[0] == 0)
2361 		goto out;
2362 
2363 	path->slots[0]--;
2364 	leaf = path->nodes[0];
2365 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2366 
2367 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2368 		goto out;
2369 
2370 	ret = 1;
2371 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2372 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2373 	if (item_size < sizeof(*ei)) {
2374 		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2375 		goto out;
2376 	}
2377 #endif
2378 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2379 
2380 	if (item_size != sizeof(*ei) +
2381 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2382 		goto out;
2383 
2384 	if (btrfs_extent_generation(leaf, ei) <=
2385 	    btrfs_root_last_snapshot(&root->root_item))
2386 		goto out;
2387 
2388 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2389 	if (btrfs_extent_inline_ref_type(leaf, iref) !=
2390 	    BTRFS_EXTENT_DATA_REF_KEY)
2391 		goto out;
2392 
2393 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2394 	if (btrfs_extent_refs(leaf, ei) !=
2395 	    btrfs_extent_data_ref_count(leaf, ref) ||
2396 	    btrfs_extent_data_ref_root(leaf, ref) !=
2397 	    root->root_key.objectid ||
2398 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2399 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2400 		goto out;
2401 
2402 	ret = 0;
2403 out:
2404 	return ret;
2405 }
2406 
2407 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2408 			  struct btrfs_root *root,
2409 			  u64 objectid, u64 offset, u64 bytenr)
2410 {
2411 	struct btrfs_path *path;
2412 	int ret;
2413 	int ret2;
2414 
2415 	path = btrfs_alloc_path();
2416 	if (!path)
2417 		return -ENOENT;
2418 
2419 	do {
2420 		ret = check_committed_ref(trans, root, path, objectid,
2421 					  offset, bytenr);
2422 		if (ret && ret != -ENOENT)
2423 			goto out;
2424 
2425 		ret2 = check_delayed_ref(trans, root, path, objectid,
2426 					 offset, bytenr);
2427 	} while (ret2 == -EAGAIN);
2428 
2429 	if (ret2 && ret2 != -ENOENT) {
2430 		ret = ret2;
2431 		goto out;
2432 	}
2433 
2434 	if (ret != -ENOENT || ret2 != -ENOENT)
2435 		ret = 0;
2436 out:
2437 	btrfs_free_path(path);
2438 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2439 		WARN_ON(ret > 0);
2440 	return ret;
2441 }
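
/*
 * Editorial note: a return of 0 from btrfs_cross_ref_exist() means that
 * neither the committed extent tree nor the delayed ref queue shows a
 * reference from another root, so the extent is exclusively owned and a
 * nodatacow-style overwrite is safe.  Any nonzero return (1, or -ENOENT
 * when the extent cannot be found at all) is treated by callers as "a
 * cross reference may exist" and forces CoW.  check_delayed_ref()
 * returning -EAGAIN just means the head mutex was contended and both
 * checks are restarted.
 */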
2442 
2443 #if 0
2444 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2445 		    struct extent_buffer *buf, u32 nr_extents)
2446 {
2447 	struct btrfs_key key;
2448 	struct btrfs_file_extent_item *fi;
2449 	u64 root_gen;
2450 	u32 nritems;
2451 	int i;
2452 	int level;
2453 	int ret = 0;
2454 	int shared = 0;
2455 
2456 	if (!root->ref_cows)
2457 		return 0;
2458 
2459 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2460 		shared = 0;
2461 		root_gen = root->root_key.offset;
2462 	} else {
2463 		shared = 1;
2464 		root_gen = trans->transid - 1;
2465 	}
2466 
2467 	level = btrfs_header_level(buf);
2468 	nritems = btrfs_header_nritems(buf);
2469 
2470 	if (level == 0) {
2471 		struct btrfs_leaf_ref *ref;
2472 		struct btrfs_extent_info *info;
2473 
2474 		ref = btrfs_alloc_leaf_ref(root, nr_extents);
2475 		if (!ref) {
2476 			ret = -ENOMEM;
2477 			goto out;
2478 		}
2479 
2480 		ref->root_gen = root_gen;
2481 		ref->bytenr = buf->start;
2482 		ref->owner = btrfs_header_owner(buf);
2483 		ref->generation = btrfs_header_generation(buf);
2484 		ref->nritems = nr_extents;
2485 		info = ref->extents;
2486 
2487 		for (i = 0; nr_extents > 0 && i < nritems; i++) {
2488 			u64 disk_bytenr;
2489 			btrfs_item_key_to_cpu(buf, &key, i);
2490 			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2491 				continue;
2492 			fi = btrfs_item_ptr(buf, i,
2493 					    struct btrfs_file_extent_item);
2494 			if (btrfs_file_extent_type(buf, fi) ==
2495 			    BTRFS_FILE_EXTENT_INLINE)
2496 				continue;
2497 			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2498 			if (disk_bytenr == 0)
2499 				continue;
2500 
2501 			info->bytenr = disk_bytenr;
2502 			info->num_bytes =
2503 				btrfs_file_extent_disk_num_bytes(buf, fi);
2504 			info->objectid = key.objectid;
2505 			info->offset = key.offset;
2506 			info++;
2507 		}
2508 
2509 		ret = btrfs_add_leaf_ref(root, ref, shared);
2510 		if (ret == -EEXIST && shared) {
2511 			struct btrfs_leaf_ref *old;
2512 			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2513 			BUG_ON(!old);
2514 			btrfs_remove_leaf_ref(root, old);
2515 			btrfs_free_leaf_ref(root, old);
2516 			ret = btrfs_add_leaf_ref(root, ref, shared);
2517 		}
2518 		WARN_ON(ret);
2519 		btrfs_free_leaf_ref(root, ref);
2520 	}
2521 out:
2522 	return ret;
2523 }
2524 
2525 /* when a block goes through cow, we update the reference counts of
2526  * everything that block points to.  The internal pointers of the block
2527  * can be in just about any order, and it is likely to have clusters of
2528  * things that are close together and clusters of things that are not.
2529  *
2530  * To help reduce the seeks that come with updating all of these reference
2531  * counts, sort them by byte number before actual updates are done.
2532  *
2533  * struct refsort is used to match byte number to slot in the btree block.
2534  * we sort based on the byte number and then use the slot to actually
2535  * find the item.
2536  *
2537  * struct refsort is smaller than struct btrfs_item and smaller than
2538  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2539  * for a btree block, there's no way for a kmalloc of refsorts for a
2540  * single node to be bigger than a page.
2541  */
2542 struct refsort {
2543 	u64 bytenr;
2544 	u32 slot;
2545 };
2546 
2547 /*
2548  * for passing into sort()
2549  */
2550 static int refsort_cmp(const void *a_void, const void *b_void)
2551 {
2552 	const struct refsort *a = a_void;
2553 	const struct refsort *b = b_void;
2554 
2555 	if (a->bytenr < b->bytenr)
2556 		return -1;
2557 	if (a->bytenr > b->bytenr)
2558 		return 1;
2559 	return 0;
2560 }
2561 #endif
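
/*
 * Editorial sketch for the (currently compiled-out) refsort helpers
 * above: the intended pattern is to fill an array with one entry per
 * pointer in a btree block and sort it by disk byte number before
 * walking it, e.g.
 *
 *	sort(sorted, nritems, sizeof(struct refsort), refsort_cmp, NULL);
 *
 * so that the ref count updates hit the extent tree in roughly
 * ascending disk order, reducing seeks on rotational storage.
 */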
2562 
2563 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2564 			   struct btrfs_root *root,
2565 			   struct extent_buffer *buf,
2566 			   int full_backref, int inc)
2567 {
2568 	u64 bytenr;
2569 	u64 num_bytes;
2570 	u64 parent;
2571 	u64 ref_root;
2572 	u32 nritems;
2573 	struct btrfs_key key;
2574 	struct btrfs_file_extent_item *fi;
2575 	int i;
2576 	int level;
2577 	int ret = 0;
2578 	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2579 			    u64, u64, u64, u64, u64, u64);
2580 
2581 	ref_root = btrfs_header_owner(buf);
2582 	nritems = btrfs_header_nritems(buf);
2583 	level = btrfs_header_level(buf);
2584 
2585 	if (!root->ref_cows && level == 0)
2586 		return 0;
2587 
2588 	if (inc)
2589 		process_func = btrfs_inc_extent_ref;
2590 	else
2591 		process_func = btrfs_free_extent;
2592 
2593 	if (full_backref)
2594 		parent = buf->start;
2595 	else
2596 		parent = 0;
2597 
2598 	for (i = 0; i < nritems; i++) {
2599 		if (level == 0) {
2600 			btrfs_item_key_to_cpu(buf, &key, i);
2601 			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2602 				continue;
2603 			fi = btrfs_item_ptr(buf, i,
2604 					    struct btrfs_file_extent_item);
2605 			if (btrfs_file_extent_type(buf, fi) ==
2606 			    BTRFS_FILE_EXTENT_INLINE)
2607 				continue;
2608 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2609 			if (bytenr == 0)
2610 				continue;
2611 
2612 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2613 			key.offset -= btrfs_file_extent_offset(buf, fi);
2614 			ret = process_func(trans, root, bytenr, num_bytes,
2615 					   parent, ref_root, key.objectid,
2616 					   key.offset);
2617 			if (ret)
2618 				goto fail;
2619 		} else {
2620 			bytenr = btrfs_node_blockptr(buf, i);
2621 			num_bytes = btrfs_level_size(root, level - 1);
2622 			ret = process_func(trans, root, bytenr, num_bytes,
2623 					   parent, ref_root, level - 1, 0);
2624 			if (ret)
2625 				goto fail;
2626 		}
2627 	}
2628 	return 0;
2629 fail:
2630 	BUG();
2631 	return ret;
2632 }
2633 
2634 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2635 		  struct extent_buffer *buf, int full_backref)
2636 {
2637 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2638 }
2639 
2640 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2641 		  struct extent_buffer *buf, int full_backref)
2642 {
2643 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2644 }
2645 
2646 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2647 				 struct btrfs_root *root,
2648 				 struct btrfs_path *path,
2649 				 struct btrfs_block_group_cache *cache)
2650 {
2651 	int ret;
2652 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2653 	unsigned long bi;
2654 	struct extent_buffer *leaf;
2655 
2656 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2657 	if (ret < 0)
2658 		goto fail;
2659 	BUG_ON(ret);
2660 
2661 	leaf = path->nodes[0];
2662 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2663 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2664 	btrfs_mark_buffer_dirty(leaf);
2665 	btrfs_release_path(extent_root, path);
2666 fail:
2667 	if (ret)
2668 		return ret;
2669 	return 0;
2670 
2671 }
2672 
2673 static struct btrfs_block_group_cache *
2674 next_block_group(struct btrfs_root *root,
2675 		 struct btrfs_block_group_cache *cache)
2676 {
2677 	struct rb_node *node;
2678 	spin_lock(&root->fs_info->block_group_cache_lock);
2679 	node = rb_next(&cache->cache_node);
2680 	btrfs_put_block_group(cache);
2681 	if (node) {
2682 		cache = rb_entry(node, struct btrfs_block_group_cache,
2683 				 cache_node);
2684 		btrfs_get_block_group(cache);
2685 	} else
2686 		cache = NULL;
2687 	spin_unlock(&root->fs_info->block_group_cache_lock);
2688 	return cache;
2689 }
2690 
2691 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2692 				   struct btrfs_root *root)
2693 {
2694 	struct btrfs_block_group_cache *cache;
2695 	int err = 0;
2696 	struct btrfs_path *path;
2697 	u64 last = 0;
2698 
2699 	path = btrfs_alloc_path();
2700 	if (!path)
2701 		return -ENOMEM;
2702 
2703 	while (1) {
2704 		if (last == 0) {
2705 			err = btrfs_run_delayed_refs(trans, root,
2706 						     (unsigned long)-1);
2707 			BUG_ON(err);
2708 		}
2709 
2710 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
2711 		while (cache) {
2712 			if (cache->dirty)
2713 				break;
2714 			cache = next_block_group(root, cache);
2715 		}
2716 		if (!cache) {
2717 			if (last == 0)
2718 				break;
2719 			last = 0;
2720 			continue;
2721 		}
2722 
2723 		cache->dirty = 0;
2724 		last = cache->key.objectid + cache->key.offset;
2725 
2726 		err = write_one_cache_group(trans, root, path, cache);
2727 		BUG_ON(err);
2728 		btrfs_put_block_group(cache);
2729 	}
2730 
2731 	btrfs_free_path(path);
2732 	return 0;
2733 }
2734 
2735 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2736 {
2737 	struct btrfs_block_group_cache *block_group;
2738 	int readonly = 0;
2739 
2740 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2741 	if (!block_group || block_group->ro)
2742 		readonly = 1;
2743 	if (block_group)
2744 		btrfs_put_block_group(block_group);
2745 	return readonly;
2746 }
2747 
2748 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2749 			     u64 total_bytes, u64 bytes_used,
2750 			     struct btrfs_space_info **space_info)
2751 {
2752 	struct btrfs_space_info *found;
2753 	int i;
2754 	int factor;
2755 
2756 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2757 		     BTRFS_BLOCK_GROUP_RAID10))
2758 		factor = 2;
2759 	else
2760 		factor = 1;
2761 
2762 	found = __find_space_info(info, flags);
2763 	if (found) {
2764 		spin_lock(&found->lock);
2765 		found->total_bytes += total_bytes;
2766 		found->bytes_used += bytes_used;
2767 		found->disk_used += bytes_used * factor;
2768 		found->full = 0;
2769 		spin_unlock(&found->lock);
2770 		*space_info = found;
2771 		return 0;
2772 	}
2773 	found = kzalloc(sizeof(*found), GFP_NOFS);
2774 	if (!found)
2775 		return -ENOMEM;
2776 
2777 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2778 		INIT_LIST_HEAD(&found->block_groups[i]);
2779 	init_rwsem(&found->groups_sem);
2780 	spin_lock_init(&found->lock);
2781 	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2782 				BTRFS_BLOCK_GROUP_SYSTEM |
2783 				BTRFS_BLOCK_GROUP_METADATA);
2784 	found->total_bytes = total_bytes;
2785 	found->bytes_used = bytes_used;
2786 	found->disk_used = bytes_used * factor;
2787 	found->bytes_pinned = 0;
2788 	found->bytes_reserved = 0;
2789 	found->bytes_readonly = 0;
2790 	found->bytes_may_use = 0;
2791 	found->full = 0;
2792 	found->force_alloc = 0;
2793 	*space_info = found;
2794 	list_add_rcu(&found->list, &info->space_info);
2795 	atomic_set(&found->caching_threads, 0);
2796 	return 0;
2797 }
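
/*
 * Editorial note: "factor" above converts logical bytes_used into raw
 * disk_used.  With DUP, RAID1 or RAID10 every logical byte occupies two
 * bytes on disk, so adding a block group with bytes_used == 1GiB bumps
 * disk_used by 2GiB; single and RAID0 profiles use factor == 1.
 */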
2798 
2799 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2800 {
2801 	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2802 				   BTRFS_BLOCK_GROUP_RAID1 |
2803 				   BTRFS_BLOCK_GROUP_RAID10 |
2804 				   BTRFS_BLOCK_GROUP_DUP);
2805 	if (extra_flags) {
2806 		if (flags & BTRFS_BLOCK_GROUP_DATA)
2807 			fs_info->avail_data_alloc_bits |= extra_flags;
2808 		if (flags & BTRFS_BLOCK_GROUP_METADATA)
2809 			fs_info->avail_metadata_alloc_bits |= extra_flags;
2810 		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2811 			fs_info->avail_system_alloc_bits |= extra_flags;
2812 	}
2813 }
2814 
2815 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2816 {
2817 	u64 num_devices = root->fs_info->fs_devices->rw_devices;
2818 
2819 	if (num_devices == 1)
2820 		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2821 	if (num_devices < 4)
2822 		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2823 
2824 	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2825 	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2826 		      BTRFS_BLOCK_GROUP_RAID10))) {
2827 		flags &= ~BTRFS_BLOCK_GROUP_DUP;
2828 	}
2829 
2830 	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2831 	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2832 		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2833 	}
2834 
2835 	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2836 	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2837 	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
2838 	     (flags & BTRFS_BLOCK_GROUP_DUP)))
2839 		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2840 	return flags;
2841 }
2842 
2843 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
2844 {
2845 	if (flags & BTRFS_BLOCK_GROUP_DATA)
2846 		flags |= root->fs_info->avail_data_alloc_bits &
2847 			 root->fs_info->data_alloc_profile;
2848 	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2849 		flags |= root->fs_info->avail_system_alloc_bits &
2850 			 root->fs_info->system_alloc_profile;
2851 	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
2852 		flags |= root->fs_info->avail_metadata_alloc_bits &
2853 			 root->fs_info->metadata_alloc_profile;
2854 	return btrfs_reduce_alloc_profile(root, flags);
2855 }
2856 
2857 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
2858 {
2859 	u64 flags;
2860 
2861 	if (data)
2862 		flags = BTRFS_BLOCK_GROUP_DATA;
2863 	else if (root == root->fs_info->chunk_root)
2864 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
2865 	else
2866 		flags = BTRFS_BLOCK_GROUP_METADATA;
2867 
2868 	return get_alloc_profile(root, flags);
2869 }
2870 
2871 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2872 {
2873 	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2874 						       BTRFS_BLOCK_GROUP_DATA);
2875 }
2876 
2877 /*
2878  * This will check the space info that the inode allocates from to make sure
2879  * we have enough space for the requested number of bytes.
2880  */
2881 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
2882 {
2883 	struct btrfs_space_info *data_sinfo;
2884 	struct btrfs_root *root = BTRFS_I(inode)->root;
2885 	u64 used;
2886 	int ret = 0, committed = 0;
2887 
2888 	/* make sure bytes are sectorsize aligned */
2889 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2890 
2891 	data_sinfo = BTRFS_I(inode)->space_info;
2892 	if (!data_sinfo)
2893 		goto alloc;
2894 
2895 again:
2896 	/* make sure we have enough space to handle the data first */
2897 	spin_lock(&data_sinfo->lock);
2898 	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
2899 		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
2900 		data_sinfo->bytes_may_use;
2901 
2902 	if (used + bytes > data_sinfo->total_bytes) {
2903 		struct btrfs_trans_handle *trans;
2904 
2905 		/*
2906 		 * if we don't have enough free bytes in this space then we need
2907 		 * to alloc a new chunk.
2908 		 */
2909 		if (!data_sinfo->full) {
2910 			u64 alloc_target;
2911 
2912 			data_sinfo->force_alloc = 1;
2913 			spin_unlock(&data_sinfo->lock);
2914 alloc:
2915 			alloc_target = btrfs_get_alloc_profile(root, 1);
2916 			trans = btrfs_join_transaction(root, 1);
2917 			if (IS_ERR(trans))
2918 				return PTR_ERR(trans);
2919 
2920 			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2921 					     bytes + 2 * 1024 * 1024,
2922 					     alloc_target, 0);
2923 			btrfs_end_transaction(trans, root);
2924 			if (ret < 0)
2925 				return ret;
2926 
2927 			if (!data_sinfo) {
2928 				btrfs_set_inode_space_info(root, inode);
2929 				data_sinfo = BTRFS_I(inode)->space_info;
2930 			}
2931 			goto again;
2932 		}
2933 		spin_unlock(&data_sinfo->lock);
2934 
2935 		/* commit the current transaction and try again */
2936 		if (!committed && !root->fs_info->open_ioctl_trans) {
2937 			committed = 1;
2938 			trans = btrfs_join_transaction(root, 1);
2939 			if (IS_ERR(trans))
2940 				return PTR_ERR(trans);
2941 			ret = btrfs_commit_transaction(trans, root);
2942 			if (ret)
2943 				return ret;
2944 			goto again;
2945 		}
2946 
2947 #if 0 /* I hope we never need this code again, just in case */
2948 		printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
2949 		       "%llu bytes_reserved, " "%llu bytes_pinned, "
2950 		       "%llu bytes_readonly, %llu may use %llu total\n",
2951 		       (unsigned long long)bytes,
2952 		       (unsigned long long)data_sinfo->bytes_used,
2953 		       (unsigned long long)data_sinfo->bytes_reserved,
2954 		       (unsigned long long)data_sinfo->bytes_pinned,
2955 		       (unsigned long long)data_sinfo->bytes_readonly,
2956 		       (unsigned long long)data_sinfo->bytes_may_use,
2957 		       (unsigned long long)data_sinfo->total_bytes);
2958 #endif
2959 		return -ENOSPC;
2960 	}
2961 	data_sinfo->bytes_may_use += bytes;
2962 	BTRFS_I(inode)->reserved_bytes += bytes;
2963 	spin_unlock(&data_sinfo->lock);
2964 
2965 	return 0;
2966 }
2967 
2968 /*
2969  * called when we are clearing a delalloc extent from the
2970  * inode's io_tree, or when there was an error for whatever reason
2971  * after calling btrfs_check_data_free_space
2972  */
2973 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
2974 {
2975 	struct btrfs_root *root = BTRFS_I(inode)->root;
2976 	struct btrfs_space_info *data_sinfo;
2977 
2978 	/* make sure bytes are sectorsize aligned */
2979 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2980 
2981 	data_sinfo = BTRFS_I(inode)->space_info;
2982 	spin_lock(&data_sinfo->lock);
2983 	data_sinfo->bytes_may_use -= bytes;
2984 	BTRFS_I(inode)->reserved_bytes -= bytes;
2985 	spin_unlock(&data_sinfo->lock);
2986 }
2987 
2988 static void force_metadata_allocation(struct btrfs_fs_info *info)
2989 {
2990 	struct list_head *head = &info->space_info;
2991 	struct btrfs_space_info *found;
2992 
2993 	rcu_read_lock();
2994 	list_for_each_entry_rcu(found, head, list) {
2995 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2996 			found->force_alloc = 1;
2997 	}
2998 	rcu_read_unlock();
2999 }
3000 
3001 static int should_alloc_chunk(struct btrfs_space_info *sinfo,
3002 			      u64 alloc_bytes)
3003 {
3004 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3005 
3006 	if (sinfo->bytes_used + sinfo->bytes_reserved +
3007 	    alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3008 		return 0;
3009 
3010 	if (sinfo->bytes_used + sinfo->bytes_reserved +
3011 	    alloc_bytes < div_factor(num_bytes, 8))
3012 		return 0;
3013 
3014 	return 1;
3015 }
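
/*
 * Editorial sketch of the two thresholds above: a chunk allocation is
 * refused while more than 256MiB of slack would remain, or while less
 * than roughly 80% (div_factor(num_bytes, 8)) of the writable space is
 * committed.  For a 10GiB space with 7GiB used + reserved and a small
 * alloc_bytes:
 *
 *	7GiB + alloc_bytes + 256MiB < 10GiB	-> return 0 (no chunk)
 *
 * only when used + reserved + alloc_bytes crosses both limits does the
 * function return 1.
 */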
3016 
3017 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3018 			  struct btrfs_root *extent_root, u64 alloc_bytes,
3019 			  u64 flags, int force)
3020 {
3021 	struct btrfs_space_info *space_info;
3022 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
3023 	int ret = 0;
3024 
3025 	mutex_lock(&fs_info->chunk_mutex);
3026 
3027 	flags = btrfs_reduce_alloc_profile(extent_root, flags);
3028 
3029 	space_info = __find_space_info(extent_root->fs_info, flags);
3030 	if (!space_info) {
3031 		ret = update_space_info(extent_root->fs_info, flags,
3032 					0, 0, &space_info);
3033 		BUG_ON(ret);
3034 	}
3035 	BUG_ON(!space_info);
3036 
3037 	spin_lock(&space_info->lock);
3038 	if (space_info->force_alloc)
3039 		force = 1;
3040 	if (space_info->full) {
3041 		spin_unlock(&space_info->lock);
3042 		goto out;
3043 	}
3044 
3045 	if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
3046 		spin_unlock(&space_info->lock);
3047 		goto out;
3048 	}
3049 	spin_unlock(&space_info->lock);
3050 
3051 	/*
3052 	 * if we're doing a data chunk, go ahead and make sure that
3053 	 * we keep a reasonable number of metadata chunks allocated in the
3054 	 * FS as well.
3055 	 */
3056 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3057 		fs_info->data_chunk_allocations++;
3058 		if (!(fs_info->data_chunk_allocations %
3059 		      fs_info->metadata_ratio))
3060 			force_metadata_allocation(fs_info);
3061 	}
3062 
3063 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
3064 	spin_lock(&space_info->lock);
3065 	if (ret)
3066 		space_info->full = 1;
3067 	else
3068 		ret = 1;
3069 	space_info->force_alloc = 0;
3070 	spin_unlock(&space_info->lock);
3071 out:
3072 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
3073 	return ret;
3074 }
3075 
3076 static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
3077 				struct btrfs_root *root,
3078 				struct btrfs_space_info *sinfo, u64 num_bytes)
3079 {
3080 	int ret;
3081 	int end_trans = 0;
3082 
3083 	if (sinfo->full)
3084 		return 0;
3085 
3086 	spin_lock(&sinfo->lock);
3087 	ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
3088 	spin_unlock(&sinfo->lock);
3089 	if (!ret)
3090 		return 0;
3091 
3092 	if (!trans) {
3093 		trans = btrfs_join_transaction(root, 1);
3094 		BUG_ON(IS_ERR(trans));
3095 		end_trans = 1;
3096 	}
3097 
3098 	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3099 			     num_bytes + 2 * 1024 * 1024,
3100 			     get_alloc_profile(root, sinfo->flags), 0);
3101 
3102 	if (end_trans)
3103 		btrfs_end_transaction(trans, root);
3104 
3105 	return ret == 1 ? 1 : 0;
3106 }
3107 
3108 /*
3109  * shrink metadata reservation for delalloc
3110  */
3111 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3112 			   struct btrfs_root *root, u64 to_reclaim)
3113 {
3114 	struct btrfs_block_rsv *block_rsv;
3115 	u64 reserved;
3116 	u64 max_reclaim;
3117 	u64 reclaimed = 0;
3118 	int pause = 1;
3119 	int ret;
3120 
3121 	block_rsv = &root->fs_info->delalloc_block_rsv;
3122 	spin_lock(&block_rsv->lock);
3123 	reserved = block_rsv->reserved;
3124 	spin_unlock(&block_rsv->lock);
3125 
3126 	if (reserved == 0)
3127 		return 0;
3128 
3129 	max_reclaim = min(reserved, to_reclaim);
3130 
3131 	while (1) {
3132 		ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
3133 		if (!ret) {
3134 			__set_current_state(TASK_INTERRUPTIBLE);
3135 			schedule_timeout(pause);
3136 			pause <<= 1;
3137 			if (pause > HZ / 10)
3138 				pause = HZ / 10;
3139 		} else {
3140 			pause = 1;
3141 		}
3142 
3143 		spin_lock(&block_rsv->lock);
3144 		if (reserved > block_rsv->reserved)
3145 			reclaimed = reserved - block_rsv->reserved;
3146 		reserved = block_rsv->reserved;
3147 		spin_unlock(&block_rsv->lock);
3148 
3149 		if (reserved == 0 || reclaimed >= max_reclaim)
3150 			break;
3151 
3152 		if (trans && trans->transaction->blocked)
3153 			return -EAGAIN;
3154 	}
3155 	return reclaimed >= to_reclaim;
3156 }
3157 
3158 static int should_retry_reserve(struct btrfs_trans_handle *trans,
3159 				struct btrfs_root *root,
3160 				struct btrfs_block_rsv *block_rsv,
3161 				u64 num_bytes, int *retries)
3162 {
3163 	struct btrfs_space_info *space_info = block_rsv->space_info;
3164 	int ret;
3165 
3166 	if ((*retries) > 2)
3167 		return -ENOSPC;
3168 
3169 	ret = maybe_allocate_chunk(trans, root, space_info, num_bytes);
3170 	if (ret)
3171 		return 1;
3172 
3173 	if (trans && trans->transaction->in_commit)
3174 		return -ENOSPC;
3175 
3176 	ret = shrink_delalloc(trans, root, num_bytes);
3177 	if (ret)
3178 		return ret;
3179 
3180 	spin_lock(&space_info->lock);
3181 	if (space_info->bytes_pinned < num_bytes)
3182 		ret = 1;
3183 	spin_unlock(&space_info->lock);
3184 	if (ret)
3185 		return -ENOSPC;
3186 
3187 	(*retries)++;
3188 
3189 	if (trans)
3190 		return -EAGAIN;
3191 
3192 	trans = btrfs_join_transaction(root, 1);
3193 	BUG_ON(IS_ERR(trans));
3194 	ret = btrfs_commit_transaction(trans, root);
3195 	BUG_ON(ret);
3196 
3197 	return 1;
3198 }
3199 
3200 static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
3201 				  u64 num_bytes)
3202 {
3203 	struct btrfs_space_info *space_info = block_rsv->space_info;
3204 	u64 unused;
3205 	int ret = -ENOSPC;
3206 
3207 	spin_lock(&space_info->lock);
3208 	unused = space_info->bytes_used + space_info->bytes_reserved +
3209 		 space_info->bytes_pinned + space_info->bytes_readonly;
3210 
3211 	if (unused < space_info->total_bytes)
3212 		unused = space_info->total_bytes - unused;
3213 	else
3214 		unused = 0;
3215 
3216 	if (unused >= num_bytes) {
3217 		if (block_rsv->priority >= 10) {
3218 			space_info->bytes_reserved += num_bytes;
3219 			ret = 0;
3220 		} else {
3221 			if ((unused + block_rsv->reserved) *
3222 			    block_rsv->priority >=
3223 			    (num_bytes + block_rsv->reserved) * 10) {
3224 				space_info->bytes_reserved += num_bytes;
3225 				ret = 0;
3226 			}
3227 		}
3228 	}
3229 	spin_unlock(&space_info->lock);
3230 
3231 	return ret;
3232 }
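
/*
 * Editorial sketch of the priority check above: a reservation with
 * priority >= 10 succeeds whenever enough unused space exists; a
 * lower-priority rsv must additionally satisfy
 *
 *	(unused + reserved) * priority >= (num_bytes + reserved) * 10
 *
 * e.g. with priority == 6, reserved == 0 and unused == 100MiB, a
 * request is granted only while num_bytes <= 60MiB, which keeps
 * low-priority users from draining the last of the free metadata space.
 */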
3233 
3234 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3235 					     struct btrfs_root *root)
3236 {
3237 	struct btrfs_block_rsv *block_rsv;
3238 	if (root->ref_cows)
3239 		block_rsv = trans->block_rsv;
3240 	else
3241 		block_rsv = root->block_rsv;
3242 
3243 	if (!block_rsv)
3244 		block_rsv = &root->fs_info->empty_block_rsv;
3245 
3246 	return block_rsv;
3247 }
3248 
3249 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3250 			       u64 num_bytes)
3251 {
3252 	int ret = -ENOSPC;
3253 	spin_lock(&block_rsv->lock);
3254 	if (block_rsv->reserved >= num_bytes) {
3255 		block_rsv->reserved -= num_bytes;
3256 		if (block_rsv->reserved < block_rsv->size)
3257 			block_rsv->full = 0;
3258 		ret = 0;
3259 	}
3260 	spin_unlock(&block_rsv->lock);
3261 	return ret;
3262 }
3263 
3264 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3265 				u64 num_bytes, int update_size)
3266 {
3267 	spin_lock(&block_rsv->lock);
3268 	block_rsv->reserved += num_bytes;
3269 	if (update_size)
3270 		block_rsv->size += num_bytes;
3271 	else if (block_rsv->reserved >= block_rsv->size)
3272 		block_rsv->full = 1;
3273 	spin_unlock(&block_rsv->lock);
3274 }
3275 
3276 void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3277 			     struct btrfs_block_rsv *dest, u64 num_bytes)
3278 {
3279 	struct btrfs_space_info *space_info = block_rsv->space_info;
3280 
3281 	spin_lock(&block_rsv->lock);
3282 	if (num_bytes == (u64)-1)
3283 		num_bytes = block_rsv->size;
3284 	block_rsv->size -= num_bytes;
3285 	if (block_rsv->reserved >= block_rsv->size) {
3286 		num_bytes = block_rsv->reserved - block_rsv->size;
3287 		block_rsv->reserved = block_rsv->size;
3288 		block_rsv->full = 1;
3289 	} else {
3290 		num_bytes = 0;
3291 	}
3292 	spin_unlock(&block_rsv->lock);
3293 
3294 	if (num_bytes > 0) {
3295 		if (dest) {
3296 			block_rsv_add_bytes(dest, num_bytes, 0);
3297 		} else {
3298 			spin_lock(&space_info->lock);
3299 			space_info->bytes_reserved -= num_bytes;
3300 			spin_unlock(&space_info->lock);
3301 		}
3302 	}
3303 }
3304 
3305 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3306 				   struct btrfs_block_rsv *dst, u64 num_bytes)
3307 {
3308 	int ret;
3309 
3310 	ret = block_rsv_use_bytes(src, num_bytes);
3311 	if (ret)
3312 		return ret;
3313 
3314 	block_rsv_add_bytes(dst, num_bytes, 1);
3315 	return 0;
3316 }
3317 
3318 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3319 {
3320 	memset(rsv, 0, sizeof(*rsv));
3321 	spin_lock_init(&rsv->lock);
3322 	atomic_set(&rsv->usage, 1);
3323 	rsv->priority = 6;
3324 	INIT_LIST_HEAD(&rsv->list);
3325 }
3326 
3327 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3328 {
3329 	struct btrfs_block_rsv *block_rsv;
3330 	struct btrfs_fs_info *fs_info = root->fs_info;
3331 	u64 alloc_target;
3332 
3333 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3334 	if (!block_rsv)
3335 		return NULL;
3336 
3337 	btrfs_init_block_rsv(block_rsv);
3338 
3339 	alloc_target = btrfs_get_alloc_profile(root, 0);
3340 	block_rsv->space_info = __find_space_info(fs_info,
3341 						  BTRFS_BLOCK_GROUP_METADATA);
3342 
3343 	return block_rsv;
3344 }
3345 
3346 void btrfs_free_block_rsv(struct btrfs_root *root,
3347 			  struct btrfs_block_rsv *rsv)
3348 {
3349 	if (rsv && atomic_dec_and_test(&rsv->usage)) {
3350 		btrfs_block_rsv_release(root, rsv, (u64)-1);
3351 		if (!rsv->durable)
3352 			kfree(rsv);
3353 	}
3354 }
3355 
3356 /*
3357  * make the block_rsv struct able to capture freed space.
3358  * the captured space will be re-added to the block_rsv struct
3359  * after transaction commit
3360  */
3361 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3362 				 struct btrfs_block_rsv *block_rsv)
3363 {
3364 	block_rsv->durable = 1;
3365 	mutex_lock(&fs_info->durable_block_rsv_mutex);
3366 	list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3367 	mutex_unlock(&fs_info->durable_block_rsv_mutex);
3368 }
3369 
3370 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3371 			struct btrfs_root *root,
3372 			struct btrfs_block_rsv *block_rsv,
3373 			u64 num_bytes, int *retries)
3374 {
3375 	int ret;
3376 
3377 	if (num_bytes == 0)
3378 		return 0;
3379 again:
3380 	ret = reserve_metadata_bytes(block_rsv, num_bytes);
3381 	if (!ret) {
3382 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
3383 		return 0;
3384 	}
3385 
3386 	ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries);
3387 	if (ret > 0)
3388 		goto again;
3389 
3390 	return ret;
3391 }
3392 
3393 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3394 			  struct btrfs_root *root,
3395 			  struct btrfs_block_rsv *block_rsv,
3396 			  u64 min_reserved, int min_factor)
3397 {
3398 	u64 num_bytes = 0;
3399 	int commit_trans = 0;
3400 	int ret = -ENOSPC;
3401 
3402 	if (!block_rsv)
3403 		return 0;
3404 
3405 	spin_lock(&block_rsv->lock);
3406 	if (min_factor > 0)
3407 		num_bytes = div_factor(block_rsv->size, min_factor);
3408 	if (min_reserved > num_bytes)
3409 		num_bytes = min_reserved;
3410 
3411 	if (block_rsv->reserved >= num_bytes) {
3412 		ret = 0;
3413 	} else {
3414 		num_bytes -= block_rsv->reserved;
3415 		if (block_rsv->durable &&
3416 		    block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3417 			commit_trans = 1;
3418 	}
3419 	spin_unlock(&block_rsv->lock);
3420 	if (!ret)
3421 		return 0;
3422 
3423 	if (block_rsv->refill_used) {
3424 		ret = reserve_metadata_bytes(block_rsv, num_bytes);
3425 		if (!ret) {
3426 			block_rsv_add_bytes(block_rsv, num_bytes, 0);
3427 			return 0;
3428 		}
3429 	}
3430 
3431 	if (commit_trans) {
3432 		if (trans)
3433 			return -EAGAIN;
3434 
3435 		trans = btrfs_join_transaction(root, 1);
3436 		BUG_ON(IS_ERR(trans));
3437 		ret = btrfs_commit_transaction(trans, root);
3438 		return 0;
3439 	}
3440 
3441 	WARN_ON(1);
3442 	printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
3443 		block_rsv->size, block_rsv->reserved,
3444 		block_rsv->freed[0], block_rsv->freed[1]);
3445 
3446 	return -ENOSPC;
3447 }
3448 
3449 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3450 			    struct btrfs_block_rsv *dst_rsv,
3451 			    u64 num_bytes)
3452 {
3453 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3454 }
3455 
3456 void btrfs_block_rsv_release(struct btrfs_root *root,
3457 			     struct btrfs_block_rsv *block_rsv,
3458 			     u64 num_bytes)
3459 {
3460 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3461 	if (global_rsv->full || global_rsv == block_rsv ||
3462 	    block_rsv->space_info != global_rsv->space_info)
3463 		global_rsv = NULL;
3464 	block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3465 }
3466 
3467 /*
3468  * helper to calculate size of global block reservation.
3469  * the desired value is the sum of the space used by the extent tree,
3470  * the checksum tree and the root tree
3471  */
3472 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3473 {
3474 	struct btrfs_space_info *sinfo;
3475 	u64 num_bytes;
3476 	u64 meta_used;
3477 	u64 data_used;
3478 	int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3479 #if 0
3480 	/*
3481 	 * per tree used space accounting can be inaccurate, so we
3482 	 * can't rely on it.
3483 	 */
3484 	spin_lock(&fs_info->extent_root->accounting_lock);
3485 	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3486 	spin_unlock(&fs_info->extent_root->accounting_lock);
3487 
3488 	spin_lock(&fs_info->csum_root->accounting_lock);
3489 	num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3490 	spin_unlock(&fs_info->csum_root->accounting_lock);
3491 
3492 	spin_lock(&fs_info->tree_root->accounting_lock);
3493 	num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3494 	spin_unlock(&fs_info->tree_root->accounting_lock);
3495 #endif
3496 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3497 	spin_lock(&sinfo->lock);
3498 	data_used = sinfo->bytes_used;
3499 	spin_unlock(&sinfo->lock);
3500 
3501 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3502 	spin_lock(&sinfo->lock);
3503 	meta_used = sinfo->bytes_used;
3504 	spin_unlock(&sinfo->lock);
3505 
3506 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3507 		    csum_size * 2;
3508 	num_bytes += div64_u64(data_used + meta_used, 50);
3509 
3510 	if (num_bytes * 3 > meta_used)
3511 		num_bytes = div64_u64(meta_used, 3);
3512 
3513 	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3514 }
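
/*
 * Editorial worked example for the heuristic above, assuming
 * data_used == 100GiB, meta_used == 1GiB, 4KiB blocks and 4-byte
 * crc32c checksums:
 *
 *	csum space: (100GiB / 4KiB) * 4 * 2 == 200MiB
 *	2% overhead: (100GiB + 1GiB) / 50  ~= 2GiB
 *
 * the ~2.2GiB sum is larger than meta_used / 3, so it is capped at
 * ~341MiB and then rounded up to a multiple of leafsize << 10.
 */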
3515 
3516 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3517 {
3518 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3519 	struct btrfs_space_info *sinfo = block_rsv->space_info;
3520 	u64 num_bytes;
3521 
3522 	num_bytes = calc_global_metadata_size(fs_info);
3523 
3524 	spin_lock(&block_rsv->lock);
3525 	spin_lock(&sinfo->lock);
3526 
3527 	block_rsv->size = num_bytes;
3528 
3529 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3530 		    sinfo->bytes_reserved + sinfo->bytes_readonly;
3531 
3532 	if (sinfo->total_bytes > num_bytes) {
3533 		num_bytes = sinfo->total_bytes - num_bytes;
3534 		block_rsv->reserved += num_bytes;
3535 		sinfo->bytes_reserved += num_bytes;
3536 	}
3537 
3538 	if (block_rsv->reserved >= block_rsv->size) {
3539 		num_bytes = block_rsv->reserved - block_rsv->size;
3540 		sinfo->bytes_reserved -= num_bytes;
3541 		block_rsv->reserved = block_rsv->size;
3542 		block_rsv->full = 1;
3543 	}
3544 #if 0
3545 	printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3546 		block_rsv->size, block_rsv->reserved);
3547 #endif
3548 	spin_unlock(&sinfo->lock);
3549 	spin_unlock(&block_rsv->lock);
3550 }
3551 
3552 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3553 {
3554 	struct btrfs_space_info *space_info;
3555 
3556 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3557 	fs_info->chunk_block_rsv.space_info = space_info;
3558 	fs_info->chunk_block_rsv.priority = 10;
3559 
3560 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3561 	fs_info->global_block_rsv.space_info = space_info;
3562 	fs_info->global_block_rsv.priority = 10;
3563 	fs_info->global_block_rsv.refill_used = 1;
3564 	fs_info->delalloc_block_rsv.space_info = space_info;
3565 	fs_info->trans_block_rsv.space_info = space_info;
3566 	fs_info->empty_block_rsv.space_info = space_info;
3567 	fs_info->empty_block_rsv.priority = 10;
3568 
3569 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3570 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3571 	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3572 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3573 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3574 
3575 	btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3576 
3577 	btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3578 
3579 	update_global_block_rsv(fs_info);
3580 }
3581 
3582 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3583 {
3584 	block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3585 	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3586 	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3587 	WARN_ON(fs_info->trans_block_rsv.size > 0);
3588 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3589 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
3590 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3591 }
3592 
3593 static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3594 {
3595 	return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3596 		3 * num_items;
3597 }
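
/*
 * Editorial worked example: with 4KiB leaves and nodes and
 * BTRFS_MAX_LEVEL == 8, reserving for a single item costs
 *
 *	(4096 + 4096 * 7) * 3 * 1 == 98304 bytes (96KiB)
 *
 * i.e. one full root-to-leaf CoW path, times a safety factor of three
 * for splits and updates in related trees.
 */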
3598 
3599 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3600 				 struct btrfs_root *root,
3601 				 int num_items, int *retries)
3602 {
3603 	u64 num_bytes;
3604 	int ret;
3605 
3606 	if (num_items == 0 || root->fs_info->chunk_root == root)
3607 		return 0;
3608 
3609 	num_bytes = calc_trans_metadata_size(root, num_items);
3610 	ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3611 				  num_bytes, retries);
3612 	if (!ret) {
3613 		trans->bytes_reserved += num_bytes;
3614 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3615 	}
3616 	return ret;
3617 }
3618 
3619 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3620 				  struct btrfs_root *root)
3621 {
3622 	if (!trans->bytes_reserved)
3623 		return;
3624 
3625 	BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3626 	btrfs_block_rsv_release(root, trans->block_rsv,
3627 				trans->bytes_reserved);
3628 	trans->bytes_reserved = 0;
3629 }
3630 
3631 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3632 				  struct inode *inode)
3633 {
3634 	struct btrfs_root *root = BTRFS_I(inode)->root;
3635 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3636 	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3637 
3638 	/*
3639 	 * one for deleting the orphan item, one for updating the inode and
3640 	 * two for calling btrfs_truncate_inode_items.
3641 	 *
3642 	 * btrfs_truncate_inode_items is a delete operation, it frees
3643 	 * more space than it uses in most cases. So two units of
3644 	 * metadata space should be enough for calling it many times.
3645 	 * If all of the metadata space is used, we can commit
3646 	 * transaction and use space it freed.
3647 	 */
3648 	u64 num_bytes = calc_trans_metadata_size(root, 4);
3649 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3650 }
3651 
3652 void btrfs_orphan_release_metadata(struct inode *inode)
3653 {
3654 	struct btrfs_root *root = BTRFS_I(inode)->root;
3655 	u64 num_bytes = calc_trans_metadata_size(root, 4);
3656 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3657 }
3658 
3659 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3660 				struct btrfs_pending_snapshot *pending)
3661 {
3662 	struct btrfs_root *root = pending->root;
3663 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3664 	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3665 	/*
3666 	 * two for root back/forward refs, two for directory entries
3667 	 * and one for the root of the snapshot.
3668 	 */
3669 	u64 num_bytes = calc_trans_metadata_size(root, 5);
3670 	dst_rsv->space_info = src_rsv->space_info;
3671 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3672 }
3673 
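/*
 * rough worst-case size of the csum items covering num_bytes of data:
 * one eighth of the data size.  The inode argument is currently unused.
 */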
3674 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3675 {
3676 	return num_bytes >> 3;
3677 }
3678 
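/*
 * reserve metadata space for num_bytes of delalloc: enough for any new
 * outstanding extents this write adds, plus the csum items that will
 * eventually describe the data.  Retries via should_retry_reserve()
 * when the reservation cannot be satisfied immediately.
 */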
3679 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3680 {
3681 	struct btrfs_root *root = BTRFS_I(inode)->root;
3682 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3683 	u64 to_reserve;
3684 	int nr_extents;
3685 	int retries = 0;
3686 	int ret;
3687 
3688 	if (btrfs_transaction_in_commit(root->fs_info))
3689 		schedule_timeout(1);
3690 
3691 	num_bytes = ALIGN(num_bytes, root->sectorsize);
3692 again:
3693 	spin_lock(&BTRFS_I(inode)->accounting_lock);
3694 	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3695 	if (nr_extents > BTRFS_I(inode)->reserved_extents) {
3696 		nr_extents -= BTRFS_I(inode)->reserved_extents;
3697 		to_reserve = calc_trans_metadata_size(root, nr_extents);
3698 	} else {
3699 		nr_extents = 0;
3700 		to_reserve = 0;
3701 	}
3702 
3703 	to_reserve += calc_csum_metadata_size(inode, num_bytes);
3704 	ret = reserve_metadata_bytes(block_rsv, to_reserve);
3705 	if (ret) {
3706 		spin_unlock(&BTRFS_I(inode)->accounting_lock);
3707 		ret = should_retry_reserve(NULL, root, block_rsv, to_reserve,
3708 					   &retries);
3709 		if (ret > 0)
3710 			goto again;
3711 		return ret;
3712 	}
3713 
3714 	BTRFS_I(inode)->reserved_extents += nr_extents;
3715 	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3716 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
3717 
3718 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
3719 
3720 	if (block_rsv->size > 512 * 1024 * 1024)
3721 		shrink_delalloc(NULL, root, to_reserve);
3722 
3723 	return 0;
3724 }
3725 
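/*
 * undo btrfs_delalloc_reserve_metadata: drop one outstanding extent,
 * release the reservations for extents we no longer need, and give
 * back the matching csum reservation.
 */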
3726 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3727 {
3728 	struct btrfs_root *root = BTRFS_I(inode)->root;
3729 	u64 to_free;
3730 	int nr_extents;
3731 
3732 	num_bytes = ALIGN(num_bytes, root->sectorsize);
3733 	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
3734 
3735 	spin_lock(&BTRFS_I(inode)->accounting_lock);
3736 	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
3737 	if (nr_extents < BTRFS_I(inode)->reserved_extents) {
3738 		nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
3739 		BTRFS_I(inode)->reserved_extents -= nr_extents;
3740 	} else {
3741 		nr_extents = 0;
3742 	}
3743 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
3744 
3745 	to_free = calc_csum_metadata_size(inode, num_bytes);
3746 	if (nr_extents > 0)
3747 		to_free += calc_trans_metadata_size(root, nr_extents);
3748 
3749 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
3750 				to_free);
3751 }
3752 
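/*
 * reserve both data and metadata space for a delalloc write.  If the
 * metadata half fails, the data reservation is rolled back so callers
 * see all-or-nothing behaviour.
 */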
3753 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
3754 {
3755 	int ret;
3756 
3757 	ret = btrfs_check_data_free_space(inode, num_bytes);
3758 	if (ret)
3759 		return ret;
3760 
3761 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
3762 	if (ret) {
3763 		btrfs_free_reserved_data_space(inode, num_bytes);
3764 		return ret;
3765 	}
3766 
3767 	return 0;
3768 }
3769 
3770 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
3771 {
3772 	btrfs_delalloc_release_metadata(inode, num_bytes);
3773 	btrfs_free_reserved_data_space(inode, num_bytes);
3774 }
3775 
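/*
 * adjust the accounting (super block, block group item and space_info
 * counters) when num_bytes at bytenr are allocated or freed.  The range
 * may span several block groups, so we walk them one at a time; freed
 * space stays pinned until the transaction commits.
 */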
3776 static int update_block_group(struct btrfs_trans_handle *trans,
3777 			      struct btrfs_root *root,
3778 			      u64 bytenr, u64 num_bytes, int alloc)
3779 {
3780 	struct btrfs_block_group_cache *cache;
3781 	struct btrfs_fs_info *info = root->fs_info;
3782 	int factor;
3783 	u64 total = num_bytes;
3784 	u64 old_val;
3785 	u64 byte_in_group;
3786 
3787 	/* block accounting for super block */
3788 	spin_lock(&info->delalloc_lock);
3789 	old_val = btrfs_super_bytes_used(&info->super_copy);
3790 	if (alloc)
3791 		old_val += num_bytes;
3792 	else
3793 		old_val -= num_bytes;
3794 	btrfs_set_super_bytes_used(&info->super_copy, old_val);
3795 	spin_unlock(&info->delalloc_lock);
3796 
3797 	while (total) {
3798 		cache = btrfs_lookup_block_group(info, bytenr);
3799 		if (!cache)
3800 			return -1;
3801 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
3802 				    BTRFS_BLOCK_GROUP_RAID1 |
3803 				    BTRFS_BLOCK_GROUP_RAID10))
3804 			factor = 2;
3805 		else
3806 			factor = 1;
3807 		byte_in_group = bytenr - cache->key.objectid;
3808 		WARN_ON(byte_in_group > cache->key.offset);
3809 
3810 		spin_lock(&cache->space_info->lock);
3811 		spin_lock(&cache->lock);
3812 		cache->dirty = 1;
3813 		old_val = btrfs_block_group_used(&cache->item);
3814 		num_bytes = min(total, cache->key.offset - byte_in_group);
3815 		if (alloc) {
3816 			old_val += num_bytes;
3817 			btrfs_set_block_group_used(&cache->item, old_val);
3818 			cache->reserved -= num_bytes;
3819 			cache->space_info->bytes_reserved -= num_bytes;
3820 			cache->space_info->bytes_used += num_bytes;
3821 			cache->space_info->disk_used += num_bytes * factor;
3822 			spin_unlock(&cache->lock);
3823 			spin_unlock(&cache->space_info->lock);
3824 		} else {
3825 			old_val -= num_bytes;
3826 			btrfs_set_block_group_used(&cache->item, old_val);
3827 			cache->pinned += num_bytes;
3828 			cache->space_info->bytes_pinned += num_bytes;
3829 			cache->space_info->bytes_used -= num_bytes;
3830 			cache->space_info->disk_used -= num_bytes * factor;
3831 			spin_unlock(&cache->lock);
3832 			spin_unlock(&cache->space_info->lock);
3833 
3834 			set_extent_dirty(info->pinned_extents,
3835 					 bytenr, bytenr + num_bytes - 1,
3836 					 GFP_NOFS | __GFP_NOFAIL);
3837 		}
3838 		btrfs_put_block_group(cache);
3839 		total -= num_bytes;
3840 		bytenr += num_bytes;
3841 	}
3842 	return 0;
3843 }
3844 
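/* logical offset of the first block group at or after search_start, or 0 */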
3845 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3846 {
3847 	struct btrfs_block_group_cache *cache;
3848 	u64 bytenr;
3849 
3850 	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3851 	if (!cache)
3852 		return 0;
3853 
3854 	bytenr = cache->key.objectid;
3855 	btrfs_put_block_group(cache);
3856 
3857 	return bytenr;
3858 }
3859 
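/*
 * move num_bytes of a block group into the pinned state (and optionally
 * out of the reserved state), and mark the range dirty in the pinned
 * extent tree so it is unpinned once the transaction commits.
 */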
3860 static int pin_down_extent(struct btrfs_root *root,
3861 			   struct btrfs_block_group_cache *cache,
3862 			   u64 bytenr, u64 num_bytes, int reserved)
3863 {
3864 	spin_lock(&cache->space_info->lock);
3865 	spin_lock(&cache->lock);
3866 	cache->pinned += num_bytes;
3867 	cache->space_info->bytes_pinned += num_bytes;
3868 	if (reserved) {
3869 		cache->reserved -= num_bytes;
3870 		cache->space_info->bytes_reserved -= num_bytes;
3871 	}
3872 	spin_unlock(&cache->lock);
3873 	spin_unlock(&cache->space_info->lock);
3874 
3875 	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
3876 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
3877 	return 0;
3878 }
3879 
3880 /*
3881  * this function must be called within a transaction
3882  */
3883 int btrfs_pin_extent(struct btrfs_root *root,
3884 		     u64 bytenr, u64 num_bytes, int reserved)
3885 {
3886 	struct btrfs_block_group_cache *cache;
3887 
3888 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
3889 	BUG_ON(!cache);
3890 
3891 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
3892 
3893 	btrfs_put_block_group(cache);
3894 	return 0;
3895 }
3896 
3897 /*
3898  * update the size of reserved extents. may return -EAGAIN when the block
3899  * group is read-only, if 'reserve' is true or 'sinfo' is false.
3900  */
3901 static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
3902 				 u64 num_bytes, int reserve, int sinfo)
3903 {
3904 	int ret = 0;
3905 	if (sinfo) {
3906 		struct btrfs_space_info *space_info = cache->space_info;
3907 		spin_lock(&space_info->lock);
3908 		spin_lock(&cache->lock);
3909 		if (reserve) {
3910 			if (cache->ro) {
3911 				ret = -EAGAIN;
3912 			} else {
3913 				cache->reserved += num_bytes;
3914 				space_info->bytes_reserved += num_bytes;
3915 			}
3916 		} else {
3917 			if (cache->ro)
3918 				space_info->bytes_readonly += num_bytes;
3919 			cache->reserved -= num_bytes;
3920 			space_info->bytes_reserved -= num_bytes;
3921 		}
3922 		spin_unlock(&cache->lock);
3923 		spin_unlock(&space_info->lock);
3924 	} else {
3925 		spin_lock(&cache->lock);
3926 		if (cache->ro) {
3927 			ret = -EAGAIN;
3928 		} else {
3929 			if (reserve)
3930 				cache->reserved += num_bytes;
3931 			else
3932 				cache->reserved -= num_bytes;
3933 		}
3934 		spin_unlock(&cache->lock);
3935 	}
3936 	return ret;
3937 }
3938 
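/*
 * called during transaction commit: remember how far each caching
 * thread has progressed, so extents freed below that point can go
 * straight back to the free space cache, and flip which of the two
 * freed_extents trees new pins are recorded in.
 */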
3939 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3940 				struct btrfs_root *root)
3941 {
3942 	struct btrfs_fs_info *fs_info = root->fs_info;
3943 	struct btrfs_caching_control *next;
3944 	struct btrfs_caching_control *caching_ctl;
3945 	struct btrfs_block_group_cache *cache;
3946 
3947 	down_write(&fs_info->extent_commit_sem);
3948 
3949 	list_for_each_entry_safe(caching_ctl, next,
3950 				 &fs_info->caching_block_groups, list) {
3951 		cache = caching_ctl->block_group;
3952 		if (block_group_cache_done(cache)) {
3953 			cache->last_byte_to_unpin = (u64)-1;
3954 			list_del_init(&caching_ctl->list);
3955 			put_caching_control(caching_ctl);
3956 		} else {
3957 			cache->last_byte_to_unpin = caching_ctl->progress;
3958 		}
3959 	}
3960 
3961 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3962 		fs_info->pinned_extents = &fs_info->freed_extents[1];
3963 	else
3964 		fs_info->pinned_extents = &fs_info->freed_extents[0];
3965 
3966 	up_write(&fs_info->extent_commit_sem);
3967 
3968 	update_global_block_rsv(fs_info);
3969 	return 0;
3970 }
3971 
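/*
 * unpin [start, end]: hand back to the free space cache whatever part
 * of the range the caching code has already passed, and move the rest
 * of the accounting from pinned to readonly or reserved as appropriate.
 */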
3972 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
3973 {
3974 	struct btrfs_fs_info *fs_info = root->fs_info;
3975 	struct btrfs_block_group_cache *cache = NULL;
3976 	u64 len;
3977 
3978 	while (start <= end) {
3979 		if (!cache ||
3980 		    start >= cache->key.objectid + cache->key.offset) {
3981 			if (cache)
3982 				btrfs_put_block_group(cache);
3983 			cache = btrfs_lookup_block_group(fs_info, start);
3984 			BUG_ON(!cache);
3985 		}
3986 
3987 		len = cache->key.objectid + cache->key.offset - start;
3988 		len = min(len, end + 1 - start);
3989 
3990 		if (start < cache->last_byte_to_unpin) {
3991 			len = min(len, cache->last_byte_to_unpin - start);
3992 			btrfs_add_free_space(cache, start, len);
3993 		}
3994 
3995 		start += len;
3996 
3997 		spin_lock(&cache->space_info->lock);
3998 		spin_lock(&cache->lock);
3999 		cache->pinned -= len;
4000 		cache->space_info->bytes_pinned -= len;
4001 		if (cache->ro) {
4002 			cache->space_info->bytes_readonly += len;
4003 		} else if (cache->reserved_pinned > 0) {
4004 			len = min(len, cache->reserved_pinned);
4005 			cache->reserved_pinned -= len;
4006 			cache->space_info->bytes_reserved += len;
4007 		}
4008 		spin_unlock(&cache->lock);
4009 		spin_unlock(&cache->space_info->lock);
4010 	}
4011 
4012 	if (cache)
4013 		btrfs_put_block_group(cache);
4014 	return 0;
4015 }
4016 
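/*
 * after the commit is on disk: discard and unpin everything pinned by
 * the previous transaction, then settle the durable block reserves by
 * reclaiming bytes freed in this transaction and dropping reserves that
 * no longer have any users.
 */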
4017 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4018 			       struct btrfs_root *root)
4019 {
4020 	struct btrfs_fs_info *fs_info = root->fs_info;
4021 	struct extent_io_tree *unpin;
4022 	struct btrfs_block_rsv *block_rsv;
4023 	struct btrfs_block_rsv *next_rsv;
4024 	u64 start;
4025 	u64 end;
4026 	int idx;
4027 	int ret;
4028 
4029 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4030 		unpin = &fs_info->freed_extents[1];
4031 	else
4032 		unpin = &fs_info->freed_extents[0];
4033 
4034 	while (1) {
4035 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4036 					    EXTENT_DIRTY);
4037 		if (ret)
4038 			break;
4039 
4040 		ret = btrfs_discard_extent(root, start, end + 1 - start);
4041 
4042 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
4043 		unpin_extent_range(root, start, end);
4044 		cond_resched();
4045 	}
4046 
4047 	mutex_lock(&fs_info->durable_block_rsv_mutex);
4048 	list_for_each_entry_safe(block_rsv, next_rsv,
4049 				 &fs_info->durable_block_rsv_list, list) {
4050 
4051 		idx = trans->transid & 0x1;
4052 		if (block_rsv->freed[idx] > 0) {
4053 			block_rsv_add_bytes(block_rsv,
4054 					    block_rsv->freed[idx], 0);
4055 			block_rsv->freed[idx] = 0;
4056 		}
4057 		if (atomic_read(&block_rsv->usage) == 0) {
4058 			btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4059 
4060 			if (block_rsv->freed[0] == 0 &&
4061 			    block_rsv->freed[1] == 0) {
4062 				list_del_init(&block_rsv->list);
4063 				kfree(block_rsv);
4064 			}
4065 		} else {
4066 			btrfs_block_rsv_release(root, block_rsv, 0);
4067 		}
4068 	}
4069 	mutex_unlock(&fs_info->durable_block_rsv_mutex);
4070 
4071 	return 0;
4072 }
4073 
4074 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4075 				struct btrfs_root *root,
4076 				u64 bytenr, u64 num_bytes, u64 parent,
4077 				u64 root_objectid, u64 owner_objectid,
4078 				u64 owner_offset, int refs_to_drop,
4079 				struct btrfs_delayed_extent_op *extent_op)
4080 {
4081 	struct btrfs_key key;
4082 	struct btrfs_path *path;
4083 	struct btrfs_fs_info *info = root->fs_info;
4084 	struct btrfs_root *extent_root = info->extent_root;
4085 	struct extent_buffer *leaf;
4086 	struct btrfs_extent_item *ei;
4087 	struct btrfs_extent_inline_ref *iref;
4088 	int ret;
4089 	int is_data;
4090 	int extent_slot = 0;
4091 	int found_extent = 0;
4092 	int num_to_del = 1;
4093 	u32 item_size;
4094 	u64 refs;
4095 
4096 	path = btrfs_alloc_path();
4097 	if (!path)
4098 		return -ENOMEM;
4099 
4100 	path->reada = 1;
4101 	path->leave_spinning = 1;
4102 
4103 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4104 	BUG_ON(!is_data && refs_to_drop != 1);
4105 
4106 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
4107 				    bytenr, num_bytes, parent,
4108 				    root_objectid, owner_objectid,
4109 				    owner_offset);
4110 	if (ret == 0) {
4111 		extent_slot = path->slots[0];
4112 		while (extent_slot >= 0) {
4113 			btrfs_item_key_to_cpu(path->nodes[0], &key,
4114 					      extent_slot);
4115 			if (key.objectid != bytenr)
4116 				break;
4117 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4118 			    key.offset == num_bytes) {
4119 				found_extent = 1;
4120 				break;
4121 			}
4122 			if (path->slots[0] - extent_slot > 5)
4123 				break;
4124 			extent_slot--;
4125 		}
4126 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4127 		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4128 		if (found_extent && item_size < sizeof(*ei))
4129 			found_extent = 0;
4130 #endif
4131 		if (!found_extent) {
4132 			BUG_ON(iref);
4133 			ret = remove_extent_backref(trans, extent_root, path,
4134 						    NULL, refs_to_drop,
4135 						    is_data);
4136 			BUG_ON(ret);
4137 			btrfs_release_path(extent_root, path);
4138 			path->leave_spinning = 1;
4139 
4140 			key.objectid = bytenr;
4141 			key.type = BTRFS_EXTENT_ITEM_KEY;
4142 			key.offset = num_bytes;
4143 
4144 			ret = btrfs_search_slot(trans, extent_root,
4145 						&key, path, -1, 1);
4146 			if (ret) {
4147 				printk(KERN_ERR "umm, got %d back from search"
4148 				       ", was looking for %llu\n", ret,
4149 				       (unsigned long long)bytenr);
4150 				btrfs_print_leaf(extent_root, path->nodes[0]);
4151 			}
4152 			BUG_ON(ret);
4153 			extent_slot = path->slots[0];
4154 		}
4155 	} else {
4156 		btrfs_print_leaf(extent_root, path->nodes[0]);
4157 		WARN_ON(1);
4158 		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4159 		       "parent %llu root %llu  owner %llu offset %llu\n",
4160 		       (unsigned long long)bytenr,
4161 		       (unsigned long long)parent,
4162 		       (unsigned long long)root_objectid,
4163 		       (unsigned long long)owner_objectid,
4164 		       (unsigned long long)owner_offset);
4165 	}
4166 
4167 	leaf = path->nodes[0];
4168 	item_size = btrfs_item_size_nr(leaf, extent_slot);
4169 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4170 	if (item_size < sizeof(*ei)) {
4171 		BUG_ON(found_extent || extent_slot != path->slots[0]);
4172 		ret = convert_extent_item_v0(trans, extent_root, path,
4173 					     owner_objectid, 0);
4174 		BUG_ON(ret < 0);
4175 
4176 		btrfs_release_path(extent_root, path);
4177 		path->leave_spinning = 1;
4178 
4179 		key.objectid = bytenr;
4180 		key.type = BTRFS_EXTENT_ITEM_KEY;
4181 		key.offset = num_bytes;
4182 
4183 		ret = btrfs_search_slot(trans, extent_root, &key, path,
4184 					-1, 1);
4185 		if (ret) {
4186 			printk(KERN_ERR "umm, got %d back from search"
4187 			       ", was looking for %llu\n", ret,
4188 			       (unsigned long long)bytenr);
4189 			btrfs_print_leaf(extent_root, path->nodes[0]);
4190 		}
4191 		BUG_ON(ret);
4192 		extent_slot = path->slots[0];
4193 		leaf = path->nodes[0];
4194 		item_size = btrfs_item_size_nr(leaf, extent_slot);
4195 	}
4196 #endif
4197 	BUG_ON(item_size < sizeof(*ei));
4198 	ei = btrfs_item_ptr(leaf, extent_slot,
4199 			    struct btrfs_extent_item);
4200 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4201 		struct btrfs_tree_block_info *bi;
4202 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4203 		bi = (struct btrfs_tree_block_info *)(ei + 1);
4204 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4205 	}
4206 
4207 	refs = btrfs_extent_refs(leaf, ei);
4208 	BUG_ON(refs < refs_to_drop);
4209 	refs -= refs_to_drop;
4210 
4211 	if (refs > 0) {
4212 		if (extent_op)
4213 			__run_delayed_extent_op(extent_op, leaf, ei);
4214 		/*
4215 		 * In the case of an inline back ref, the reference count
4216 		 * will be updated by remove_extent_backref
4217 		 */
4218 		if (iref) {
4219 			BUG_ON(!found_extent);
4220 		} else {
4221 			btrfs_set_extent_refs(leaf, ei, refs);
4222 			btrfs_mark_buffer_dirty(leaf);
4223 		}
4224 		if (found_extent) {
4225 			ret = remove_extent_backref(trans, extent_root, path,
4226 						    iref, refs_to_drop,
4227 						    is_data);
4228 			BUG_ON(ret);
4229 		}
4230 	} else {
4231 		if (found_extent) {
4232 			BUG_ON(is_data && refs_to_drop !=
4233 			       extent_data_ref_count(root, path, iref));
4234 			if (iref) {
4235 				BUG_ON(path->slots[0] != extent_slot);
4236 			} else {
4237 				BUG_ON(path->slots[0] != extent_slot + 1);
4238 				path->slots[0] = extent_slot;
4239 				num_to_del = 2;
4240 			}
4241 		}
4242 
4243 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4244 				      num_to_del);
4245 		BUG_ON(ret);
4246 		btrfs_release_path(extent_root, path);
4247 
4248 		if (is_data) {
4249 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4250 			BUG_ON(ret);
4251 		} else {
4252 			invalidate_mapping_pages(info->btree_inode->i_mapping,
4253 			     bytenr >> PAGE_CACHE_SHIFT,
4254 			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4255 		}
4256 
4257 		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4258 		BUG_ON(ret);
4259 	}
4260 	btrfs_free_path(path);
4261 	return ret;
4262 }
4263 
4264 /*
4265  * when we free a block, it is possible (and likely) that we free the last
4266  * delayed ref for that extent as well.  This searches the delayed ref tree for
4267  * a given extent, and if there are no other delayed refs to be processed, it
4268  * removes it from the tree.
4269  */
4270 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4271 				      struct btrfs_root *root, u64 bytenr)
4272 {
4273 	struct btrfs_delayed_ref_head *head;
4274 	struct btrfs_delayed_ref_root *delayed_refs;
4275 	struct btrfs_delayed_ref_node *ref;
4276 	struct rb_node *node;
4277 	int ret = 0;
4278 
4279 	delayed_refs = &trans->transaction->delayed_refs;
4280 	spin_lock(&delayed_refs->lock);
4281 	head = btrfs_find_delayed_ref_head(trans, bytenr);
4282 	if (!head)
4283 		goto out;
4284 
4285 	node = rb_prev(&head->node.rb_node);
4286 	if (!node)
4287 		goto out;
4288 
4289 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4290 
4291 	/* there are still entries for this ref, we can't drop it */
4292 	if (ref->bytenr == bytenr)
4293 		goto out;
4294 
4295 	if (head->extent_op) {
4296 		if (!head->must_insert_reserved)
4297 			goto out;
4298 		kfree(head->extent_op);
4299 		head->extent_op = NULL;
4300 	}
4301 
4302 	/*
4303 	 * waiting for the lock here would deadlock.  If someone else has it
4304 	 * locked, they are already in the process of dropping it anyway
4305 	 */
4306 	if (!mutex_trylock(&head->mutex))
4307 		goto out;
4308 
4309 	/*
4310 	 * at this point we have a head with no other entries.  Go
4311 	 * ahead and process it.
4312 	 */
4313 	head->node.in_tree = 0;
4314 	rb_erase(&head->node.rb_node, &delayed_refs->root);
4315 
4316 	delayed_refs->num_entries--;
4317 
4318 	/*
4319 	 * we don't take a ref on the node because we're removing it from the
4320 	 * tree, so we just steal the ref the tree was holding.
4321 	 */
4322 	delayed_refs->num_heads--;
4323 	if (list_empty(&head->cluster))
4324 		delayed_refs->num_heads_ready--;
4325 
4326 	list_del_init(&head->cluster);
4327 	spin_unlock(&delayed_refs->lock);
4328 
4329 	BUG_ON(head->extent_op);
4330 	if (head->must_insert_reserved)
4331 		ret = 1;
4332 
4333 	mutex_unlock(&head->mutex);
4334 	btrfs_put_delayed_ref(&head->node);
4335 	return ret;
4336 out:
4337 	spin_unlock(&delayed_refs->lock);
4338 	return 0;
4339 }
4340 
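/*
 * drop a reference on a tree block that was just removed from a tree.
 * A delayed ref is queued to update the extent tree; if this was the
 * last reference, the space is either handed straight back to the free
 * space cache (and the block reserve), or pinned until commit when the
 * committed transaction may still reference the block.
 */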
4341 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4342 			   struct btrfs_root *root,
4343 			   struct extent_buffer *buf,
4344 			   u64 parent, int last_ref)
4345 {
4346 	struct btrfs_block_rsv *block_rsv;
4347 	struct btrfs_block_group_cache *cache = NULL;
4348 	int ret;
4349 
4350 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4351 		ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4352 						parent, root->root_key.objectid,
4353 						btrfs_header_level(buf),
4354 						BTRFS_DROP_DELAYED_REF, NULL);
4355 		BUG_ON(ret);
4356 	}
4357 
4358 	if (!last_ref)
4359 		return;
4360 
4361 	block_rsv = get_block_rsv(trans, root);
4362 	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4363 	if (block_rsv->space_info != cache->space_info)
4364 		goto out;
4365 
4366 	if (btrfs_header_generation(buf) == trans->transid) {
4367 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4368 			ret = check_ref_cleanup(trans, root, buf->start);
4369 			if (!ret)
4370 				goto pin;
4371 		}
4372 
4373 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4374 			pin_down_extent(root, cache, buf->start, buf->len, 1);
4375 			goto pin;
4376 		}
4377 
4378 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4379 
4380 		btrfs_add_free_space(cache, buf->start, buf->len);
4381 		ret = update_reserved_bytes(cache, buf->len, 0, 0);
4382 		if (ret == -EAGAIN) {
4383 			/* block group became read-only */
4384 			update_reserved_bytes(cache, buf->len, 0, 1);
4385 			goto out;
4386 		}
4387 
4388 		ret = 1;
4389 		spin_lock(&block_rsv->lock);
4390 		if (block_rsv->reserved < block_rsv->size) {
4391 			block_rsv->reserved += buf->len;
4392 			ret = 0;
4393 		}
4394 		spin_unlock(&block_rsv->lock);
4395 
4396 		if (ret) {
4397 			spin_lock(&cache->space_info->lock);
4398 			cache->space_info->bytes_reserved -= buf->len;
4399 			spin_unlock(&cache->space_info->lock);
4400 		}
4401 		goto out;
4402 	}
4403 pin:
4404 	if (block_rsv->durable && !cache->ro) {
4405 		ret = 0;
4406 		spin_lock(&cache->lock);
4407 		if (!cache->ro) {
4408 			cache->reserved_pinned += buf->len;
4409 			ret = 1;
4410 		}
4411 		spin_unlock(&cache->lock);
4412 
4413 		if (ret) {
4414 			spin_lock(&block_rsv->lock);
4415 			block_rsv->freed[trans->transid & 0x1] += buf->len;
4416 			spin_unlock(&block_rsv->lock);
4417 		}
4418 	}
4419 out:
4420 	btrfs_put_block_group(cache);
4421 }
4422 
4423 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4424 		      struct btrfs_root *root,
4425 		      u64 bytenr, u64 num_bytes, u64 parent,
4426 		      u64 root_objectid, u64 owner, u64 offset)
4427 {
4428 	int ret;
4429 
4430 	/*
4431 	 * tree log blocks never actually go into the extent allocation
4432 	 * tree, just update pinning info and exit early.
4433 	 */
4434 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4435 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4436 		/* unlocks the pinned mutex */
4437 		btrfs_pin_extent(root, bytenr, num_bytes, 1);
4438 		ret = 0;
4439 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4440 		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4441 					parent, root_objectid, (int)owner,
4442 					BTRFS_DROP_DELAYED_REF, NULL);
4443 		BUG_ON(ret);
4444 	} else {
4445 		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4446 					parent, root_objectid, owner,
4447 					offset, BTRFS_DROP_DELAYED_REF, NULL);
4448 		BUG_ON(ret);
4449 	}
4450 	return ret;
4451 }
4452 
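/* round val up to the next multiple of the stripe size */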
4453 static u64 stripe_align(struct btrfs_root *root, u64 val)
4454 {
4455 	u64 mask = ((u64)root->stripesize - 1);
4456 	u64 ret = (val + mask) & ~mask;
4457 	return ret;
4458 }
4459 
4460 /*
4461  * when we wait for progress in the block group caching, it's because
4462  * our allocation attempt failed at least once.  So, we must sleep
4463  * and let some progress happen before we try again.
4464  *
4465  * This function will sleep at least once waiting for new free space to
4466  * show up, and then it will check the block group free space numbers
4467  * for our min num_bytes.  Another option is to have it go ahead
4468  * and look in the rbtree for a free extent of a given size, but this
4469  * is a good start.
4470  */
4471 static noinline int
4472 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4473 				u64 num_bytes)
4474 {
4475 	struct btrfs_caching_control *caching_ctl;
4476 	DEFINE_WAIT(wait);
4477 
4478 	caching_ctl = get_caching_control(cache);
4479 	if (!caching_ctl)
4480 		return 0;
4481 
4482 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4483 		   (cache->free_space >= num_bytes));
4484 
4485 	put_caching_control(caching_ctl);
4486 	return 0;
4487 }
4488 
4489 static noinline int
4490 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4491 {
4492 	struct btrfs_caching_control *caching_ctl;
4493 	DEFINE_WAIT(wait);
4494 
4495 	caching_ctl = get_caching_control(cache);
4496 	if (!caching_ctl)
4497 		return 0;
4498 
4499 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
4500 
4501 	put_caching_control(caching_ctl);
4502 	return 0;
4503 }
4504 
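/*
 * map a block group's RAID flags to its slot in the per-space-info
 * block_groups array, so groups with the same profile are kept and
 * searched together.
 */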
4505 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4506 {
4507 	int index;
4508 	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4509 		index = 0;
4510 	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4511 		index = 1;
4512 	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4513 		index = 2;
4514 	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4515 		index = 3;
4516 	else
4517 		index = 4;
4518 	return index;
4519 }
4520 
4521 enum btrfs_loop_type {
4522 	LOOP_FIND_IDEAL = 0,
4523 	LOOP_CACHING_NOWAIT = 1,
4524 	LOOP_CACHING_WAIT = 2,
4525 	LOOP_ALLOC_CHUNK = 3,
4526 	LOOP_NO_EMPTY_SIZE = 4,
4527 };
4528 
4529 /*
4530  * walks the btree of allocated extents and finds a hole of a given size.
4531  * The key ins is changed to record the hole:
4532  * ins->objectid == block start
4533  * ins->flags == BTRFS_EXTENT_ITEM_KEY
4534  * ins->offset == number of blocks
4535  * Any available blocks before search_start are skipped.
4536  */
4537 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4538 				     struct btrfs_root *orig_root,
4539 				     u64 num_bytes, u64 empty_size,
4540 				     u64 search_start, u64 search_end,
4541 				     u64 hint_byte, struct btrfs_key *ins,
4542 				     int data)
4543 {
4544 	int ret = 0;
4545 	struct btrfs_root *root = orig_root->fs_info->extent_root;
4546 	struct btrfs_free_cluster *last_ptr = NULL;
4547 	struct btrfs_block_group_cache *block_group = NULL;
4548 	int empty_cluster = 2 * 1024 * 1024;
4549 	int allowed_chunk_alloc = 0;
4550 	int done_chunk_alloc = 0;
4551 	struct btrfs_space_info *space_info;
4552 	int last_ptr_loop = 0;
4553 	int loop = 0;
4554 	int index = 0;
4555 	bool found_uncached_bg = false;
4556 	bool failed_cluster_refill = false;
4557 	bool failed_alloc = false;
4558 	u64 ideal_cache_percent = 0;
4559 	u64 ideal_cache_offset = 0;
4560 
4561 	WARN_ON(num_bytes < root->sectorsize);
4562 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4563 	ins->objectid = 0;
4564 	ins->offset = 0;
4565 
4566 	space_info = __find_space_info(root->fs_info, data);
4567 	if (!space_info) {
4568 		printk(KERN_ERR "No space info for %d\n", data);
4569 		return -ENOSPC;
4570 	}
4571 
4572 	if (orig_root->ref_cows || empty_size)
4573 		allowed_chunk_alloc = 1;
4574 
4575 	if (data & BTRFS_BLOCK_GROUP_METADATA) {
4576 		last_ptr = &root->fs_info->meta_alloc_cluster;
4577 		if (!btrfs_test_opt(root, SSD))
4578 			empty_cluster = 64 * 1024;
4579 	}
4580 
4581 	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
4582 		last_ptr = &root->fs_info->data_alloc_cluster;
4583 	}
4584 
4585 	if (last_ptr) {
4586 		spin_lock(&last_ptr->lock);
4587 		if (last_ptr->block_group)
4588 			hint_byte = last_ptr->window_start;
4589 		spin_unlock(&last_ptr->lock);
4590 	}
4591 
4592 	search_start = max(search_start, first_logical_byte(root, 0));
4593 	search_start = max(search_start, hint_byte);
4594 
4595 	if (!last_ptr)
4596 		empty_cluster = 0;
4597 
4598 	if (search_start == hint_byte) {
4599 ideal_cache:
4600 		block_group = btrfs_lookup_block_group(root->fs_info,
4601 						       search_start);
4602 		/*
4603 		 * we don't want to use the block group if it doesn't match our
4604 		 * allocation bits, or if it's not cached.
4605 		 *
4606 		 * However if we are re-searching with an ideal block group
4607 		 * picked out then we don't care that the block group is cached.
4608 		 */
4609 		if (block_group && block_group_bits(block_group, data) &&
4610 		    (block_group->cached != BTRFS_CACHE_NO ||
4611 		     search_start == ideal_cache_offset)) {
4612 			down_read(&space_info->groups_sem);
4613 			if (list_empty(&block_group->list) ||
4614 			    block_group->ro) {
4615 				/*
4616 				 * someone is removing this block group,
4617 				 * we can't jump into the have_block_group
4618 				 * target because our list pointers are not
4619 				 * valid
4620 				 */
4621 				btrfs_put_block_group(block_group);
4622 				up_read(&space_info->groups_sem);
4623 			} else {
4624 				index = get_block_group_index(block_group);
4625 				goto have_block_group;
4626 			}
4627 		} else if (block_group) {
4628 			btrfs_put_block_group(block_group);
4629 		}
4630 	}
4631 search:
4632 	down_read(&space_info->groups_sem);
4633 	list_for_each_entry(block_group, &space_info->block_groups[index],
4634 			    list) {
4635 		u64 offset;
4636 		int cached;
4637 
4638 		btrfs_get_block_group(block_group);
4639 		search_start = block_group->key.objectid;
4640 
4641 have_block_group:
4642 		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4643 			u64 free_percent;
4644 
4645 			free_percent = btrfs_block_group_used(&block_group->item);
4646 			free_percent *= 100;
4647 			free_percent = div64_u64(free_percent,
4648 						 block_group->key.offset);
4649 			free_percent = 100 - free_percent;
4650 			if (free_percent > ideal_cache_percent &&
4651 			    likely(!block_group->ro)) {
4652 				ideal_cache_offset = block_group->key.objectid;
4653 				ideal_cache_percent = free_percent;
4654 			}
4655 
4656 			/*
4657 			 * We only want to start kthread caching if we are at
4658 			 * the point where we will wait for caching to make
4659 			 * progress, or if our ideal search is over and we've
4660 			 * found somebody to start caching.
4661 			 */
4662 			if (loop > LOOP_CACHING_NOWAIT ||
4663 			    (loop > LOOP_FIND_IDEAL &&
4664 			     atomic_read(&space_info->caching_threads) < 2)) {
4665 				ret = cache_block_group(block_group);
4666 				BUG_ON(ret);
4667 			}
4668 			found_uncached_bg = true;
4669 
4670 			/*
4671 			 * If loop is set for cached only, try the next block
4672 			 * group.
4673 			 */
4674 			if (loop == LOOP_FIND_IDEAL)
4675 				goto loop;
4676 		}
4677 
4678 		cached = block_group_cache_done(block_group);
4679 		if (unlikely(!cached))
4680 			found_uncached_bg = true;
4681 
4682 		if (unlikely(block_group->ro))
4683 			goto loop;
4684 
4685 		/*
4686 		 * Ok, we want to try and use the cluster allocator, so let's look
4687 		 * there, unless we are on LOOP_NO_EMPTY_SIZE, since by then we
4688 		 * will have tried the cluster allocator plenty of times at this
4689 		 * point without finding anything.  We are likely way too
4690 		 * fragmented for the clustering code to find anything, so let's
4691 		 * just skip it and let the allocator find whatever block it
4692 		 * can find.
4693 		 */
4694 		if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
4695 			/*
4696 			 * the refill lock keeps out other
4697 			 * people trying to start a new cluster
4698 			 */
4699 			spin_lock(&last_ptr->refill_lock);
4700 			if (last_ptr->block_group &&
4701 			    (last_ptr->block_group->ro ||
4702 			    !block_group_bits(last_ptr->block_group, data))) {
4703 				offset = 0;
4704 				goto refill_cluster;
4705 			}
4706 
4707 			offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4708 						 num_bytes, search_start);
4709 			if (offset) {
4710 				/* we have a block, we're done */
4711 				spin_unlock(&last_ptr->refill_lock);
4712 				goto checks;
4713 			}
4714 
4715 			spin_lock(&last_ptr->lock);
4716 			/*
4717 			 * whoops, this cluster doesn't actually point to
4718 			 * this block group.  Get a ref on the block
4719 			 * group it does point to and try again
4720 			 */
4721 			if (!last_ptr_loop && last_ptr->block_group &&
4722 			    last_ptr->block_group != block_group) {
4723 
4724 				btrfs_put_block_group(block_group);
4725 				block_group = last_ptr->block_group;
4726 				btrfs_get_block_group(block_group);
4727 				spin_unlock(&last_ptr->lock);
4728 				spin_unlock(&last_ptr->refill_lock);
4729 
4730 				last_ptr_loop = 1;
4731 				search_start = block_group->key.objectid;
4732 				/*
4733 				 * we know this block group is properly
4734 				 * in the list because
4735 				 * btrfs_remove_block_group drops the
4736 				 * cluster before it removes the block
4737 				 * group from the list
4738 				 */
4739 				goto have_block_group;
4740 			}
4741 			spin_unlock(&last_ptr->lock);
4742 refill_cluster:
4743 			/*
4744 			 * this cluster didn't work out, free it and
4745 			 * start over
4746 			 */
4747 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
4748 
4749 			last_ptr_loop = 0;
4750 
4751 			/* allocate a cluster in this block group */
4752 			ret = btrfs_find_space_cluster(trans, root,
4753 					       block_group, last_ptr,
4754 					       offset, num_bytes,
4755 					       empty_cluster + empty_size);
4756 			if (ret == 0) {
4757 				/*
4758 				 * now pull our allocation out of this
4759 				 * cluster
4760 				 */
4761 				offset = btrfs_alloc_from_cluster(block_group,
4762 						  last_ptr, num_bytes,
4763 						  search_start);
4764 				if (offset) {
4765 					/* we found one, proceed */
4766 					spin_unlock(&last_ptr->refill_lock);
4767 					goto checks;
4768 				}
4769 			} else if (!cached && loop > LOOP_CACHING_NOWAIT
4770 				   && !failed_cluster_refill) {
4771 				spin_unlock(&last_ptr->refill_lock);
4772 
4773 				failed_cluster_refill = true;
4774 				wait_block_group_cache_progress(block_group,
4775 				       num_bytes + empty_cluster + empty_size);
4776 				goto have_block_group;
4777 			}
4778 
4779 			/*
4780 			 * at this point we either didn't find a cluster
4781 			 * or we weren't able to allocate a block from our
4782 			 * cluster.  Free the cluster we've been trying
4783 			 * to use, and go to the next block group
4784 			 */
4785 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
4786 			spin_unlock(&last_ptr->refill_lock);
4787 			goto loop;
4788 		}
4789 
4790 		offset = btrfs_find_space_for_alloc(block_group, search_start,
4791 						    num_bytes, empty_size);
4792 		/*
4793 		 * If we didn't find a chunk, and we haven't failed on this
4794 		 * block group before, and this block group is in the middle of
4795 		 * caching and we are ok with waiting, then go ahead and wait
4796 		 * for progress to be made, and set failed_alloc to true.
4797 		 *
4798 		 * If failed_alloc is true then we've already waited on this
4799 		 * block group once and should move on to the next block group.
4800 		 */
4801 		if (!offset && !failed_alloc && !cached &&
4802 		    loop > LOOP_CACHING_NOWAIT) {
4803 			wait_block_group_cache_progress(block_group,
4804 						num_bytes + empty_size);
4805 			failed_alloc = true;
4806 			goto have_block_group;
4807 		} else if (!offset) {
4808 			goto loop;
4809 		}
4810 checks:
4811 		search_start = stripe_align(root, offset);
4812 		/* move on to the next group */
4813 		if (search_start + num_bytes >= search_end) {
4814 			btrfs_add_free_space(block_group, offset, num_bytes);
4815 			goto loop;
4816 		}
4817 
4818 		/* move on to the next group */
4819 		if (search_start + num_bytes >
4820 		    block_group->key.objectid + block_group->key.offset) {
4821 			btrfs_add_free_space(block_group, offset, num_bytes);
4822 			goto loop;
4823 		}
4824 
4825 		ins->objectid = search_start;
4826 		ins->offset = num_bytes;
4827 
4828 		if (offset < search_start)
4829 			btrfs_add_free_space(block_group, offset,
4830 					     search_start - offset);
4831 		BUG_ON(offset > search_start);
4832 
4833 		ret = update_reserved_bytes(block_group, num_bytes, 1,
4834 					    (data & BTRFS_BLOCK_GROUP_DATA));
4835 		if (ret == -EAGAIN) {
4836 			btrfs_add_free_space(block_group, offset, num_bytes);
4837 			goto loop;
4838 		}
4839 
4840 		/* we are all good, let's return */
4841 		ins->objectid = search_start;
4842 		ins->offset = num_bytes;
4843 
4844 		if (offset < search_start)
4845 			btrfs_add_free_space(block_group, offset,
4846 					     search_start - offset);
4847 		BUG_ON(offset > search_start);
4848 		break;
4849 loop:
4850 		failed_cluster_refill = false;
4851 		failed_alloc = false;
4852 		BUG_ON(index != get_block_group_index(block_group));
4853 		btrfs_put_block_group(block_group);
4854 	}
4855 	up_read(&space_info->groups_sem);
4856 
4857 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
4858 		goto search;
4859 
4860 	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
4861 	 *			for them to make caching progress.  Also
4862 	 *			determine the best possible bg to cache
4863 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
4864 	 *			caching kthreads as we move along
4865 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4866 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4867 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
4868 	 *			again
4869 	 */
4870 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4871 	    (found_uncached_bg || empty_size || empty_cluster ||
4872 	     allowed_chunk_alloc)) {
4873 		index = 0;
4874 		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
4875 			found_uncached_bg = false;
4876 			loop++;
4877 			if (!ideal_cache_percent &&
4878 			    atomic_read(&space_info->caching_threads))
4879 				goto search;
4880 
4881 			/*
4882 			 * One of the following two things has happened so far
4883 			 *
4884 			 * 1) We found an ideal block group for caching that
4885 			 * is mostly full and will cache quickly, so we might
4886 			 * as well wait for it.
4887 			 *
4888 			 * 2) We searched for cached only and we didn't find
4889 			 * anything, and we didn't start any caching kthreads
4890 			 * either, so chances are we will loop through and
4891 			 * start a couple caching kthreads, and then come back
4892 			 * around and just wait for them.  This will be slower
4893 			 * because we will have 2 caching kthreads reading at
4894 			 * the same time when we could have just started one
4895 			 * and waited for it to get far enough to give us an
4896 			 * allocation, so go ahead and go to the wait caching
4897 			 * loop.
4898 			 */
4899 			loop = LOOP_CACHING_WAIT;
4900 			search_start = ideal_cache_offset;
4901 			ideal_cache_percent = 0;
4902 			goto ideal_cache;
4903 		} else if (loop == LOOP_FIND_IDEAL) {
4904 			/*
4905 			 * Didn't find an uncached bg, wait on anything we find
4906 			 * next.
4907 			 */
4908 			loop = LOOP_CACHING_WAIT;
4909 			goto search;
4910 		}
4911 
4912 		if (loop < LOOP_CACHING_WAIT) {
4913 			loop++;
4914 			goto search;
4915 		}
4916 
4917 		if (loop == LOOP_ALLOC_CHUNK) {
4918 			empty_size = 0;
4919 			empty_cluster = 0;
4920 		}
4921 
4922 		if (allowed_chunk_alloc) {
4923 			ret = do_chunk_alloc(trans, root, num_bytes +
4924 					     2 * 1024 * 1024, data, 1);
4925 			allowed_chunk_alloc = 0;
4926 			done_chunk_alloc = 1;
4927 		} else if (!done_chunk_alloc) {
4928 			space_info->force_alloc = 1;
4929 		}
4930 
4931 		if (loop < LOOP_NO_EMPTY_SIZE) {
4932 			loop++;
4933 			goto search;
4934 		}
4935 		ret = -ENOSPC;
4936 	} else if (!ins->objectid) {
4937 		ret = -ENOSPC;
4938 	}
4939 
4940 	/* we found what we needed */
4941 	if (ins->objectid) {
4942 		if (!(data & BTRFS_BLOCK_GROUP_DATA))
4943 			trans->block_group = block_group->key.objectid;
4944 
4945 		btrfs_put_block_group(block_group);
4946 		ret = 0;
4947 	}
4948 
4949 	return ret;
4950 }
4951 
4952 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4953 			    int dump_block_groups)
4954 {
4955 	struct btrfs_block_group_cache *cache;
4956 	int index = 0;
4957 
4958 	spin_lock(&info->lock);
4959 	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4960 	       (unsigned long long)(info->total_bytes - info->bytes_used -
4961 				    info->bytes_pinned - info->bytes_reserved -
4962 				    info->bytes_readonly),
4963 	       (info->full) ? "" : "not ");
4964 	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
4965 	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
4966 	       (unsigned long long)info->total_bytes,
4967 	       (unsigned long long)info->bytes_used,
4968 	       (unsigned long long)info->bytes_pinned,
4969 	       (unsigned long long)info->bytes_reserved,
4970 	       (unsigned long long)info->bytes_may_use,
4971 	       (unsigned long long)info->bytes_readonly);
4972 	spin_unlock(&info->lock);
4973 
4974 	if (!dump_block_groups)
4975 		return;
4976 
4977 	down_read(&info->groups_sem);
4978 again:
4979 	list_for_each_entry(cache, &info->block_groups[index], list) {
4980 		spin_lock(&cache->lock);
4981 		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4982 		       "%llu pinned %llu reserved\n",
4983 		       (unsigned long long)cache->key.objectid,
4984 		       (unsigned long long)cache->key.offset,
4985 		       (unsigned long long)btrfs_block_group_used(&cache->item),
4986 		       (unsigned long long)cache->pinned,
4987 		       (unsigned long long)cache->reserved);
4988 		btrfs_dump_free_space(cache, bytes);
4989 		spin_unlock(&cache->lock);
4990 	}
4991 	if (++index < BTRFS_NR_RAID_TYPES)
4992 		goto again;
4993 	up_read(&info->groups_sem);
4994 }
4995 
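/*
 * main entry point for extent allocation: look for num_bytes of free
 * space, and on -ENOSPC progressively halve the request (never below
 * min_alloc_size) while forcing chunk allocation, before finally giving
 * up and dumping the space info.
 */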
4996 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4997 			 struct btrfs_root *root,
4998 			 u64 num_bytes, u64 min_alloc_size,
4999 			 u64 empty_size, u64 hint_byte,
5000 			 u64 search_end, struct btrfs_key *ins,
5001 			 u64 data)
5002 {
5003 	int ret;
5004 	u64 search_start = 0;
5005 
5006 	data = btrfs_get_alloc_profile(root, data);
5007 again:
5008 	/*
5009 	 * the only place that sets empty_size is btrfs_realloc_node, which
5010 	 * is not called recursively on allocations
5011 	 */
5012 	if (empty_size || root->ref_cows)
5013 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5014 				     num_bytes + 2 * 1024 * 1024, data, 0);
5015 
5016 	WARN_ON(num_bytes < root->sectorsize);
5017 	ret = find_free_extent(trans, root, num_bytes, empty_size,
5018 			       search_start, search_end, hint_byte,
5019 			       ins, data);
5020 
5021 	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5022 		num_bytes = num_bytes >> 1;
5023 		num_bytes = num_bytes & ~(root->sectorsize - 1);
5024 		num_bytes = max(num_bytes, min_alloc_size);
5025 		do_chunk_alloc(trans, root->fs_info->extent_root,
5026 			       num_bytes, data, 1);
5027 		goto again;
5028 	}
5029 	if (ret == -ENOSPC) {
5030 		struct btrfs_space_info *sinfo;
5031 
5032 		sinfo = __find_space_info(root->fs_info, data);
5033 		printk(KERN_ERR "btrfs allocation failed flags %llu, "
5034 		       "wanted %llu\n", (unsigned long long)data,
5035 		       (unsigned long long)num_bytes);
5036 		dump_space_info(sinfo, num_bytes, 1);
5037 	}
5038 
5039 	return ret;
5040 }
5041 
5042 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5043 {
5044 	struct btrfs_block_group_cache *cache;
5045 	int ret = 0;
5046 
5047 	cache = btrfs_lookup_block_group(root->fs_info, start);
5048 	if (!cache) {
5049 		printk(KERN_ERR "Unable to find block group for %llu\n",
5050 		       (unsigned long long)start);
5051 		return -ENOSPC;
5052 	}
5053 
5054 	ret = btrfs_discard_extent(root, start, len);
5055 
5056 	btrfs_add_free_space(cache, start, len);
5057 	update_reserved_bytes(cache, len, 0, 1);
5058 	btrfs_put_block_group(cache);
5059 
5060 	return ret;
5061 }
5062 
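/*
 * insert the extent item and its inline backref for a freshly allocated
 * data extent, then account the allocation in its block group.
 */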
5063 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5064 				      struct btrfs_root *root,
5065 				      u64 parent, u64 root_objectid,
5066 				      u64 flags, u64 owner, u64 offset,
5067 				      struct btrfs_key *ins, int ref_mod)
5068 {
5069 	int ret;
5070 	struct btrfs_fs_info *fs_info = root->fs_info;
5071 	struct btrfs_extent_item *extent_item;
5072 	struct btrfs_extent_inline_ref *iref;
5073 	struct btrfs_path *path;
5074 	struct extent_buffer *leaf;
5075 	int type;
5076 	u32 size;
5077 
5078 	if (parent > 0)
5079 		type = BTRFS_SHARED_DATA_REF_KEY;
5080 	else
5081 		type = BTRFS_EXTENT_DATA_REF_KEY;
5082 
5083 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5084 
5085 	path = btrfs_alloc_path();
5086 	BUG_ON(!path);
5087 
5088 	path->leave_spinning = 1;
5089 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5090 				      ins, size);
5091 	BUG_ON(ret);
5092 
5093 	leaf = path->nodes[0];
5094 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
5095 				     struct btrfs_extent_item);
5096 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5097 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5098 	btrfs_set_extent_flags(leaf, extent_item,
5099 			       flags | BTRFS_EXTENT_FLAG_DATA);
5100 
5101 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5102 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
5103 	if (parent > 0) {
5104 		struct btrfs_shared_data_ref *ref;
5105 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
5106 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5107 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5108 	} else {
5109 		struct btrfs_extent_data_ref *ref;
5110 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5111 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5112 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5113 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5114 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5115 	}
5116 
5117 	btrfs_mark_buffer_dirty(path->nodes[0]);
5118 	btrfs_free_path(path);
5119 
5120 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5121 	if (ret) {
5122 		printk(KERN_ERR "btrfs update block group failed for %llu "
5123 		       "%llu\n", (unsigned long long)ins->objectid,
5124 		       (unsigned long long)ins->offset);
5125 		BUG();
5126 	}
5127 	return ret;
5128 }
5129 
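/*
 * same as alloc_reserved_file_extent, but for a tree block: the extent
 * item carries a btrfs_tree_block_info and a shared or keyed backref.
 */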
5130 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5131 				     struct btrfs_root *root,
5132 				     u64 parent, u64 root_objectid,
5133 				     u64 flags, struct btrfs_disk_key *key,
5134 				     int level, struct btrfs_key *ins)
5135 {
5136 	int ret;
5137 	struct btrfs_fs_info *fs_info = root->fs_info;
5138 	struct btrfs_extent_item *extent_item;
5139 	struct btrfs_tree_block_info *block_info;
5140 	struct btrfs_extent_inline_ref *iref;
5141 	struct btrfs_path *path;
5142 	struct extent_buffer *leaf;
5143 	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5144 
5145 	path = btrfs_alloc_path();
5146 	BUG_ON(!path);
5147 
5148 	path->leave_spinning = 1;
5149 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5150 				      ins, size);
5151 	BUG_ON(ret);
5152 
5153 	leaf = path->nodes[0];
5154 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
5155 				     struct btrfs_extent_item);
5156 	btrfs_set_extent_refs(leaf, extent_item, 1);
5157 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5158 	btrfs_set_extent_flags(leaf, extent_item,
5159 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5160 	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5161 
5162 	btrfs_set_tree_block_key(leaf, block_info, key);
5163 	btrfs_set_tree_block_level(leaf, block_info, level);
5164 
5165 	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5166 	if (parent > 0) {
5167 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5168 		btrfs_set_extent_inline_ref_type(leaf, iref,
5169 						 BTRFS_SHARED_BLOCK_REF_KEY);
5170 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5171 	} else {
5172 		btrfs_set_extent_inline_ref_type(leaf, iref,
5173 						 BTRFS_TREE_BLOCK_REF_KEY);
5174 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5175 	}
5176 
5177 	btrfs_mark_buffer_dirty(leaf);
5178 	btrfs_free_path(path);
5179 
5180 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5181 	if (ret) {
5182 		printk(KERN_ERR "btrfs update block group failed for %llu "
5183 		       "%llu\n", (unsigned long long)ins->objectid,
5184 		       (unsigned long long)ins->offset);
5185 		BUG();
5186 	}
5187 	return ret;
5188 }
5189 
5190 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5191 				     struct btrfs_root *root,
5192 				     u64 root_objectid, u64 owner,
5193 				     u64 offset, struct btrfs_key *ins)
5194 {
5195 	int ret;
5196 
5197 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5198 
5199 	ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5200 					 0, root_objectid, owner, offset,
5201 					 BTRFS_ADD_DELAYED_EXTENT, NULL);
5202 	return ret;
5203 }
5204 
5205 /*
5206  * this is used by the tree logging recovery code.  It records that
5207  * an extent has been allocated and makes sure to clear the free
5208  * space cache bits as well
5209  */
5210 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5211 				   struct btrfs_root *root,
5212 				   u64 root_objectid, u64 owner, u64 offset,
5213 				   struct btrfs_key *ins)
5214 {
5215 	int ret;
5216 	struct btrfs_block_group_cache *block_group;
5217 	struct btrfs_caching_control *caching_ctl;
5218 	u64 start = ins->objectid;
5219 	u64 num_bytes = ins->offset;
5220 
5221 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5222 	cache_block_group(block_group);
5223 	caching_ctl = get_caching_control(block_group);
5224 
5225 	if (!caching_ctl) {
5226 		BUG_ON(!block_group_cache_done(block_group));
5227 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
5228 		BUG_ON(ret);
5229 	} else {
5230 		mutex_lock(&caching_ctl->mutex);
5231 
5232 		if (start >= caching_ctl->progress) {
5233 			ret = add_excluded_extent(root, start, num_bytes);
5234 			BUG_ON(ret);
5235 		} else if (start + num_bytes <= caching_ctl->progress) {
5236 			ret = btrfs_remove_free_space(block_group,
5237 						      start, num_bytes);
5238 			BUG_ON(ret);
5239 		} else {
5240 			num_bytes = caching_ctl->progress - start;
5241 			ret = btrfs_remove_free_space(block_group,
5242 						      start, num_bytes);
5243 			BUG_ON(ret);
5244 
5245 			start = caching_ctl->progress;
5246 			num_bytes = ins->objectid + ins->offset -
5247 				    caching_ctl->progress;
5248 			ret = add_excluded_extent(root, start, num_bytes);
5249 			BUG_ON(ret);
5250 		}
5251 
5252 		mutex_unlock(&caching_ctl->mutex);
5253 		put_caching_control(caching_ctl);
5254 	}
5255 
5256 	ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5257 	BUG_ON(ret);
5258 	btrfs_put_block_group(block_group);
5259 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5260 					 0, owner, offset, ins, 1);
5261 	return ret;
5262 }
5263 
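/*
 * set up the in-memory buffer for a newly allocated tree block and mark
 * its pages dirty in the right tree; the buffer is returned locked.
 */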
5264 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5265 					    struct btrfs_root *root,
5266 					    u64 bytenr, u32 blocksize,
5267 					    int level)
5268 {
5269 	struct extent_buffer *buf;
5270 
5271 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5272 	if (!buf)
5273 		return ERR_PTR(-ENOMEM);
5274 	btrfs_set_header_generation(buf, trans->transid);
5275 	btrfs_set_buffer_lockdep_class(buf, level);
5276 	btrfs_tree_lock(buf);
5277 	clean_tree_block(trans, root, buf);
5278 
5279 	btrfs_set_lock_blocking(buf);
5280 	btrfs_set_buffer_uptodate(buf);
5281 
5282 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5283 		/*
5284 		 * we allow two log transactions at a time, use different
5285 		 * EXTENT bits to differentiate dirty pages.
5286 		 */
5287 		if (root->log_transid % 2 == 0)
5288 			set_extent_dirty(&root->dirty_log_pages, buf->start,
5289 					buf->start + buf->len - 1, GFP_NOFS);
5290 		else
5291 			set_extent_new(&root->dirty_log_pages, buf->start,
5292 					buf->start + buf->len - 1, GFP_NOFS);
5293 	} else {
5294 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5295 			 buf->start + buf->len - 1, GFP_NOFS);
5296 	}
5297 	trans->blocks_used++;
5298 	/* this returns a buffer locked for blocking */
5299 	return buf;
5300 }
5301 
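/*
 * pick the block reserve that backs new tree blocks for this root and
 * consume one block's worth of space from it.  A reserve that was never
 * given a target size is refilled on demand instead.
 */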
5302 static struct btrfs_block_rsv *
5303 use_block_rsv(struct btrfs_trans_handle *trans,
5304 	      struct btrfs_root *root, u32 blocksize)
5305 {
5306 	struct btrfs_block_rsv *block_rsv;
5307 	int ret;
5308 
5309 	block_rsv = get_block_rsv(trans, root);
5310 
5311 	if (block_rsv->size == 0) {
5312 		ret = reserve_metadata_bytes(block_rsv, blocksize);
5313 		if (ret)
5314 			return ERR_PTR(ret);
5315 		return block_rsv;
5316 	}
5317 
5318 	ret = block_rsv_use_bytes(block_rsv, blocksize);
5319 	if (!ret)
5320 		return block_rsv;
5321 
5322 	WARN_ON(1);
5323 	printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
5324 		block_rsv->size, block_rsv->reserved,
5325 		block_rsv->freed[0], block_rsv->freed[1]);
5326 
5327 	return ERR_PTR(-ENOSPC);
5328 }
5329 
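/* give back the space taken by use_block_rsv when no block was allocated */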
5330 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5331 {
5332 	block_rsv_add_bytes(block_rsv, blocksize, 0);
5333 	block_rsv_release_bytes(block_rsv, NULL, 0);
5334 }
5335 
5336 /*
5337  * finds a free extent and does all the dirty work required for allocation.
5338  * Returns the key for the extent through ins, and a tree buffer for
5339  * the first block of the extent through buf.
5340  *
5341  * returns the tree buffer or NULL.
5342  */
5343 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5344 					struct btrfs_root *root, u32 blocksize,
5345 					u64 parent, u64 root_objectid,
5346 					struct btrfs_disk_key *key, int level,
5347 					u64 hint, u64 empty_size)
5348 {
5349 	struct btrfs_key ins;
5350 	struct btrfs_block_rsv *block_rsv;
5351 	struct extent_buffer *buf;
5352 	u64 flags = 0;
5353 	int ret;
5354 
5356 	block_rsv = use_block_rsv(trans, root, blocksize);
5357 	if (IS_ERR(block_rsv))
5358 		return ERR_CAST(block_rsv);
5359 
5360 	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5361 				   empty_size, hint, (u64)-1, &ins, 0);
5362 	if (ret) {
5363 		unuse_block_rsv(block_rsv, blocksize);
5364 		return ERR_PTR(ret);
5365 	}
5366 
5367 	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5368 				    blocksize, level);
5369 	BUG_ON(IS_ERR(buf));
5370 
5371 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5372 		if (parent == 0)
5373 			parent = ins.objectid;
5374 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5375 	} else
5376 		BUG_ON(parent > 0);
5377 
5378 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5379 		struct btrfs_delayed_extent_op *extent_op;
5380 		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5381 		BUG_ON(!extent_op);
5382 		if (key)
5383 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
5384 		else
5385 			memset(&extent_op->key, 0, sizeof(extent_op->key));
5386 		extent_op->flags_to_set = flags;
5387 		extent_op->update_key = 1;
5388 		extent_op->update_flags = 1;
5389 		extent_op->is_data = 0;
5390 
5391 		ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5392 					ins.offset, parent, root_objectid,
5393 					level, BTRFS_ADD_DELAYED_EXTENT,
5394 					extent_op);
5395 		BUG_ON(ret);
5396 	}
5397 	return buf;
5398 }
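
/*
 * Illustrative caller sketch, not built: allocating a new leaf for a
 * reference counted tree with btrfs_alloc_free_block().  The function
 * name alloc_example_leaf() is hypothetical; real callers such as the
 * COW path also wire the returned block into a parent node.
 */
#if 0
static struct extent_buffer *alloc_example_leaf(struct btrfs_trans_handle *trans,
						struct btrfs_root *root,
						struct btrfs_disk_key *first_key)
{
	/* level 0, no parent (keyed backref), no hint, no empty_size */
	return btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				      root->root_key.objectid, first_key,
				      0, 0, 0);
}
#endif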
5399 
5400 struct walk_control {
5401 	u64 refs[BTRFS_MAX_LEVEL];
5402 	u64 flags[BTRFS_MAX_LEVEL];
5403 	struct btrfs_key update_progress;
5404 	int stage;
5405 	int level;
5406 	int shared_level;
5407 	int update_ref;
5408 	int keep_locks;
5409 	int reada_slot;
5410 	int reada_count;
5411 };
5412 
5413 #define DROP_REFERENCE	1
5414 #define UPDATE_BACKREF	2
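
/*
 * Illustrative sketch, not built: how the walk_control above is driven.
 * The caller fills it in, then alternates walk_down_tree() and
 * walk_up_tree() (both defined below) until the walk up runs out of
 * nodes.  This mirrors the loops in btrfs_drop_snapshot() and
 * btrfs_drop_subtree(); drive_walk_example() is a hypothetical name,
 * and path/wc->level are assumed to already describe the tree root.
 */
#if 0
static int drive_walk_example(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path,
			      struct walk_control *wc)
{
	int ret;

	wc->stage = DROP_REFERENCE;
	wc->shared_level = -1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0)
			return ret;

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0)
			return ret;
		if (ret > 0)	/* walked above the root, all done */
			return 0;
	}
}
#endif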
5415 
5416 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5417 				     struct btrfs_root *root,
5418 				     struct walk_control *wc,
5419 				     struct btrfs_path *path)
5420 {
5421 	u64 bytenr;
5422 	u64 generation;
5423 	u64 refs;
5424 	u64 flags;
5425 	u64 last = 0;
5426 	u32 nritems;
5427 	u32 blocksize;
5428 	struct btrfs_key key;
5429 	struct extent_buffer *eb;
5430 	int ret;
5431 	int slot;
5432 	int nread = 0;
5433 
5434 	if (path->slots[wc->level] < wc->reada_slot) {
5435 		wc->reada_count = wc->reada_count * 2 / 3;
5436 		wc->reada_count = max(wc->reada_count, 2);
5437 	} else {
5438 		wc->reada_count = wc->reada_count * 3 / 2;
5439 		wc->reada_count = min_t(int, wc->reada_count,
5440 					BTRFS_NODEPTRS_PER_BLOCK(root));
5441 	}
5442 
5443 	eb = path->nodes[wc->level];
5444 	nritems = btrfs_header_nritems(eb);
5445 	blocksize = btrfs_level_size(root, wc->level - 1);
5446 
5447 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5448 		if (nread >= wc->reada_count)
5449 			break;
5450 
5451 		cond_resched();
5452 		bytenr = btrfs_node_blockptr(eb, slot);
5453 		generation = btrfs_node_ptr_generation(eb, slot);
5454 
5455 		if (slot == path->slots[wc->level])
5456 			goto reada;
5457 
5458 		if (wc->stage == UPDATE_BACKREF &&
5459 		    generation <= root->root_key.offset)
5460 			continue;
5461 
5462 		/* We don't lock the tree block, it's OK to be racy here */
5463 		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5464 					       &refs, &flags);
5465 		BUG_ON(ret);
5466 		BUG_ON(refs == 0);
5467 
5468 		if (wc->stage == DROP_REFERENCE) {
5469 			if (refs == 1)
5470 				goto reada;
5471 
5472 			if (wc->level == 1 &&
5473 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5474 				continue;
5475 			if (!wc->update_ref ||
5476 			    generation <= root->root_key.offset)
5477 				continue;
5478 			btrfs_node_key_to_cpu(eb, &key, slot);
5479 			ret = btrfs_comp_cpu_keys(&key,
5480 						  &wc->update_progress);
5481 			if (ret < 0)
5482 				continue;
5483 		} else {
5484 			if (wc->level == 1 &&
5485 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5486 				continue;
5487 		}
5488 reada:
5489 		ret = readahead_tree_block(root, bytenr, blocksize,
5490 					   generation);
5491 		if (ret)
5492 			break;
5493 		last = bytenr + blocksize;
5494 		nread++;
5495 	}
5496 	wc->reada_slot = slot;
5497 }
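
/*
 * Illustrative sketch, not built: the readahead window above adapts
 * multiplicatively.  Seeking backwards (revisiting slots before the
 * previous window) shrinks the count to 2/3, sequential progress grows
 * it by 3/2, clamped to [2, BTRFS_NODEPTRS_PER_BLOCK()].
 * adjust_reada_example() is hypothetical and only restates that policy.
 */
#if 0
static int adjust_reada_example(int reada_count, int seeked_back,
				int max_count)
{
	if (seeked_back)
		reada_count = max(reada_count * 2 / 3, 2);
	else
		reada_count = min(reada_count * 3 / 2, max_count);
	return reada_count;
}
#endif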
5498 
5499 /*
5500  * helper to process a tree block while walking down the tree.
5501  *
5502  * when wc->stage == UPDATE_BACKREF, this function updates
5503  * back refs for pointers in the block.
5504  *
5505  * NOTE: return value 1 means we should stop walking down.
5506  */
5507 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5508 				   struct btrfs_root *root,
5509 				   struct btrfs_path *path,
5510 				   struct walk_control *wc, int lookup_info)
5511 {
5512 	int level = wc->level;
5513 	struct extent_buffer *eb = path->nodes[level];
5514 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5515 	int ret;
5516 
5517 	if (wc->stage == UPDATE_BACKREF &&
5518 	    btrfs_header_owner(eb) != root->root_key.objectid)
5519 		return 1;
5520 
5521 	/*
5522 	 * when the reference count of a tree block is 1, it won't increase
5523 	 * again. once the full backref flag is set, we never clear it.
5524 	 */
5525 	if (lookup_info &&
5526 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5527 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5528 		BUG_ON(!path->locks[level]);
5529 		ret = btrfs_lookup_extent_info(trans, root,
5530 					       eb->start, eb->len,
5531 					       &wc->refs[level],
5532 					       &wc->flags[level]);
5533 		BUG_ON(ret);
5534 		BUG_ON(wc->refs[level] == 0);
5535 	}
5536 
5537 	if (wc->stage == DROP_REFERENCE) {
5538 		if (wc->refs[level] > 1)
5539 			return 1;
5540 
5541 		if (path->locks[level] && !wc->keep_locks) {
5542 			btrfs_tree_unlock(eb);
5543 			path->locks[level] = 0;
5544 		}
5545 		return 0;
5546 	}
5547 
5548 	/* wc->stage == UPDATE_BACKREF */
5549 	if (!(wc->flags[level] & flag)) {
5550 		BUG_ON(!path->locks[level]);
5551 		ret = btrfs_inc_ref(trans, root, eb, 1);
5552 		BUG_ON(ret);
5553 		ret = btrfs_dec_ref(trans, root, eb, 0);
5554 		BUG_ON(ret);
5555 		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5556 						  eb->len, flag, 0);
5557 		BUG_ON(ret);
5558 		wc->flags[level] |= flag;
5559 	}
5560 
5561 	/*
5562 	 * the block is shared by multiple trees, so it's not good to
5563 	 * keep the tree lock
5564 	 */
5565 	if (path->locks[level] && level > 0) {
5566 		btrfs_tree_unlock(eb);
5567 		path->locks[level] = 0;
5568 	}
5569 	return 0;
5570 }
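
/*
 * Illustrative sketch, not built: the backref conversion done in the
 * UPDATE_BACKREF stanza above.  Adding full-backref references and then
 * dropping the root-keyed ones rewrites every pointer in the block to
 * be keyed by the block's bytenr instead of by the owning root.
 * convert_to_full_backref_example() is a hypothetical name.
 */
#if 0
static void convert_to_full_backref_example(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct extent_buffer *eb)
{
	int ret;

	/* take references keyed by this block's bytenr (full backref) */
	ret = btrfs_inc_ref(trans, root, eb, 1);
	BUG_ON(ret);
	/* drop the references keyed by the owning root */
	ret = btrfs_dec_ref(trans, root, eb, 0);
	BUG_ON(ret);
	/* record the conversion in the extent item's flags */
	ret = btrfs_set_disk_extent_flags(trans, root, eb->start, eb->len,
					  BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
	BUG_ON(ret);
}
#endif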
5571 
5572 /*
5573  * helper to process a tree block pointer.
5574  *
5575  * when wc->stage == DROP_REFERENCE, this function checks the
5576  * reference count of the block pointed to. if the block
5577  * is shared and we need to update back refs for the subtree
5578  * rooted at the block, this function changes wc->stage to
5579  * UPDATE_BACKREF. if the block is shared and there is no
5580  * need to update back refs, this function drops the reference
5581  * to the block.
5582  *
5583  * NOTE: return value 1 means we should stop walking down.
5584  */
5585 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5586 				 struct btrfs_root *root,
5587 				 struct btrfs_path *path,
5588 				 struct walk_control *wc, int *lookup_info)
5589 {
5590 	u64 bytenr;
5591 	u64 generation;
5592 	u64 parent;
5593 	u32 blocksize;
5594 	struct btrfs_key key;
5595 	struct extent_buffer *next;
5596 	int level = wc->level;
5597 	int reada = 0;
5598 	int ret = 0;
5599 
5600 	generation = btrfs_node_ptr_generation(path->nodes[level],
5601 					       path->slots[level]);
5602 	/*
5603 	 * if the lower level block was created before the snapshot
5604 	 * was created, we know there is no need to update back refs
5605 	 * for the subtree
5606 	 */
5607 	if (wc->stage == UPDATE_BACKREF &&
5608 	    generation <= root->root_key.offset) {
5609 		*lookup_info = 1;
5610 		return 1;
5611 	}
5612 
5613 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5614 	blocksize = btrfs_level_size(root, level - 1);
5615 
5616 	next = btrfs_find_tree_block(root, bytenr, blocksize);
5617 	if (!next) {
5618 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5619 		if (!next)
5620 			return -ENOMEM;
5621 		reada = 1;
5622 	}
5623 	btrfs_tree_lock(next);
5624 	btrfs_set_lock_blocking(next);
5625 
5626 	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5627 				       &wc->refs[level - 1],
5628 				       &wc->flags[level - 1]);
5629 	BUG_ON(ret);
5630 	BUG_ON(wc->refs[level - 1] == 0);
5631 	*lookup_info = 0;
5632 
5633 	if (wc->stage == DROP_REFERENCE) {
5634 		if (wc->refs[level - 1] > 1) {
5635 			if (level == 1 &&
5636 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5637 				goto skip;
5638 
5639 			if (!wc->update_ref ||
5640 			    generation <= root->root_key.offset)
5641 				goto skip;
5642 
5643 			btrfs_node_key_to_cpu(path->nodes[level], &key,
5644 					      path->slots[level]);
5645 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5646 			if (ret < 0)
5647 				goto skip;
5648 
5649 			wc->stage = UPDATE_BACKREF;
5650 			wc->shared_level = level - 1;
5651 		}
5652 	} else {
5653 		if (level == 1 &&
5654 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5655 			goto skip;
5656 	}
5657 
5658 	if (!btrfs_buffer_uptodate(next, generation)) {
5659 		btrfs_tree_unlock(next);
5660 		free_extent_buffer(next);
5661 		next = NULL;
5662 		*lookup_info = 1;
5663 	}
5664 
5665 	if (!next) {
5666 		if (reada && level == 1)
5667 			reada_walk_down(trans, root, wc, path);
5668 		next = read_tree_block(root, bytenr, blocksize, generation);
5669 		btrfs_tree_lock(next);
5670 		btrfs_set_lock_blocking(next);
5671 	}
5672 
5673 	level--;
5674 	BUG_ON(level != btrfs_header_level(next));
5675 	path->nodes[level] = next;
5676 	path->slots[level] = 0;
5677 	path->locks[level] = 1;
5678 	wc->level = level;
5679 	if (wc->level == 1)
5680 		wc->reada_slot = 0;
5681 	return 0;
5682 skip:
5683 	wc->refs[level - 1] = 0;
5684 	wc->flags[level - 1] = 0;
5685 	if (wc->stage == DROP_REFERENCE) {
5686 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5687 			parent = path->nodes[level]->start;
5688 		} else {
5689 			BUG_ON(root->root_key.objectid !=
5690 			       btrfs_header_owner(path->nodes[level]));
5691 			parent = 0;
5692 		}
5693 
5694 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5695 					root->root_key.objectid, level - 1, 0);
5696 		BUG_ON(ret);
5697 	}
5698 	btrfs_tree_unlock(next);
5699 	free_extent_buffer(next);
5700 	*lookup_info = 1;
5701 	return 1;
5702 }
5703 
5704 /*
5705  * helper to process a tree block while walking up the tree.
5706  *
5707  * when wc->stage == DROP_REFERENCE, this function drops
5708  * reference count on the block.
5709  *
5710  * when wc->stage == UPDATE_BACKREF, this function changes
5711  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5712  * to UPDATE_BACKREF previously while processing the block.
5713  *
5714  * NOTE: return value 1 means we should stop walking up.
5715  */
5716 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5717 				 struct btrfs_root *root,
5718 				 struct btrfs_path *path,
5719 				 struct walk_control *wc)
5720 {
5721 	int ret;
5722 	int level = wc->level;
5723 	struct extent_buffer *eb = path->nodes[level];
5724 	u64 parent = 0;
5725 
5726 	if (wc->stage == UPDATE_BACKREF) {
5727 		BUG_ON(wc->shared_level < level);
5728 		if (level < wc->shared_level)
5729 			goto out;
5730 
5731 		ret = find_next_key(path, level + 1, &wc->update_progress);
5732 		if (ret > 0)
5733 			wc->update_ref = 0;
5734 
5735 		wc->stage = DROP_REFERENCE;
5736 		wc->shared_level = -1;
5737 		path->slots[level] = 0;
5738 
5739 		/*
5740 		 * check the reference count again if the block isn't locked.
5741 		 * we should start walking down the tree again if the
5742 		 * reference count is one.
5743 		 */
5744 		if (!path->locks[level]) {
5745 			BUG_ON(level == 0);
5746 			btrfs_tree_lock(eb);
5747 			btrfs_set_lock_blocking(eb);
5748 			path->locks[level] = 1;
5749 
5750 			ret = btrfs_lookup_extent_info(trans, root,
5751 						       eb->start, eb->len,
5752 						       &wc->refs[level],
5753 						       &wc->flags[level]);
5754 			BUG_ON(ret);
5755 			BUG_ON(wc->refs[level] == 0);
5756 			if (wc->refs[level] == 1) {
5757 				btrfs_tree_unlock(eb);
5758 				path->locks[level] = 0;
5759 				return 1;
5760 			}
5761 		}
5762 	}
5763 
5764 	/* wc->stage == DROP_REFERENCE */
5765 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5766 
5767 	if (wc->refs[level] == 1) {
5768 		if (level == 0) {
5769 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5770 				ret = btrfs_dec_ref(trans, root, eb, 1);
5771 			else
5772 				ret = btrfs_dec_ref(trans, root, eb, 0);
5773 			BUG_ON(ret);
5774 		}
5775 		/* make the block-locked assertion in clean_tree_block happy */
5776 		if (!path->locks[level] &&
5777 		    btrfs_header_generation(eb) == trans->transid) {
5778 			btrfs_tree_lock(eb);
5779 			btrfs_set_lock_blocking(eb);
5780 			path->locks[level] = 1;
5781 		}
5782 		clean_tree_block(trans, root, eb);
5783 	}
5784 
5785 	if (eb == root->node) {
5786 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5787 			parent = eb->start;
5788 		else
5789 			BUG_ON(root->root_key.objectid !=
5790 			       btrfs_header_owner(eb));
5791 	} else {
5792 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5793 			parent = path->nodes[level + 1]->start;
5794 		else
5795 			BUG_ON(root->root_key.objectid !=
5796 			       btrfs_header_owner(path->nodes[level + 1]));
5797 	}
5798 
5799 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
5800 out:
5801 	wc->refs[level] = 0;
5802 	wc->flags[level] = 0;
5803 	return 0;
5804 }
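
/*
 * Illustrative sketch, not built: how walk_up_proc() derives the parent
 * argument for btrfs_free_tree_block() from the FULL_BACKREF flag of
 * the referencing block.  pick_free_parent_example() is hypothetical;
 * parent_eb is the buffer that holds the pointer being dropped (the
 * block itself when it is the tree root).
 */
#if 0
static u64 pick_free_parent_example(struct btrfs_root *root,
				    struct extent_buffer *eb,
				    struct extent_buffer *parent_eb,
				    u64 parent_flags)
{
	/* shared block: the backref is keyed by the parent's bytenr */
	if (parent_flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
		return parent_eb->start;

	/* exclusive block: the backref is keyed by the owning root */
	BUG_ON(root->root_key.objectid != btrfs_header_owner(eb));
	return 0;
}
#endif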
5805 
5806 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5807 				   struct btrfs_root *root,
5808 				   struct btrfs_path *path,
5809 				   struct walk_control *wc)
5810 {
5811 	int level = wc->level;
5812 	int lookup_info = 1;
5813 	int ret;
5814 
5815 	while (level >= 0) {
5816 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
5817 		if (ret > 0)
5818 			break;
5819 
5820 		if (level == 0)
5821 			break;
5822 
5823 		if (path->slots[level] >=
5824 		    btrfs_header_nritems(path->nodes[level]))
5825 			break;
5826 
5827 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
5828 		if (ret > 0) {
5829 			path->slots[level]++;
5830 			continue;
5831 		} else if (ret < 0)
5832 			return ret;
5833 		level = wc->level;
5834 	}
5835 	return 0;
5836 }
5837 
5838 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5839 				 struct btrfs_root *root,
5840 				 struct btrfs_path *path,
5841 				 struct walk_control *wc, int max_level)
5842 {
5843 	int level = wc->level;
5844 	int ret;
5845 
5846 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5847 	while (level < max_level && path->nodes[level]) {
5848 		wc->level = level;
5849 		if (path->slots[level] + 1 <
5850 		    btrfs_header_nritems(path->nodes[level])) {
5851 			path->slots[level]++;
5852 			return 0;
5853 		} else {
5854 			ret = walk_up_proc(trans, root, path, wc);
5855 			if (ret > 0)
5856 				return 0;
5857 
5858 			if (path->locks[level]) {
5859 				btrfs_tree_unlock(path->nodes[level]);
5860 				path->locks[level] = 0;
5861 			}
5862 			free_extent_buffer(path->nodes[level]);
5863 			path->nodes[level] = NULL;
5864 			level++;
5865 		}
5866 	}
5867 	return 1;
5868 }
5869 
5870 /*
5871  * drop a subvolume tree.
5872  *
5873  * this function traverses the tree freeing any blocks that are
5874  * only referenced by the tree.
5875  *
5876  * when a shared tree block is found, this function decreases its
5877  * reference count by one. if update_ref is true, this function
5878  * also makes sure backrefs for the shared block and all lower level
5879  * blocks are properly updated.
5880  */
5881 int btrfs_drop_snapshot(struct btrfs_root *root,
5882 			struct btrfs_block_rsv *block_rsv, int update_ref)
5883 {
5884 	struct btrfs_path *path;
5885 	struct btrfs_trans_handle *trans;
5886 	struct btrfs_root *tree_root = root->fs_info->tree_root;
5887 	struct btrfs_root_item *root_item = &root->root_item;
5888 	struct walk_control *wc;
5889 	struct btrfs_key key;
5890 	int err = 0;
5891 	int ret;
5892 	int level;
5893 
5894 	path = btrfs_alloc_path();
5895 	BUG_ON(!path);
5896 
5897 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
5898 	BUG_ON(!wc);
5899 
5900 	trans = btrfs_start_transaction(tree_root, 0);
5901 	if (block_rsv)
5902 		trans->block_rsv = block_rsv;
5903 
5904 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5905 		level = btrfs_header_level(root->node);
5906 		path->nodes[level] = btrfs_lock_root_node(root);
5907 		btrfs_set_lock_blocking(path->nodes[level]);
5908 		path->slots[level] = 0;
5909 		path->locks[level] = 1;
5910 		memset(&wc->update_progress, 0,
5911 		       sizeof(wc->update_progress));
5912 	} else {
5913 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5914 		memcpy(&wc->update_progress, &key,
5915 		       sizeof(wc->update_progress));
5916 
5917 		level = root_item->drop_level;
5918 		BUG_ON(level == 0);
5919 		path->lowest_level = level;
5920 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5921 		path->lowest_level = 0;
5922 		if (ret < 0) {
5923 			err = ret;
5924 			goto out;
5925 		}
5926 		WARN_ON(ret > 0);
5927 
5928 		/*
5929 		 * unlock our path, this is safe because only this
5930 		 * function is allowed to delete this snapshot
5931 		 */
5932 		btrfs_unlock_up_safe(path, 0);
5933 
5934 		level = btrfs_header_level(root->node);
5935 		while (1) {
5936 			btrfs_tree_lock(path->nodes[level]);
5937 			btrfs_set_lock_blocking(path->nodes[level]);
5938 
5939 			ret = btrfs_lookup_extent_info(trans, root,
5940 						path->nodes[level]->start,
5941 						path->nodes[level]->len,
5942 						&wc->refs[level],
5943 						&wc->flags[level]);
5944 			BUG_ON(ret);
5945 			BUG_ON(wc->refs[level] == 0);
5946 
5947 			if (level == root_item->drop_level)
5948 				break;
5949 
5950 			btrfs_tree_unlock(path->nodes[level]);
5951 			WARN_ON(wc->refs[level] != 1);
5952 			level--;
5953 		}
5954 	}
5955 
5956 	wc->level = level;
5957 	wc->shared_level = -1;
5958 	wc->stage = DROP_REFERENCE;
5959 	wc->update_ref = update_ref;
5960 	wc->keep_locks = 0;
5961 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5962 
5963 	while (1) {
5964 		ret = walk_down_tree(trans, root, path, wc);
5965 		if (ret < 0) {
5966 			err = ret;
5967 			break;
5968 		}
5969 
5970 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5971 		if (ret < 0) {
5972 			err = ret;
5973 			break;
5974 		}
5975 
5976 		if (ret > 0) {
5977 			BUG_ON(wc->stage != DROP_REFERENCE);
5978 			break;
5979 		}
5980 
5981 		if (wc->stage == DROP_REFERENCE) {
5982 			level = wc->level;
5983 			btrfs_node_key(path->nodes[level],
5984 				       &root_item->drop_progress,
5985 				       path->slots[level]);
5986 			root_item->drop_level = level;
5987 		}
5988 
5989 		BUG_ON(wc->level == 0);
5990 		if (btrfs_should_end_transaction(trans, tree_root)) {
5991 			ret = btrfs_update_root(trans, tree_root,
5992 						&root->root_key,
5993 						root_item);
5994 			BUG_ON(ret);
5995 
5996 			btrfs_end_transaction_throttle(trans, tree_root);
5997 			trans = btrfs_start_transaction(tree_root, 0);
5998 			if (block_rsv)
5999 				trans->block_rsv = block_rsv;
6000 		}
6001 	}
6002 	btrfs_release_path(root, path);
6003 	BUG_ON(err);
6004 
6005 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
6006 	BUG_ON(ret);
6007 
6008 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6009 		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6010 					   NULL, NULL);
6011 		BUG_ON(ret < 0);
6012 		if (ret > 0) {
6013 			ret = btrfs_del_orphan_item(trans, tree_root,
6014 						    root->root_key.objectid);
6015 			BUG_ON(ret);
6016 		}
6017 	}
6018 
6019 	if (root->in_radix) {
6020 		btrfs_free_fs_root(tree_root->fs_info, root);
6021 	} else {
6022 		free_extent_buffer(root->node);
6023 		free_extent_buffer(root->commit_root);
6024 		kfree(root);
6025 	}
6026 out:
6027 	btrfs_end_transaction_throttle(trans, tree_root);
6028 	kfree(wc);
6029 	btrfs_free_path(path);
6030 	return err;
6031 }
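
/*
 * Illustrative caller sketch, not built: dropping a dead subvolume the
 * way a cleanup path would.  btrfs_drop_snapshot() manages its own
 * transactions, so the caller must not hold one.
 * drop_dead_root_example() is a hypothetical name.
 */
#if 0
static int drop_dead_root_example(struct btrfs_root *dead_root)
{
	/* no extra block reservation, no backref updates for shared blocks */
	return btrfs_drop_snapshot(dead_root, NULL, 0);
}
#endif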
6032 
6033 /*
6034  * drop subtree rooted at tree block 'node'.
6035  *
6036  * NOTE: this function will unlock and release tree block 'node'
6037  */
6038 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6039 			struct btrfs_root *root,
6040 			struct extent_buffer *node,
6041 			struct extent_buffer *parent)
6042 {
6043 	struct btrfs_path *path;
6044 	struct walk_control *wc;
6045 	int level;
6046 	int parent_level;
6047 	int ret = 0;
6048 	int wret;
6049 
6050 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6051 
6052 	path = btrfs_alloc_path();
6053 	BUG_ON(!path);
6054 
6055 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
6056 	BUG_ON(!wc);
6057 
6058 	btrfs_assert_tree_locked(parent);
6059 	parent_level = btrfs_header_level(parent);
6060 	extent_buffer_get(parent);
6061 	path->nodes[parent_level] = parent;
6062 	path->slots[parent_level] = btrfs_header_nritems(parent);
6063 
6064 	btrfs_assert_tree_locked(node);
6065 	level = btrfs_header_level(node);
6066 	path->nodes[level] = node;
6067 	path->slots[level] = 0;
6068 	path->locks[level] = 1;
6069 
6070 	wc->refs[parent_level] = 1;
6071 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6072 	wc->level = level;
6073 	wc->shared_level = -1;
6074 	wc->stage = DROP_REFERENCE;
6075 	wc->update_ref = 0;
6076 	wc->keep_locks = 1;
6077 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6078 
6079 	while (1) {
6080 		wret = walk_down_tree(trans, root, path, wc);
6081 		if (wret < 0) {
6082 			ret = wret;
6083 			break;
6084 		}
6085 
6086 		wret = walk_up_tree(trans, root, path, wc, parent_level);
6087 		if (wret < 0)
6088 			ret = wret;
6089 		if (wret != 0)
6090 			break;
6091 	}
6092 
6093 	kfree(wc);
6094 	btrfs_free_path(path);
6095 	return ret;
6096 }
6097 
6098 #if 0
6099 static unsigned long calc_ra(unsigned long start, unsigned long last,
6100 			     unsigned long nr)
6101 {
6102 	return min(last, start + nr - 1);
6103 }
6104 
6105 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
6106 					 u64 len)
6107 {
6108 	u64 page_start;
6109 	u64 page_end;
6110 	unsigned long first_index;
6111 	unsigned long last_index;
6112 	unsigned long i;
6113 	struct page *page;
6114 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6115 	struct file_ra_state *ra;
6116 	struct btrfs_ordered_extent *ordered;
6117 	unsigned int total_read = 0;
6118 	unsigned int total_dirty = 0;
6119 	int ret = 0;
6120 
6121 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
6122 
6123 	mutex_lock(&inode->i_mutex);
6124 	first_index = start >> PAGE_CACHE_SHIFT;
6125 	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6126 
6127 	/* make sure the dirty trick played by the caller works */
6128 	ret = invalidate_inode_pages2_range(inode->i_mapping,
6129 					    first_index, last_index);
6130 	if (ret)
6131 		goto out_unlock;
6132 
6133 	file_ra_state_init(ra, inode->i_mapping);
6134 
6135 	for (i = first_index ; i <= last_index; i++) {
6136 		if (total_read % ra->ra_pages == 0) {
6137 			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
6138 				       calc_ra(i, last_index, ra->ra_pages));
6139 		}
6140 		total_read++;
6141 again:
6142 		BUG_ON(((u64)i << PAGE_CACHE_SHIFT) >
6143 		       i_size_read(inode));
6144 		page = grab_cache_page(inode->i_mapping, i);
6145 		if (!page) {
6146 			ret = -ENOMEM;
6147 			goto out_unlock;
6148 		}
6149 		if (!PageUptodate(page)) {
6150 			btrfs_readpage(NULL, page);
6151 			lock_page(page);
6152 			if (!PageUptodate(page)) {
6153 				unlock_page(page);
6154 				page_cache_release(page);
6155 				ret = -EIO;
6156 				goto out_unlock;
6157 			}
6158 		}
6159 		wait_on_page_writeback(page);
6160 
6161 		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6162 		page_end = page_start + PAGE_CACHE_SIZE - 1;
6163 		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
6164 
6165 		ordered = btrfs_lookup_ordered_extent(inode, page_start);
6166 		if (ordered) {
6167 			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6168 			unlock_page(page);
6169 			page_cache_release(page);
6170 			btrfs_start_ordered_extent(inode, ordered, 1);
6171 			btrfs_put_ordered_extent(ordered);
6172 			goto again;
6173 		}
6174 		set_page_extent_mapped(page);
6175 
6176 		if (i == first_index)
6177 			set_extent_bits(io_tree, page_start, page_end,
6178 					EXTENT_BOUNDARY, GFP_NOFS);
6179 		btrfs_set_extent_delalloc(inode, page_start, page_end);
6180 
6181 		set_page_dirty(page);
6182 		total_dirty++;
6183 
6184 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6185 		unlock_page(page);
6186 		page_cache_release(page);
6187 	}
6188 
6189 out_unlock:
6190 	kfree(ra);
6191 	mutex_unlock(&inode->i_mutex);
6192 	balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6193 	return ret;
6194 }
6195 
6196 static noinline int relocate_data_extent(struct inode *reloc_inode,
6197 					 struct btrfs_key *extent_key,
6198 					 u64 offset)
6199 {
6200 	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6201 	struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6202 	struct extent_map *em;
6203 	u64 start = extent_key->objectid - offset;
6204 	u64 end = start + extent_key->offset - 1;
6205 
6206 	em = alloc_extent_map(GFP_NOFS);
6207 	BUG_ON(!em || IS_ERR(em));
6208 
6209 	em->start = start;
6210 	em->len = extent_key->offset;
6211 	em->block_len = extent_key->offset;
6212 	em->block_start = extent_key->objectid;
6213 	em->bdev = root->fs_info->fs_devices->latest_bdev;
6214 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
6215 
6216 	/* set up an extent map to cheat btrfs_readpage */
6217 	lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6218 	while (1) {
6219 		int ret;
6220 		write_lock(&em_tree->lock);
6221 		ret = add_extent_mapping(em_tree, em);
6222 		write_unlock(&em_tree->lock);
6223 		if (ret != -EEXIST) {
6224 			free_extent_map(em);
6225 			break;
6226 		}
6227 		btrfs_drop_extent_cache(reloc_inode, start, end, 0);
6228 	}
6229 	unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6230 
6231 	return relocate_inode_pages(reloc_inode, start, extent_key->offset);
6232 }
6233 
6234 struct btrfs_ref_path {
6235 	u64 extent_start;
6236 	u64 nodes[BTRFS_MAX_LEVEL];
6237 	u64 root_objectid;
6238 	u64 root_generation;
6239 	u64 owner_objectid;
6240 	u32 num_refs;
6241 	int lowest_level;
6242 	int current_level;
6243 	int shared_level;
6244 
6245 	struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6246 	u64 new_nodes[BTRFS_MAX_LEVEL];
6247 };
6248 
6249 struct disk_extent {
6250 	u64 ram_bytes;
6251 	u64 disk_bytenr;
6252 	u64 disk_num_bytes;
6253 	u64 offset;
6254 	u64 num_bytes;
6255 	u8 compression;
6256 	u8 encryption;
6257 	u16 other_encoding;
6258 };
6259 
6260 static int is_cowonly_root(u64 root_objectid)
6261 {
6262 	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6263 	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6264 	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6265 	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
6266 	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6267 	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
6268 		return 1;
6269 	return 0;
6270 }
6271 
6272 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
6273 				    struct btrfs_root *extent_root,
6274 				    struct btrfs_ref_path *ref_path,
6275 				    int first_time)
6276 {
6277 	struct extent_buffer *leaf;
6278 	struct btrfs_path *path;
6279 	struct btrfs_extent_ref *ref;
6280 	struct btrfs_key key;
6281 	struct btrfs_key found_key;
6282 	u64 bytenr;
6283 	u32 nritems;
6284 	int level;
6285 	int ret = 1;
6286 
6287 	path = btrfs_alloc_path();
6288 	if (!path)
6289 		return -ENOMEM;
6290 
6291 	if (first_time) {
6292 		ref_path->lowest_level = -1;
6293 		ref_path->current_level = -1;
6294 		ref_path->shared_level = -1;
6295 		goto walk_up;
6296 	}
6297 walk_down:
6298 	level = ref_path->current_level - 1;
6299 	while (level >= -1) {
6300 		u64 parent;
6301 		if (level < ref_path->lowest_level)
6302 			break;
6303 
6304 		if (level >= 0)
6305 			bytenr = ref_path->nodes[level];
6306 		else
6307 			bytenr = ref_path->extent_start;
6308 		BUG_ON(bytenr == 0);
6309 
6310 		parent = ref_path->nodes[level + 1];
6311 		ref_path->nodes[level + 1] = 0;
6312 		ref_path->current_level = level;
6313 		BUG_ON(parent == 0);
6314 
6315 		key.objectid = bytenr;
6316 		key.offset = parent + 1;
6317 		key.type = BTRFS_EXTENT_REF_KEY;
6318 
6319 		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6320 		if (ret < 0)
6321 			goto out;
6322 		BUG_ON(ret == 0);
6323 
6324 		leaf = path->nodes[0];
6325 		nritems = btrfs_header_nritems(leaf);
6326 		if (path->slots[0] >= nritems) {
6327 			ret = btrfs_next_leaf(extent_root, path);
6328 			if (ret < 0)
6329 				goto out;
6330 			if (ret > 0)
6331 				goto next;
6332 			leaf = path->nodes[0];
6333 		}
6334 
6335 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6336 		if (found_key.objectid == bytenr &&
6337 		    found_key.type == BTRFS_EXTENT_REF_KEY) {
6338 			if (level < ref_path->shared_level)
6339 				ref_path->shared_level = level;
6340 			goto found;
6341 		}
6342 next:
6343 		level--;
6344 		btrfs_release_path(extent_root, path);
6345 		cond_resched();
6346 	}
6347 	/* reached lowest level */
6348 	ret = 1;
6349 	goto out;
6350 walk_up:
6351 	level = ref_path->current_level;
6352 	while (level < BTRFS_MAX_LEVEL - 1) {
6353 		u64 ref_objectid;
6354 
6355 		if (level >= 0)
6356 			bytenr = ref_path->nodes[level];
6357 		else
6358 			bytenr = ref_path->extent_start;
6359 
6360 		BUG_ON(bytenr == 0);
6361 
6362 		key.objectid = bytenr;
6363 		key.offset = 0;
6364 		key.type = BTRFS_EXTENT_REF_KEY;
6365 
6366 		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6367 		if (ret < 0)
6368 			goto out;
6369 
6370 		leaf = path->nodes[0];
6371 		nritems = btrfs_header_nritems(leaf);
6372 		if (path->slots[0] >= nritems) {
6373 			ret = btrfs_next_leaf(extent_root, path);
6374 			if (ret < 0)
6375 				goto out;
6376 			if (ret > 0) {
6377 				/* the extent was freed by someone */
6378 				if (ref_path->lowest_level == level)
6379 					goto out;
6380 				btrfs_release_path(extent_root, path);
6381 				goto walk_down;
6382 			}
6383 			leaf = path->nodes[0];
6384 		}
6385 
6386 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6387 		if (found_key.objectid != bytenr ||
6388 				found_key.type != BTRFS_EXTENT_REF_KEY) {
6389 			/* the extent was freed by someone */
6390 			if (ref_path->lowest_level == level) {
6391 				ret = 1;
6392 				goto out;
6393 			}
6394 			btrfs_release_path(extent_root, path);
6395 			goto walk_down;
6396 		}
6397 found:
6398 		ref = btrfs_item_ptr(leaf, path->slots[0],
6399 				struct btrfs_extent_ref);
6400 		ref_objectid = btrfs_ref_objectid(leaf, ref);
6401 		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6402 			if (first_time) {
6403 				level = (int)ref_objectid;
6404 				BUG_ON(level >= BTRFS_MAX_LEVEL);
6405 				ref_path->lowest_level = level;
6406 				ref_path->current_level = level;
6407 				ref_path->nodes[level] = bytenr;
6408 			} else {
6409 				WARN_ON(ref_objectid != level);
6410 			}
6411 		} else {
6412 			WARN_ON(level != -1);
6413 		}
6414 		first_time = 0;
6415 
6416 		if (ref_path->lowest_level == level) {
6417 			ref_path->owner_objectid = ref_objectid;
6418 			ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6419 		}
6420 
6421 		/*
6422 		 * the block is a tree root or the block isn't in a reference
6423 		 * counted tree.
6424 		 */
6425 		if (found_key.objectid == found_key.offset ||
6426 		    is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6427 			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6428 			ref_path->root_generation =
6429 				btrfs_ref_generation(leaf, ref);
6430 			if (level < 0) {
6431 				/* special reference from the tree log */
6432 				ref_path->nodes[0] = found_key.offset;
6433 				ref_path->current_level = 0;
6434 			}
6435 			ret = 0;
6436 			goto out;
6437 		}
6438 
6439 		level++;
6440 		BUG_ON(ref_path->nodes[level] != 0);
6441 		ref_path->nodes[level] = found_key.offset;
6442 		ref_path->current_level = level;
6443 
6444 		/*
6445 		 * the reference was created in the running transaction,
6446 		 * no need to continue walking up.
6447 		 */
6448 		if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6449 			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6450 			ref_path->root_generation =
6451 				btrfs_ref_generation(leaf, ref);
6452 			ret = 0;
6453 			goto out;
6454 		}
6455 
6456 		btrfs_release_path(extent_root, path);
6457 		cond_resched();
6458 	}
6459 	/* reached max tree level, but no tree root found. */
6460 	BUG();
6461 out:
6462 	btrfs_free_path(path);
6463 	return ret;
6464 }
6465 
6466 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6467 				struct btrfs_root *extent_root,
6468 				struct btrfs_ref_path *ref_path,
6469 				u64 extent_start)
6470 {
6471 	memset(ref_path, 0, sizeof(*ref_path));
6472 	ref_path->extent_start = extent_start;
6473 
6474 	return __next_ref_path(trans, extent_root, ref_path, 1);
6475 }
6476 
6477 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6478 			       struct btrfs_root *extent_root,
6479 			       struct btrfs_ref_path *ref_path)
6480 {
6481 	return __next_ref_path(trans, extent_root, ref_path, 0);
6482 }
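
/*
 * Illustrative sketch, not built (and already inside dead code):
 * walking every reference path of an extent with the two wrappers
 * above.  for_each_ref_path_example() is a hypothetical name; the
 * historical relocation code looped much like this until the ref
 * path functions returned non-zero.
 */
#if 0
static int for_each_ref_path_example(struct btrfs_trans_handle *trans,
				     struct btrfs_root *extent_root,
				     struct btrfs_ref_path *ref_path,
				     u64 extent_start)
{
	int ret;

	ret = btrfs_first_ref_path(trans, extent_root, ref_path,
				   extent_start);
	while (ret == 0) {
		/* ref_path->root_objectid names one referencing root here */
		ret = btrfs_next_ref_path(trans, extent_root, ref_path);
	}
	return ret < 0 ? ret : 0;
}
#endif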
6483 
6484 static noinline int get_new_locations(struct inode *reloc_inode,
6485 				      struct btrfs_key *extent_key,
6486 				      u64 offset, int no_fragment,
6487 				      struct disk_extent **extents,
6488 				      int *nr_extents)
6489 {
6490 	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6491 	struct btrfs_path *path;
6492 	struct btrfs_file_extent_item *fi;
6493 	struct extent_buffer *leaf;
6494 	struct disk_extent *exts = *extents;
6495 	struct btrfs_key found_key;
6496 	u64 cur_pos;
6497 	u64 last_byte;
6498 	u32 nritems;
6499 	int nr = 0;
6500 	int max = *nr_extents;
6501 	int ret;
6502 
6503 	WARN_ON(!no_fragment && *extents);
6504 	if (!exts) {
6505 		max = 1;
6506 		exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6507 		if (!exts)
6508 			return -ENOMEM;
6509 	}
6510 
6511 	path = btrfs_alloc_path();
6512 	BUG_ON(!path);
6513 
6514 	cur_pos = extent_key->objectid - offset;
6515 	last_byte = extent_key->objectid + extent_key->offset;
6516 	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6517 				       cur_pos, 0);
6518 	if (ret < 0)
6519 		goto out;
6520 	if (ret > 0) {
6521 		ret = -ENOENT;
6522 		goto out;
6523 	}
6524 
6525 	while (1) {
6526 		leaf = path->nodes[0];
6527 		nritems = btrfs_header_nritems(leaf);
6528 		if (path->slots[0] >= nritems) {
6529 			ret = btrfs_next_leaf(root, path);
6530 			if (ret < 0)
6531 				goto out;
6532 			if (ret > 0)
6533 				break;
6534 			leaf = path->nodes[0];
6535 		}
6536 
6537 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6538 		if (found_key.offset != cur_pos ||
6539 		    found_key.type != BTRFS_EXTENT_DATA_KEY ||
6540 		    found_key.objectid != reloc_inode->i_ino)
6541 			break;
6542 
6543 		fi = btrfs_item_ptr(leaf, path->slots[0],
6544 				    struct btrfs_file_extent_item);
6545 		if (btrfs_file_extent_type(leaf, fi) !=
6546 		    BTRFS_FILE_EXTENT_REG ||
6547 		    btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6548 			break;
6549 
6550 		if (nr == max) {
6551 			struct disk_extent *old = exts;
6552 			max *= 2;
6553 			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
6554 			memcpy(exts, old, sizeof(*exts) * nr);
6555 			if (old != *extents)
6556 				kfree(old);
6557 		}
6558 
6559 		exts[nr].disk_bytenr =
6560 			btrfs_file_extent_disk_bytenr(leaf, fi);
6561 		exts[nr].disk_num_bytes =
6562 			btrfs_file_extent_disk_num_bytes(leaf, fi);
6563 		exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6564 		exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6565 		exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6566 		exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6567 		exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6568 		exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6569 									   fi);
6570 		BUG_ON(exts[nr].offset > 0);
6571 		BUG_ON(exts[nr].compression || exts[nr].encryption);
6572 		BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
6573 
6574 		cur_pos += exts[nr].num_bytes;
6575 		nr++;
6576 
6577 		if (cur_pos + offset >= last_byte)
6578 			break;
6579 
6580 		if (no_fragment) {
6581 			ret = 1;
6582 			goto out;
6583 		}
6584 		path->slots[0]++;
6585 	}
6586 
6587 	BUG_ON(cur_pos + offset > last_byte);
6588 	if (cur_pos + offset < last_byte) {
6589 		ret = -ENOENT;
6590 		goto out;
6591 	}
6592 	ret = 0;
6593 out:
6594 	btrfs_free_path(path);
6595 	if (ret) {
6596 		if (exts != *extents)
6597 			kfree(exts);
6598 	} else {
6599 		*extents = exts;
6600 		*nr_extents = nr;
6601 	}
6602 	return ret;
6603 }
6604 
6605 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6606 					struct btrfs_root *root,
6607 					struct btrfs_path *path,
6608 					struct btrfs_key *extent_key,
6609 					struct btrfs_key *leaf_key,
6610 					struct btrfs_ref_path *ref_path,
6611 					struct disk_extent *new_extents,
6612 					int nr_extents)
6613 {
6614 	struct extent_buffer *leaf;
6615 	struct btrfs_file_extent_item *fi;
6616 	struct inode *inode = NULL;
6617 	struct btrfs_key key;
6618 	u64 lock_start = 0;
6619 	u64 lock_end = 0;
6620 	u64 num_bytes;
6621 	u64 ext_offset;
6622 	u64 search_end = (u64)-1;
6623 	u32 nritems;
6624 	int nr_scanned = 0;
6625 	int extent_locked = 0;
6626 	int extent_type;
6627 	int ret;
6628 
6629 	memcpy(&key, leaf_key, sizeof(key));
6630 	if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6631 		if (key.objectid < ref_path->owner_objectid ||
6632 		    (key.objectid == ref_path->owner_objectid &&
6633 		     key.type < BTRFS_EXTENT_DATA_KEY)) {
6634 			key.objectid = ref_path->owner_objectid;
6635 			key.type = BTRFS_EXTENT_DATA_KEY;
6636 			key.offset = 0;
6637 		}
6638 	}
6639 
6640 	while (1) {
6641 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6642 		if (ret < 0)
6643 			goto out;
6644 
6645 		leaf = path->nodes[0];
6646 		nritems = btrfs_header_nritems(leaf);
6647 next:
6648 		if (extent_locked && ret > 0) {
6649 			/*
6650 			 * the file extent item was modified by someone
6651 			 * before the extent got locked.
6652 			 */
6653 			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6654 				      lock_end, GFP_NOFS);
6655 			extent_locked = 0;
6656 		}
6657 
6658 		if (path->slots[0] >= nritems) {
6659 			if (++nr_scanned > 2)
6660 				break;
6661 
6662 			BUG_ON(extent_locked);
6663 			ret = btrfs_next_leaf(root, path);
6664 			if (ret < 0)
6665 				goto out;
6666 			if (ret > 0)
6667 				break;
6668 			leaf = path->nodes[0];
6669 			nritems = btrfs_header_nritems(leaf);
6670 		}
6671 
6672 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6673 
6674 		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6675 			if ((key.objectid > ref_path->owner_objectid) ||
6676 			    (key.objectid == ref_path->owner_objectid &&
6677 			     key.type > BTRFS_EXTENT_DATA_KEY) ||
6678 			    key.offset >= search_end)
6679 				break;
6680 		}
6681 
6682 		if (inode && key.objectid != inode->i_ino) {
6683 			BUG_ON(extent_locked);
6684 			btrfs_release_path(root, path);
6685 			mutex_unlock(&inode->i_mutex);
6686 			iput(inode);
6687 			inode = NULL;
6688 			continue;
6689 		}
6690 
6691 		if (key.type != BTRFS_EXTENT_DATA_KEY) {
6692 			path->slots[0]++;
6693 			ret = 1;
6694 			goto next;
6695 		}
6696 		fi = btrfs_item_ptr(leaf, path->slots[0],
6697 				    struct btrfs_file_extent_item);
6698 		extent_type = btrfs_file_extent_type(leaf, fi);
6699 		if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6700 		     extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
6701 		    (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6702 		     extent_key->objectid)) {
6703 			path->slots[0]++;
6704 			ret = 1;
6705 			goto next;
6706 		}
6707 
6708 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6709 		ext_offset = btrfs_file_extent_offset(leaf, fi);
6710 
6711 		if (search_end == (u64)-1) {
6712 			search_end = key.offset - ext_offset +
6713 				btrfs_file_extent_ram_bytes(leaf, fi);
6714 		}
6715 
6716 		if (!extent_locked) {
6717 			lock_start = key.offset;
6718 			lock_end = lock_start + num_bytes - 1;
6719 		} else {
6720 			if (lock_start > key.offset ||
6721 			    lock_end + 1 < key.offset + num_bytes) {
6722 				unlock_extent(&BTRFS_I(inode)->io_tree,
6723 					      lock_start, lock_end, GFP_NOFS);
6724 				extent_locked = 0;
6725 			}
6726 		}
6727 
6728 		if (!inode) {
6729 			btrfs_release_path(root, path);
6730 
6731 			inode = btrfs_iget_locked(root->fs_info->sb,
6732 						  key.objectid, root);
6733 			if (inode->i_state & I_NEW) {
6734 				BTRFS_I(inode)->root = root;
6735 				BTRFS_I(inode)->location.objectid =
6736 					key.objectid;
6737 				BTRFS_I(inode)->location.type =
6738 					BTRFS_INODE_ITEM_KEY;
6739 				BTRFS_I(inode)->location.offset = 0;
6740 				btrfs_read_locked_inode(inode);
6741 				unlock_new_inode(inode);
6742 			}
6743 			/*
6744 			 * some code calls btrfs_commit_transaction while
6745 			 * holding the i_mutex, so we can't use mutex_lock
6746 			 * here.
6747 			 */
6748 			if (is_bad_inode(inode) ||
6749 			    !mutex_trylock(&inode->i_mutex)) {
6750 				iput(inode);
6751 				inode = NULL;
6752 				key.offset = (u64)-1;
6753 				goto skip;
6754 			}
6755 		}
6756 
6757 		if (!extent_locked) {
6758 			struct btrfs_ordered_extent *ordered;
6759 
6760 			btrfs_release_path(root, path);
6761 
6762 			lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6763 				    lock_end, GFP_NOFS);
6764 			ordered = btrfs_lookup_first_ordered_extent(inode,
6765 								    lock_end);
6766 			if (ordered &&
6767 			    ordered->file_offset <= lock_end &&
6768 			    ordered->file_offset + ordered->len > lock_start) {
6769 				unlock_extent(&BTRFS_I(inode)->io_tree,
6770 					      lock_start, lock_end, GFP_NOFS);
6771 				btrfs_start_ordered_extent(inode, ordered, 1);
6772 				btrfs_put_ordered_extent(ordered);
6773 				key.offset += num_bytes;
6774 				goto skip;
6775 			}
6776 			if (ordered)
6777 				btrfs_put_ordered_extent(ordered);
6778 
6779 			extent_locked = 1;
6780 			continue;
6781 		}
6782 
6783 		if (nr_extents == 1) {
6784 			/* update extent pointer in place */
6785 			btrfs_set_file_extent_disk_bytenr(leaf, fi,
6786 						new_extents[0].disk_bytenr);
6787 			btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6788 						new_extents[0].disk_num_bytes);
6789 			btrfs_mark_buffer_dirty(leaf);
6790 
6791 			btrfs_drop_extent_cache(inode, key.offset,
6792 						key.offset + num_bytes - 1, 0);
6793 
6794 			ret = btrfs_inc_extent_ref(trans, root,
6795 						new_extents[0].disk_bytenr,
6796 						new_extents[0].disk_num_bytes,
6797 						leaf->start,
6798 						root->root_key.objectid,
6799 						trans->transid,
6800 						key.objectid);
6801 			BUG_ON(ret);
6802 
6803 			ret = btrfs_free_extent(trans, root,
6804 						extent_key->objectid,
6805 						extent_key->offset,
6806 						leaf->start,
6807 						btrfs_header_owner(leaf),
6808 						btrfs_header_generation(leaf),
6809 						key.objectid, 0);
6810 			BUG_ON(ret);
6811 
6812 			btrfs_release_path(root, path);
6813 			key.offset += num_bytes;
6814 		} else {
6815 			BUG_ON(1);
6816 #if 0
6817 			u64 alloc_hint;
6818 			u64 extent_len;
6819 			int i;
6820 			/*
6821 			 * drop the old extent pointer first, then insert the
6822 			 * new pointers one by one
6823 			 */
6824 			btrfs_release_path(root, path);
6825 			ret = btrfs_drop_extents(trans, root, inode, key.offset,
6826 						 key.offset + num_bytes,
6827 						 key.offset, &alloc_hint);
6828 			BUG_ON(ret);
6829 
6830 			for (i = 0; i < nr_extents; i++) {
6831 				if (ext_offset >= new_extents[i].num_bytes) {
6832 					ext_offset -= new_extents[i].num_bytes;
6833 					continue;
6834 				}
6835 				extent_len = min(new_extents[i].num_bytes -
6836 						 ext_offset, num_bytes);
6837 
6838 				ret = btrfs_insert_empty_item(trans, root,
6839 							      path, &key,
6840 							      sizeof(*fi));
6841 				BUG_ON(ret);
6842 
6843 				leaf = path->nodes[0];
6844 				fi = btrfs_item_ptr(leaf, path->slots[0],
6845 						struct btrfs_file_extent_item);
6846 				btrfs_set_file_extent_generation(leaf, fi,
6847 							trans->transid);
6848 				btrfs_set_file_extent_type(leaf, fi,
6849 							BTRFS_FILE_EXTENT_REG);
6850 				btrfs_set_file_extent_disk_bytenr(leaf, fi,
6851 						new_extents[i].disk_bytenr);
6852 				btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6853 						new_extents[i].disk_num_bytes);
6854 				btrfs_set_file_extent_ram_bytes(leaf, fi,
6855 						new_extents[i].ram_bytes);
6856 
6857 				btrfs_set_file_extent_compression(leaf, fi,
6858 						new_extents[i].compression);
6859 				btrfs_set_file_extent_encryption(leaf, fi,
6860 						new_extents[i].encryption);
6861 				btrfs_set_file_extent_other_encoding(leaf, fi,
6862 						new_extents[i].other_encoding);
6863 
6864 				btrfs_set_file_extent_num_bytes(leaf, fi,
6865 							extent_len);
6866 				ext_offset += new_extents[i].offset;
6867 				btrfs_set_file_extent_offset(leaf, fi,
6868 							ext_offset);
6869 				btrfs_mark_buffer_dirty(leaf);
6870 
6871 				btrfs_drop_extent_cache(inode, key.offset,
6872 						key.offset + extent_len - 1, 0);
6873 
6874 				ret = btrfs_inc_extent_ref(trans, root,
6875 						new_extents[i].disk_bytenr,
6876 						new_extents[i].disk_num_bytes,
6877 						leaf->start,
6878 						root->root_key.objectid,
6879 						trans->transid, key.objectid);
6880 				BUG_ON(ret);
6881 				btrfs_release_path(root, path);
6882 
6883 				inode_add_bytes(inode, extent_len);
6884 
6885 				ext_offset = 0;
6886 				num_bytes -= extent_len;
6887 				key.offset += extent_len;
6888 
6889 				if (num_bytes == 0)
6890 					break;
6891 			}
6892 			BUG_ON(i >= nr_extents);
6893 #endif
6894 		}
6895 
6896 		if (extent_locked) {
6897 			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6898 				      lock_end, GFP_NOFS);
6899 			extent_locked = 0;
6900 		}
6901 skip:
6902 		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
6903 		    key.offset >= search_end)
6904 			break;
6905 
6906 		cond_resched();
6907 	}
6908 	ret = 0;
6909 out:
6910 	btrfs_release_path(root, path);
6911 	if (inode) {
6912 		mutex_unlock(&inode->i_mutex);
6913 		if (extent_locked) {
6914 			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6915 				      lock_end, GFP_NOFS);
6916 		}
6917 		iput(inode);
6918 	}
6919 	return ret;
6920 }
6921 
6922 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6923 			       struct btrfs_root *root,
6924 			       struct extent_buffer *buf, u64 orig_start)
6925 {
6926 	int level;
6927 	int ret;
6928 
6929 	BUG_ON(btrfs_header_generation(buf) != trans->transid);
6930 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6931 
6932 	level = btrfs_header_level(buf);
6933 	if (level == 0) {
6934 		struct btrfs_leaf_ref *ref;
6935 		struct btrfs_leaf_ref *orig_ref;
6936 
6937 		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6938 		if (!orig_ref)
6939 			return -ENOENT;
6940 
6941 		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6942 		if (!ref) {
6943 			btrfs_free_leaf_ref(root, orig_ref);
6944 			return -ENOMEM;
6945 		}
6946 
6947 		ref->nritems = orig_ref->nritems;
6948 		memcpy(ref->extents, orig_ref->extents,
6949 			sizeof(ref->extents[0]) * ref->nritems);
6950 
6951 		btrfs_free_leaf_ref(root, orig_ref);
6952 
6953 		ref->root_gen = trans->transid;
6954 		ref->bytenr = buf->start;
6955 		ref->owner = btrfs_header_owner(buf);
6956 		ref->generation = btrfs_header_generation(buf);
6957 
6958 		ret = btrfs_add_leaf_ref(root, ref, 0);
6959 		WARN_ON(ret);
6960 		btrfs_free_leaf_ref(root, ref);
6961 	}
6962 	return 0;
6963 }
6964 
6965 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6966 					struct extent_buffer *leaf,
6967 					struct btrfs_block_group_cache *group,
6968 					struct btrfs_root *target_root)
6969 {
6970 	struct btrfs_key key;
6971 	struct inode *inode = NULL;
6972 	struct btrfs_file_extent_item *fi;
6973 	struct extent_state *cached_state = NULL;
6974 	u64 num_bytes;
6975 	u64 skip_objectid = 0;
6976 	u32 nritems;
6977 	u32 i;
6978 
6979 	nritems = btrfs_header_nritems(leaf);
6980 	for (i = 0; i < nritems; i++) {
6981 		btrfs_item_key_to_cpu(leaf, &key, i);
6982 		if (key.objectid == skip_objectid ||
6983 		    key.type != BTRFS_EXTENT_DATA_KEY)
6984 			continue;
6985 		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6986 		if (btrfs_file_extent_type(leaf, fi) ==
6987 		    BTRFS_FILE_EXTENT_INLINE)
6988 			continue;
6989 		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6990 			continue;
6991 		if (!inode || inode->i_ino != key.objectid) {
6992 			iput(inode);
6993 			inode = btrfs_ilookup(target_root->fs_info->sb,
6994 					      key.objectid, target_root, 1);
6995 		}
6996 		if (!inode) {
6997 			skip_objectid = key.objectid;
6998 			continue;
6999 		}
7000 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7001 
7002 		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
7003 				 key.offset + num_bytes - 1, 0, &cached_state,
7004 				 GFP_NOFS);
7005 		btrfs_drop_extent_cache(inode, key.offset,
7006 					key.offset + num_bytes - 1, 1);
7007 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
7008 				     key.offset + num_bytes - 1, &cached_state,
7009 				     GFP_NOFS);
7010 		cond_resched();
7011 	}
7012 	iput(inode);
7013 	return 0;
7014 }
7015 
7016 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
7017 					struct btrfs_root *root,
7018 					struct extent_buffer *leaf,
7019 					struct btrfs_block_group_cache *group,
7020 					struct inode *reloc_inode)
7021 {
7022 	struct btrfs_key key;
7023 	struct btrfs_key extent_key;
7024 	struct btrfs_file_extent_item *fi;
7025 	struct btrfs_leaf_ref *ref;
7026 	struct disk_extent *new_extent;
7027 	u64 bytenr;
7028 	u64 num_bytes;
7029 	u32 nritems;
7030 	u32 i;
7031 	int ext_index;
7032 	int nr_extent;
7033 	int ret;
7034 
7035 	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
7036 	BUG_ON(!new_extent);
7037 
7038 	ref = btrfs_lookup_leaf_ref(root, leaf->start);
7039 	BUG_ON(!ref);
7040 
7041 	ext_index = -1;
7042 	nritems = btrfs_header_nritems(leaf);
7043 	for (i = 0; i < nritems; i++) {
7044 		btrfs_item_key_to_cpu(leaf, &key, i);
7045 		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
7046 			continue;
7047 		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7048 		if (btrfs_file_extent_type(leaf, fi) ==
7049 		    BTRFS_FILE_EXTENT_INLINE)
7050 			continue;
7051 		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7052 		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
7053 		if (bytenr == 0)
7054 			continue;
7055 
7056 		ext_index++;
7057 		if (bytenr >= group->key.objectid + group->key.offset ||
7058 		    bytenr + num_bytes <= group->key.objectid)
7059 			continue;
7060 
7061 		extent_key.objectid = bytenr;
7062 		extent_key.offset = num_bytes;
7063 		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7064 		nr_extent = 1;
7065 		ret = get_new_locations(reloc_inode, &extent_key,
7066 					group->key.objectid, 1,
7067 					&new_extent, &nr_extent);
7068 		if (ret > 0)
7069 			continue;
7070 		BUG_ON(ret < 0);
7071 
7072 		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
7073 		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
7074 		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
7075 		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
7076 
7077 		btrfs_set_file_extent_disk_bytenr(leaf, fi,
7078 						new_extent->disk_bytenr);
7079 		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7080 						new_extent->disk_num_bytes);
7081 		btrfs_mark_buffer_dirty(leaf);
7082 
7083 		ret = btrfs_inc_extent_ref(trans, root,
7084 					new_extent->disk_bytenr,
7085 					new_extent->disk_num_bytes,
7086 					leaf->start,
7087 					root->root_key.objectid,
7088 					trans->transid, key.objectid);
7089 		BUG_ON(ret);
7090 
7091 		ret = btrfs_free_extent(trans, root,
7092 					bytenr, num_bytes, leaf->start,
7093 					btrfs_header_owner(leaf),
7094 					btrfs_header_generation(leaf),
7095 					key.objectid, 0);
7096 		BUG_ON(ret);
7097 		cond_resched();
7098 	}
7099 	kfree(new_extent);
7100 	BUG_ON(ext_index + 1 != ref->nritems);
7101 	btrfs_free_leaf_ref(root, ref);
7102 	return 0;
7103 }
7104 
7105 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
7106 			  struct btrfs_root *root)
7107 {
7108 	struct btrfs_root *reloc_root;
7109 	int ret;
7110 
7111 	if (root->reloc_root) {
7112 		reloc_root = root->reloc_root;
7113 		root->reloc_root = NULL;
7114 		list_add(&reloc_root->dead_list,
7115 			 &root->fs_info->dead_reloc_roots);
7116 
7117 		btrfs_set_root_bytenr(&reloc_root->root_item,
7118 				      reloc_root->node->start);
7119 		btrfs_set_root_level(&reloc_root->root_item,
7120 				     btrfs_header_level(reloc_root->node));
7121 		memset(&reloc_root->root_item.drop_progress, 0,
7122 			sizeof(struct btrfs_disk_key));
7123 		reloc_root->root_item.drop_level = 0;
7124 
7125 		ret = btrfs_update_root(trans, root->fs_info->tree_root,
7126 					&reloc_root->root_key,
7127 					&reloc_root->root_item);
7128 		BUG_ON(ret);
7129 	}
7130 	return 0;
7131 }
7132 
7133 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
7134 {
7135 	struct btrfs_trans_handle *trans;
7136 	struct btrfs_root *reloc_root;
7137 	struct btrfs_root *prev_root = NULL;
7138 	struct list_head dead_roots;
7139 	int ret;
7140 	unsigned long nr;
7141 
7142 	INIT_LIST_HEAD(&dead_roots);
7143 	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
7144 
7145 	while (!list_empty(&dead_roots)) {
7146 		reloc_root = list_entry(dead_roots.prev,
7147 					struct btrfs_root, dead_list);
7148 		list_del_init(&reloc_root->dead_list);
7149 
7150 		BUG_ON(reloc_root->commit_root != NULL);
7151 		while (1) {
7152 			trans = btrfs_join_transaction(root, 1);
7153 			BUG_ON(!trans);
7154 
7155 			mutex_lock(&root->fs_info->drop_mutex);
7156 			ret = btrfs_drop_snapshot(trans, reloc_root);
7157 			if (ret != -EAGAIN)
7158 				break;
7159 			mutex_unlock(&root->fs_info->drop_mutex);
7160 
7161 			nr = trans->blocks_used;
7162 			ret = btrfs_end_transaction(trans, root);
7163 			BUG_ON(ret);
7164 			btrfs_btree_balance_dirty(root, nr);
7165 		}
7166 
7167 		free_extent_buffer(reloc_root->node);
7168 
7169 		ret = btrfs_del_root(trans, root->fs_info->tree_root,
7170 				     &reloc_root->root_key);
7171 		BUG_ON(ret);
7172 		mutex_unlock(&root->fs_info->drop_mutex);
7173 
7174 		nr = trans->blocks_used;
7175 		ret = btrfs_end_transaction(trans, root);
7176 		BUG_ON(ret);
7177 		btrfs_btree_balance_dirty(root, nr);
7178 
7179 		kfree(prev_root);
7180 		prev_root = reloc_root;
7181 	}
7182 	if (prev_root) {
7183 		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
7184 		kfree(prev_root);
7185 	}
7186 	return 0;
7187 }
7188 
7189 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
7190 {
7191 	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
7192 	return 0;
7193 }
7194 
7195 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7196 {
7197 	struct btrfs_root *reloc_root;
7198 	struct btrfs_trans_handle *trans;
7199 	struct btrfs_key location;
7200 	int found;
7201 	int ret;
7202 
7203 	mutex_lock(&root->fs_info->tree_reloc_mutex);
7204 	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
7205 	BUG_ON(ret);
7206 	found = !list_empty(&root->fs_info->dead_reloc_roots);
7207 	mutex_unlock(&root->fs_info->tree_reloc_mutex);
7208 
7209 	if (found) {
7210 		trans = btrfs_start_transaction(root, 1);
7211 		BUG_ON(!trans);
7212 		ret = btrfs_commit_transaction(trans, root);
7213 		BUG_ON(ret);
7214 	}
7215 
7216 	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7217 	location.offset = (u64)-1;
7218 	location.type = BTRFS_ROOT_ITEM_KEY;
7219 
7220 	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7221 	BUG_ON(!reloc_root);
7222 	btrfs_orphan_cleanup(reloc_root);
7223 	return 0;
7224 }
7225 
7226 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7227 				    struct btrfs_root *root)
7228 {
7229 	struct btrfs_root *reloc_root;
7230 	struct extent_buffer *eb;
7231 	struct btrfs_root_item *root_item;
7232 	struct btrfs_key root_key;
7233 	int ret;
7234 
7235 	BUG_ON(!root->ref_cows);
7236 	if (root->reloc_root)
7237 		return 0;
7238 
7239 	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
7240 	BUG_ON(!root_item);
7241 
7242 	ret = btrfs_copy_root(trans, root, root->commit_root,
7243 			      &eb, BTRFS_TREE_RELOC_OBJECTID);
7244 	BUG_ON(ret);
7245 
7246 	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7247 	root_key.offset = root->root_key.objectid;
7248 	root_key.type = BTRFS_ROOT_ITEM_KEY;
7249 
7250 	memcpy(root_item, &root->root_item, sizeof(*root_item));
7251 	btrfs_set_root_refs(root_item, 0);
7252 	btrfs_set_root_bytenr(root_item, eb->start);
7253 	btrfs_set_root_level(root_item, btrfs_header_level(eb));
7254 	btrfs_set_root_generation(root_item, trans->transid);
7255 
7256 	btrfs_tree_unlock(eb);
7257 	free_extent_buffer(eb);
7258 
7259 	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
7260 				&root_key, root_item);
7261 	BUG_ON(ret);
7262 	kfree(root_item);
7263 
7264 	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7265 						 &root_key);
7266 	BUG_ON(!reloc_root);
7267 	reloc_root->last_trans = trans->transid;
7268 	reloc_root->commit_root = NULL;
7269 	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
7270 
7271 	root->reloc_root = reloc_root;
7272 	return 0;
7273 }
7274 
7275 /*
7276  * Core function of space balance.
7277  *
7278  * The idea is to use reloc trees to relocate tree blocks in reference
7279  * counted roots. There is one reloc tree for each subvol, and all
7280  * reloc trees share the same root key objectid. Reloc trees are
7281  * snapshots of the latest committed roots of subvols (root->commit_root).
7282  *
7283  * To relocate a tree block referenced by a subvol, there are two steps.
7284  * COW the block through the subvol's reloc tree, then update the block
7285  * pointer in the subvol to point to the new block. Since all reloc trees
7286  * share the same root key objectid, special handling for tree blocks
7287  * owned by them is easy. Once a tree block has been COWed in one reloc
7288  * tree, we can use the resulting new block directly when the same block
7289  * needs to be COWed again through another reloc tree. In this way,
7290  * relocated tree blocks are shared between reloc trees, so they are
7291  * also shared between subvols.
7292  */
7293 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
7294 				      struct btrfs_root *root,
7295 				      struct btrfs_path *path,
7296 				      struct btrfs_key *first_key,
7297 				      struct btrfs_ref_path *ref_path,
7298 				      struct btrfs_block_group_cache *group,
7299 				      struct inode *reloc_inode)
7300 {
7301 	struct btrfs_root *reloc_root;
7302 	struct extent_buffer *eb = NULL;
7303 	struct btrfs_key *keys;
7304 	u64 *nodes;
7305 	int level;
7306 	int shared_level;
7307 	int lowest_level = 0;
7308 	int ret;
7309 
7310 	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
7311 		lowest_level = ref_path->owner_objectid;
7312 
7313 	if (!root->ref_cows) {
7314 		path->lowest_level = lowest_level;
7315 		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
7316 		BUG_ON(ret < 0);
7317 		path->lowest_level = 0;
7318 		btrfs_release_path(root, path);
7319 		return 0;
7320 	}
7321 
7322 	mutex_lock(&root->fs_info->tree_reloc_mutex);
7323 	ret = init_reloc_tree(trans, root);
7324 	BUG_ON(ret);
7325 	reloc_root = root->reloc_root;
7326 
7327 	shared_level = ref_path->shared_level;
7328 	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
7329 
7330 	keys = ref_path->node_keys;
7331 	nodes = ref_path->new_nodes;
7332 	memset(&keys[shared_level + 1], 0,
7333 	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
7334 	memset(&nodes[shared_level + 1], 0,
7335 	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
7336 
7337 	if (nodes[lowest_level] == 0) {
7338 		path->lowest_level = lowest_level;
7339 		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7340 					0, 1);
7341 		BUG_ON(ret);
7342 		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
7343 			eb = path->nodes[level];
7344 			if (!eb || eb == reloc_root->node)
7345 				break;
7346 			nodes[level] = eb->start;
7347 			if (level == 0)
7348 				btrfs_item_key_to_cpu(eb, &keys[level], 0);
7349 			else
7350 				btrfs_node_key_to_cpu(eb, &keys[level], 0);
7351 		}
7352 		if (nodes[0] &&
7353 		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7354 			eb = path->nodes[0];
7355 			ret = replace_extents_in_leaf(trans, reloc_root, eb,
7356 						      group, reloc_inode);
7357 			BUG_ON(ret);
7358 		}
7359 		btrfs_release_path(reloc_root, path);
7360 	} else {
7361 		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
7362 				       lowest_level);
7363 		BUG_ON(ret);
7364 	}
7365 
7366 	/*
7367 	 * replace tree blocks in the fs tree with tree blocks in
7368 	 * the reloc tree.
7369 	 */
7370 	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
7371 	BUG_ON(ret < 0);
7372 
7373 	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7374 		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7375 					0, 0);
7376 		BUG_ON(ret);
7377 		extent_buffer_get(path->nodes[0]);
7378 		eb = path->nodes[0];
7379 		btrfs_release_path(reloc_root, path);
7380 		ret = invalidate_extent_cache(reloc_root, eb, group, root);
7381 		BUG_ON(ret);
7382 		free_extent_buffer(eb);
7383 	}
7384 
7385 	mutex_unlock(&root->fs_info->tree_reloc_mutex);
7386 	path->lowest_level = 0;
7387 	return 0;
7388 }
7389 
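/*
 * A worked example of the sharing described above (editor's sketch,
 * assuming subvols A and B both reference tree block X): relocating
 * through A's reloc tree COWs X into X'.  When B's reloc tree later
 * needs the same block, the existing X' is used directly instead of
 * COWing X a second time, so A and B end up sharing the relocated
 * block just as they shared the original.
 */
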
7390 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
7391 					struct btrfs_root *root,
7392 					struct btrfs_path *path,
7393 					struct btrfs_key *first_key,
7394 					struct btrfs_ref_path *ref_path)
7395 {
7396 	int ret;
7397 
7398 	ret = relocate_one_path(trans, root, path, first_key,
7399 				ref_path, NULL, NULL);
7400 	BUG_ON(ret);
7401 
7402 	return 0;
7403 }
7404 
7405 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
7406 				    struct btrfs_root *extent_root,
7407 				    struct btrfs_path *path,
7408 				    struct btrfs_key *extent_key)
7409 {
7410 	int ret;
7411 
7412 	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7413 	if (ret)
7414 		goto out;
7415 	ret = btrfs_del_item(trans, extent_root, path);
7416 out:
7417 	btrfs_release_path(extent_root, path);
7418 	return ret;
7419 }
7420 
7421 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
7422 						struct btrfs_ref_path *ref_path)
7423 {
7424 	struct btrfs_key root_key;
7425 
7426 	root_key.objectid = ref_path->root_objectid;
7427 	root_key.type = BTRFS_ROOT_ITEM_KEY;
7428 	if (is_cowonly_root(ref_path->root_objectid))
7429 		root_key.offset = 0;
7430 	else
7431 		root_key.offset = (u64)-1;
7432 
7433 	return btrfs_read_fs_root_no_name(fs_info, &root_key);
7434 }
7435 
7436 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7437 					struct btrfs_path *path,
7438 					struct btrfs_key *extent_key,
7439 					struct btrfs_block_group_cache *group,
7440 					struct inode *reloc_inode, int pass)
7441 {
7442 	struct btrfs_trans_handle *trans;
7443 	struct btrfs_root *found_root;
7444 	struct btrfs_ref_path *ref_path = NULL;
7445 	struct disk_extent *new_extents = NULL;
7446 	int nr_extents = 0;
7447 	int loops;
7448 	int ret;
7449 	int level;
7450 	struct btrfs_key first_key;
7451 	u64 prev_block = 0;
7452 
7453 
7454 	trans = btrfs_start_transaction(extent_root, 1);
7455 	BUG_ON(!trans);
7456 
7457 	if (extent_key->objectid == 0) {
7458 		ret = del_extent_zero(trans, extent_root, path, extent_key);
7459 		goto out;
7460 	}
7461 
7462 	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7463 	if (!ref_path) {
7464 		ret = -ENOMEM;
7465 		goto out;
7466 	}
7467 
7468 	for (loops = 0; ; loops++) {
7469 		if (loops == 0) {
7470 			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7471 						   extent_key->objectid);
7472 		} else {
7473 			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7474 		}
7475 		if (ret < 0)
7476 			goto out;
7477 		if (ret > 0)
7478 			break;
7479 
7480 		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7481 		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7482 			continue;
7483 
7484 		found_root = read_ref_root(extent_root->fs_info, ref_path);
7485 		BUG_ON(!found_root);
7486 		/*
7487 		 * for reference counted trees, only process reference paths
7488 		 * rooted at the latest committed root.
7489 		 */
7490 		if (found_root->ref_cows &&
7491 		    ref_path->root_generation != found_root->root_key.offset)
7492 			continue;
7493 
7494 		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7495 			if (pass == 0) {
7496 				/*
7497 				 * copy data extents to new locations
7498 				 */
7499 				u64 group_start = group->key.objectid;
7500 				ret = relocate_data_extent(reloc_inode,
7501 							   extent_key,
7502 							   group_start);
7503 				if (ret < 0)
7504 					goto out;
7505 				break;
7506 			}
7507 			level = 0;
7508 		} else {
7509 			level = ref_path->owner_objectid;
7510 		}
7511 
7512 		if (prev_block != ref_path->nodes[level]) {
7513 			struct extent_buffer *eb;
7514 			u64 block_start = ref_path->nodes[level];
7515 			u64 block_size = btrfs_level_size(found_root, level);
7516 
7517 			eb = read_tree_block(found_root, block_start,
7518 					     block_size, 0);
7519 			btrfs_tree_lock(eb);
7520 			BUG_ON(level != btrfs_header_level(eb));
7521 
7522 			if (level == 0)
7523 				btrfs_item_key_to_cpu(eb, &first_key, 0);
7524 			else
7525 				btrfs_node_key_to_cpu(eb, &first_key, 0);
7526 
7527 			btrfs_tree_unlock(eb);
7528 			free_extent_buffer(eb);
7529 			prev_block = block_start;
7530 		}
7531 
7532 		mutex_lock(&extent_root->fs_info->trans_mutex);
7533 		btrfs_record_root_in_trans(found_root);
7534 		mutex_unlock(&extent_root->fs_info->trans_mutex);
7535 		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7536 			/*
7537 			 * try to update data extent references while
7538 			 * keeping metadata shared between snapshots.
7539 			 */
7540 			if (pass == 1) {
7541 				ret = relocate_one_path(trans, found_root,
7542 						path, &first_key, ref_path,
7543 						group, reloc_inode);
7544 				if (ret < 0)
7545 					goto out;
7546 				continue;
7547 			}
7548 			/*
7549 			 * use fallback method to process the remaining
7550 			 * references.
7551 			 */
7552 			if (!new_extents) {
7553 				u64 group_start = group->key.objectid;
7554 				new_extents = kmalloc(sizeof(*new_extents),
7555 						      GFP_NOFS);
				if (!new_extents) {
					ret = -ENOMEM;
					goto out;
				}
7556 				nr_extents = 1;
7557 				ret = get_new_locations(reloc_inode,
7558 							extent_key,
7559 							group_start, 1,
7560 							&new_extents,
7561 							&nr_extents);
7562 				if (ret)
7563 					goto out;
7564 			}
7565 			ret = replace_one_extent(trans, found_root,
7566 						path, extent_key,
7567 						&first_key, ref_path,
7568 						new_extents, nr_extents);
7569 		} else {
7570 			ret = relocate_tree_block(trans, found_root, path,
7571 						  &first_key, ref_path);
7572 		}
7573 		if (ret < 0)
7574 			goto out;
7575 	}
7576 	ret = 0;
7577 out:
7578 	btrfs_end_transaction(trans, extent_root);
7579 	kfree(new_extents);
7580 	kfree(ref_path);
7581 	return ret;
7582 }
7583 #endif
7584 
7585 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7586 {
7587 	u64 num_devices;
7588 	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7589 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7590 
7591 	num_devices = root->fs_info->fs_devices->rw_devices;
7592 	if (num_devices == 1) {
7593 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7594 		stripped = flags & ~stripped;
7595 
7596 		/* turn raid0 into single device chunks */
7597 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
7598 			return stripped;
7599 
7600 		/* turn mirroring into duplication */
7601 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7602 			     BTRFS_BLOCK_GROUP_RAID10))
7603 			return stripped | BTRFS_BLOCK_GROUP_DUP;
7604 		return flags;
7605 	} else {
7606 		/* they already had raid on here, just return */
7607 		if (flags & stripped)
7608 			return flags;
7609 
7610 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7611 		stripped = flags & ~stripped;
7612 
7613 		/* switch duplicated blocks with raid1 */
7614 		if (flags & BTRFS_BLOCK_GROUP_DUP)
7615 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
7616 
7617 		/* turn single device chunks into raid0 */
7618 		return stripped | BTRFS_BLOCK_GROUP_RAID0;
7619 	}
7620 	return flags;
7621 }
7622 
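/*
 * Editor's sketch (not in the original source) showing what the
 * conversion table above does; __check_restripe_target is a
 * hypothetical name and the WARN_ONs only document expected results.
 */
static void __check_restripe_target(struct btrfs_root *root)
{
	u64 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1;

	flags = update_block_group_flags(root, flags);
	if (root->fs_info->fs_devices->rw_devices == 1)
		/* mirroring degrades to duplication on one device */
		WARN_ON(!(flags & BTRFS_BLOCK_GROUP_DUP));
	else
		/* with several devices RAID1 is kept as-is */
		WARN_ON(!(flags & BTRFS_BLOCK_GROUP_RAID1));
}
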
7623 static int set_block_group_ro(struct btrfs_block_group_cache *cache)
7624 {
7625 	struct btrfs_space_info *sinfo = cache->space_info;
7626 	u64 num_bytes;
7627 	int ret = -ENOSPC;
7628 
7629 	if (cache->ro)
7630 		return 0;
7631 
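	/*
	 * editor's note: num_bytes below is the still-unallocated part
	 * of this group; once the group is read-only that space can no
	 * longer be used, so only proceed if the rest of the space_info
	 * can absorb it on top of everything already used, reserved,
	 * pinned or read-only.
	 */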
7632 	spin_lock(&sinfo->lock);
7633 	spin_lock(&cache->lock);
7634 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7635 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7636 
7637 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7638 	    sinfo->bytes_may_use + sinfo->bytes_readonly +
7639 	    cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
7640 		sinfo->bytes_readonly += num_bytes;
7641 		sinfo->bytes_reserved += cache->reserved_pinned;
7642 		cache->reserved_pinned = 0;
7643 		cache->ro = 1;
7644 		ret = 0;
7645 	}
7646 	spin_unlock(&cache->lock);
7647 	spin_unlock(&sinfo->lock);
7648 	return ret;
7649 }
7650 
7651 int btrfs_set_block_group_ro(struct btrfs_root *root,
7652 			     struct btrfs_block_group_cache *cache)
7654 {
7655 	struct btrfs_trans_handle *trans;
7656 	u64 alloc_flags;
7657 	int ret;
7658 
7659 	BUG_ON(cache->ro);
7660 
7661 	trans = btrfs_join_transaction(root, 1);
7662 	BUG_ON(IS_ERR(trans));
7663 
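	/*
	 * editor's note (assumption about do_chunk_alloc semantics):
	 * if the group's profile must change, allocate a chunk with the
	 * new profile first; the 2MB figure is only a size hint, and
	 * force=1 makes do_chunk_alloc try even if space looks ample.
	 */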
7664 	alloc_flags = update_block_group_flags(root, cache->flags);
7665 	if (alloc_flags != cache->flags)
7666 		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7667 
7668 	ret = set_block_group_ro(cache);
7669 	if (!ret)
7670 		goto out;
7671 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7672 	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7673 	if (ret < 0)
7674 		goto out;
7675 	ret = set_block_group_ro(cache);
7676 out:
7677 	btrfs_end_transaction(trans, root);
7678 	return ret;
7679 }
7680 
7681 int btrfs_set_block_group_rw(struct btrfs_root *root,
7682 			      struct btrfs_block_group_cache *cache)
7683 {
7684 	struct btrfs_space_info *sinfo = cache->space_info;
7685 	u64 num_bytes;
7686 
7687 	BUG_ON(!cache->ro);
7688 
7689 	spin_lock(&sinfo->lock);
7690 	spin_lock(&cache->lock);
7691 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7692 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7693 	sinfo->bytes_readonly -= num_bytes;
7694 	cache->ro = 0;
7695 	spin_unlock(&cache->lock);
7696 	spin_unlock(&sinfo->lock);
7697 	return 0;
7698 }
7699 
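/*
 * Editor's usage sketch, not part of this file: the relocation code is
 * expected to pair the two helpers above, flipping a group read-only
 * while its extents are moved and back to rw on failure.
 * move_group_extents() is a hypothetical stand-in for that work.
 */
static int __sketch_move_group(struct btrfs_root *root,
			       struct btrfs_block_group_cache *cache)
{
	int ret;

	ret = btrfs_set_block_group_ro(root, cache);
	if (ret)
		return ret;

	ret = move_group_extents(cache);	/* hypothetical */
	if (ret)
		btrfs_set_block_group_rw(root, cache);
	return ret;
}
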
7700 /*
7701  * checks to see if it's even possible to relocate this block group.
7702  *
7703  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7704  * ok to go ahead and try.
7705  */
7706 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7707 {
7708 	struct btrfs_block_group_cache *block_group;
7709 	struct btrfs_space_info *space_info;
7710 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7711 	struct btrfs_device *device;
7712 	int full = 0;
7713 	int ret = 0;
7714 
7715 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7716 
7717 	/* odd, couldn't find the block group, leave it alone */
7718 	if (!block_group)
7719 		return -1;
7720 
7721 	/* no bytes used, we're good */
7722 	if (!btrfs_block_group_used(&block_group->item))
7723 		goto out;
7724 
7725 	space_info = block_group->space_info;
7726 	spin_lock(&space_info->lock);
7727 
7728 	full = space_info->full;
7729 
7730 	/*
7731 	 * if this is the last block group we have in this space, we can't
7732 	 * relocate it unless we're able to allocate a new chunk below.
7733 	 *
7734 	 * Otherwise, we need to make sure we have room in the space to handle
7735 	 * all of the extents from this block group.  If we can, we're good.
7736 	 */
7737 	if ((space_info->total_bytes != block_group->key.offset) &&
7738 	   (space_info->bytes_used + space_info->bytes_reserved +
7739 	    space_info->bytes_pinned + space_info->bytes_readonly +
7740 	    btrfs_block_group_used(&block_group->item) <
7741 	    space_info->total_bytes)) {
7742 		spin_unlock(&space_info->lock);
7743 		goto out;
7744 	}
7745 	spin_unlock(&space_info->lock);
7746 
7747 	/*
7748 	 * ok we don't have enough space, but maybe we have free space on our
7749 	 * devices to allocate new chunks for relocation, so loop through our
7750 	 * alloc devices and guess if we have enough space.  However, if we
7751 	 * were marked as full, then we know there aren't enough chunks, and we
7752 	 * can just return.
7753 	 */
7754 	ret = -1;
7755 	if (full)
7756 		goto out;
7757 
7758 	mutex_lock(&root->fs_info->chunk_mutex);
7759 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7760 		u64 min_free = btrfs_block_group_used(&block_group->item);
7761 		u64 dev_offset, max_avail;
7762 
7763 		/*
7764 		 * check to make sure we can actually find a chunk with enough
7765 		 * space to fit our block group in.
7766 		 */
7767 		if (device->total_bytes > device->bytes_used + min_free) {
7768 			ret = find_free_dev_extent(NULL, device, min_free,
7769 						   &dev_offset, &max_avail);
7770 			if (!ret)
7771 				break;
7772 			ret = -1;
7773 		}
7774 	}
7775 	mutex_unlock(&root->fs_info->chunk_mutex);
7776 out:
7777 	btrfs_put_block_group(block_group);
7778 	return ret;
7779 }
7780 
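/*
 * Editor's sketch of the intended use; in this kernel the real caller
 * is btrfs_relocate_chunk() in volumes.c, which turns a negative
 * answer into -ENOSPC before doing any relocation work.
 */
static int __sketch_try_relocate(struct btrfs_root *extent_root,
				 u64 chunk_offset)
{
	if (btrfs_can_relocate(extent_root, chunk_offset))
		return -ENOSPC;

	/* ...safe to go ahead and relocate the block group... */
	return 0;
}
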
7781 static int find_first_block_group(struct btrfs_root *root,
7782 		struct btrfs_path *path, struct btrfs_key *key)
7783 {
7784 	int ret = 0;
7785 	struct btrfs_key found_key;
7786 	struct extent_buffer *leaf;
7787 	int slot;
7788 
7789 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7790 	if (ret < 0)
7791 		goto out;
7792 
7793 	while (1) {
7794 		slot = path->slots[0];
7795 		leaf = path->nodes[0];
7796 		if (slot >= btrfs_header_nritems(leaf)) {
7797 			ret = btrfs_next_leaf(root, path);
7798 			if (ret == 0)
7799 				continue;
7800 			if (ret < 0)
7801 				goto out;
7802 			break;
7803 		}
7804 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7805 
7806 		if (found_key.objectid >= key->objectid &&
7807 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7808 			ret = 0;
7809 			goto out;
7810 		}
7811 		path->slots[0]++;
7812 	}
7813 out:
7814 	return ret;
7815 }
7816 
7817 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7818 {
7819 	struct btrfs_block_group_cache *block_group;
7820 	struct btrfs_space_info *space_info;
7821 	struct btrfs_caching_control *caching_ctl;
7822 	struct rb_node *n;
7823 
7824 	down_write(&info->extent_commit_sem);
7825 	while (!list_empty(&info->caching_block_groups)) {
7826 		caching_ctl = list_entry(info->caching_block_groups.next,
7827 					 struct btrfs_caching_control, list);
7828 		list_del(&caching_ctl->list);
7829 		put_caching_control(caching_ctl);
7830 	}
7831 	up_write(&info->extent_commit_sem);
7832 
7833 	spin_lock(&info->block_group_cache_lock);
7834 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7835 		block_group = rb_entry(n, struct btrfs_block_group_cache,
7836 				       cache_node);
7837 		rb_erase(&block_group->cache_node,
7838 			 &info->block_group_cache_tree);
7839 		spin_unlock(&info->block_group_cache_lock);
7840 
7841 		down_write(&block_group->space_info->groups_sem);
7842 		list_del(&block_group->list);
7843 		up_write(&block_group->space_info->groups_sem);
7844 
7845 		if (block_group->cached == BTRFS_CACHE_STARTED)
7846 			wait_block_group_cache_done(block_group);
7847 
7848 		btrfs_remove_free_space_cache(block_group);
7849 		btrfs_put_block_group(block_group);
7850 
7851 		spin_lock(&info->block_group_cache_lock);
7852 	}
7853 	spin_unlock(&info->block_group_cache_lock);
7854 
7855 	/* now that all the block groups are freed, go through and
7856 	 * free all the space_info structs.  This is only called during
7857 	 * the final stages of unmount, and so we know nobody is
7858 	 * using them.  We call synchronize_rcu() once before we start,
7859 	 * just to be on the safe side.
7860 	 */
7861 	synchronize_rcu();
7862 
7863 	release_global_block_rsv(info);
7864 
7865 	while (!list_empty(&info->space_info)) {
7866 		space_info = list_entry(info->space_info.next,
7867 					struct btrfs_space_info,
7868 					list);
7869 		if (space_info->bytes_pinned > 0 ||
7870 		    space_info->bytes_reserved > 0) {
7871 			WARN_ON(1);
7872 			dump_space_info(space_info, 0, 0);
7873 		}
7874 		list_del(&space_info->list);
7875 		kfree(space_info);
7876 	}
7877 	return 0;
7878 }
7879 
7880 static void __link_block_group(struct btrfs_space_info *space_info,
7881 			       struct btrfs_block_group_cache *cache)
7882 {
7883 	int index = get_block_group_index(cache);
7884 
7885 	down_write(&space_info->groups_sem);
7886 	list_add_tail(&cache->list, &space_info->block_groups[index]);
7887 	up_write(&space_info->groups_sem);
7888 }
7889 
7890 int btrfs_read_block_groups(struct btrfs_root *root)
7891 {
7892 	struct btrfs_path *path;
7893 	int ret;
7894 	struct btrfs_block_group_cache *cache;
7895 	struct btrfs_fs_info *info = root->fs_info;
7896 	struct btrfs_space_info *space_info;
7897 	struct btrfs_key key;
7898 	struct btrfs_key found_key;
7899 	struct extent_buffer *leaf;
7900 
7901 	root = info->extent_root;
7902 	key.objectid = 0;
7903 	key.offset = 0;
7904 	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7905 	path = btrfs_alloc_path();
7906 	if (!path)
7907 		return -ENOMEM;
7908 
7909 	while (1) {
7910 		ret = find_first_block_group(root, path, &key);
7911 		if (ret > 0)
7912 			break;
7913 		if (ret != 0)
7914 			goto error;
7915 
7916 		leaf = path->nodes[0];
7917 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7918 		cache = kzalloc(sizeof(*cache), GFP_NOFS);
7919 		if (!cache) {
7920 			ret = -ENOMEM;
7921 			goto error;
7922 		}
7923 
7924 		atomic_set(&cache->count, 1);
7925 		spin_lock_init(&cache->lock);
7926 		spin_lock_init(&cache->tree_lock);
7927 		cache->fs_info = info;
7928 		INIT_LIST_HEAD(&cache->list);
7929 		INIT_LIST_HEAD(&cache->cluster_list);
7930 
7931 		/*
7932 		 * we only want to have 32k of ram per block group for keeping
7933 		 * track of free space, and if we pass 1/2 of that we want to
7934 		 * start converting things over to using bitmaps
7935 		 */
7936 		cache->extents_thresh = ((1024 * 32) / 2) /
7937 			sizeof(struct btrfs_free_space);
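		/*
		 * (editor's note: on a 64-bit build struct
		 * btrfs_free_space is a few tens of bytes, so this
		 * works out to a few hundred cached extent entries
		 * per block group before bitmaps take over.)
		 */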
7938 
7939 		read_extent_buffer(leaf, &cache->item,
7940 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
7941 				   sizeof(cache->item));
7942 		memcpy(&cache->key, &found_key, sizeof(found_key));
7943 
7944 		key.objectid = found_key.objectid + found_key.offset;
7945 		btrfs_release_path(root, path);
7946 		cache->flags = btrfs_block_group_flags(&cache->item);
7947 		cache->sectorsize = root->sectorsize;
7948 
7949 		/*
7950 		 * check for two cases, either we are full, and therefore
7951 		 * don't need to bother with the caching work since we won't
7952 		 * find any space, or we are empty, and we can just add all
7953 		 * the space in and be done with it.  This saves us a lot of
7954 		 * time, particularly in the full case.
7955 		 */
7956 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7957 			exclude_super_stripes(root, cache);
7958 			cache->last_byte_to_unpin = (u64)-1;
7959 			cache->cached = BTRFS_CACHE_FINISHED;
7960 			free_excluded_extents(root, cache);
7961 		} else if (btrfs_block_group_used(&cache->item) == 0) {
7962 			exclude_super_stripes(root, cache);
7963 			cache->last_byte_to_unpin = (u64)-1;
7964 			cache->cached = BTRFS_CACHE_FINISHED;
7965 			add_new_free_space(cache, root->fs_info,
7966 					   found_key.objectid,
7967 					   found_key.objectid +
7968 					   found_key.offset);
7969 			free_excluded_extents(root, cache);
7970 		}
7971 
7972 		ret = update_space_info(info, cache->flags, found_key.offset,
7973 					btrfs_block_group_used(&cache->item),
7974 					&space_info);
7975 		BUG_ON(ret);
7976 		cache->space_info = space_info;
7977 		spin_lock(&cache->space_info->lock);
7978 		cache->space_info->bytes_readonly += cache->bytes_super;
7979 		spin_unlock(&cache->space_info->lock);
7980 
7981 		__link_block_group(space_info, cache);
7982 
7983 		ret = btrfs_add_block_group_cache(root->fs_info, cache);
7984 		BUG_ON(ret);
7985 
7986 		set_avail_alloc_bits(root->fs_info, cache->flags);
7987 		if (btrfs_chunk_readonly(root, cache->key.objectid))
7988 			set_block_group_ro(cache);
7989 	}
7990 
7991 	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7992 		if (!(get_alloc_profile(root, space_info->flags) &
7993 		      (BTRFS_BLOCK_GROUP_RAID10 |
7994 		       BTRFS_BLOCK_GROUP_RAID1 |
7995 		       BTRFS_BLOCK_GROUP_DUP)))
7996 			continue;
7997 		/*
7998 		 * avoid allocating from un-mirrored block groups (indexes
7999 		 * 3 and 4: RAID0 and single) if there are mirrored groups.
8000 		 */
8001 		list_for_each_entry(cache, &space_info->block_groups[3], list)
8002 			set_block_group_ro(cache);
8003 		list_for_each_entry(cache, &space_info->block_groups[4], list)
8004 			set_block_group_ro(cache);
8005 	}
8006 
8007 	init_global_block_rsv(info);
8008 	ret = 0;
8009 error:
8010 	btrfs_free_path(path);
8011 	return ret;
8012 }
8013 
8014 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8015 			   struct btrfs_root *root, u64 bytes_used,
8016 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
8017 			   u64 size)
8018 {
8019 	int ret;
8020 	struct btrfs_root *extent_root;
8021 	struct btrfs_block_group_cache *cache;
8022 
8023 	extent_root = root->fs_info->extent_root;
8024 
8025 	root->fs_info->last_trans_log_full_commit = trans->transid;
8026 
8027 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
8028 	if (!cache)
8029 		return -ENOMEM;
8030 
8031 	cache->key.objectid = chunk_offset;
8032 	cache->key.offset = size;
8033 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8034 	cache->sectorsize = root->sectorsize;
8035 
8036 	/*
8037 	 * we only want to have 32k of ram per block group for keeping track
8038 	 * of free space, and if we pass 1/2 of that we want to start
8039 	 * converting things over to using bitmaps
8040 	 */
8041 	cache->extents_thresh = ((1024 * 32) / 2) /
8042 		sizeof(struct btrfs_free_space);
8043 	atomic_set(&cache->count, 1);
8044 	spin_lock_init(&cache->lock);
8045 	spin_lock_init(&cache->tree_lock);
8046 	INIT_LIST_HEAD(&cache->list);
8047 	INIT_LIST_HEAD(&cache->cluster_list);
8048 
8049 	btrfs_set_block_group_used(&cache->item, bytes_used);
8050 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8051 	cache->flags = type;
8052 	btrfs_set_block_group_flags(&cache->item, type);
8053 
8054 	cache->last_byte_to_unpin = (u64)-1;
8055 	cache->cached = BTRFS_CACHE_FINISHED;
8056 	exclude_super_stripes(root, cache);
8057 
8058 	add_new_free_space(cache, root->fs_info, chunk_offset,
8059 			   chunk_offset + size);
8060 
8061 	free_excluded_extents(root, cache);
8062 
8063 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8064 				&cache->space_info);
8065 	BUG_ON(ret);
8066 
8067 	spin_lock(&cache->space_info->lock);
8068 	cache->space_info->bytes_readonly += cache->bytes_super;
8069 	spin_unlock(&cache->space_info->lock);
8070 
8071 	__link_block_group(cache->space_info, cache);
8072 
8073 	ret = btrfs_add_block_group_cache(root->fs_info, cache);
8074 	BUG_ON(ret);
8075 
8076 	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
8077 				sizeof(cache->item));
8078 	BUG_ON(ret);
8079 
8080 	set_avail_alloc_bits(extent_root->fs_info, type);
8081 
8082 	return 0;
8083 }
8084 
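/*
 * Editor's sketch of the expected caller (chunk allocation in
 * volumes.c): a freshly allocated chunk becomes a block group with
 * zero bytes used, keyed by its logical start and size.
 */
static int __sketch_add_chunk_group(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    u64 type, u64 start, u64 num_bytes)
{
	return btrfs_make_block_group(trans, extent_root, 0, type,
				      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				      start, num_bytes);
}
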
8085 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8086 			     struct btrfs_root *root, u64 group_start)
8087 {
8088 	struct btrfs_path *path;
8089 	struct btrfs_block_group_cache *block_group;
8090 	struct btrfs_free_cluster *cluster;
8091 	struct btrfs_key key;
8092 	int ret;
8093 
8094 	root = root->fs_info->extent_root;
8095 
8096 	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8097 	BUG_ON(!block_group);
8098 	BUG_ON(!block_group->ro);
8099 
8100 	memcpy(&key, &block_group->key, sizeof(key));
8101 
8102 	/* make sure this block group isn't part of an allocation cluster */
8103 	cluster = &root->fs_info->data_alloc_cluster;
8104 	spin_lock(&cluster->refill_lock);
8105 	btrfs_return_cluster_to_free_space(block_group, cluster);
8106 	spin_unlock(&cluster->refill_lock);
8107 
8108 	/*
8109 	 * make sure this block group isn't part of a metadata
8110 	 * allocation cluster
8111 	 */
8112 	cluster = &root->fs_info->meta_alloc_cluster;
8113 	spin_lock(&cluster->refill_lock);
8114 	btrfs_return_cluster_to_free_space(block_group, cluster);
8115 	spin_unlock(&cluster->refill_lock);
8116 
8117 	path = btrfs_alloc_path();
8118 	BUG_ON(!path);
8119 
8120 	spin_lock(&root->fs_info->block_group_cache_lock);
8121 	rb_erase(&block_group->cache_node,
8122 		 &root->fs_info->block_group_cache_tree);
8123 	spin_unlock(&root->fs_info->block_group_cache_lock);
8124 
8125 	down_write(&block_group->space_info->groups_sem);
8126 	/*
8127 	 * we must use list_del_init so people can check to see if they
8128 	 * are still on the list after taking the semaphore
8129 	 */
8130 	list_del_init(&block_group->list);
8131 	up_write(&block_group->space_info->groups_sem);
8132 
8133 	if (block_group->cached == BTRFS_CACHE_STARTED)
8134 		wait_block_group_cache_done(block_group);
8135 
8136 	btrfs_remove_free_space_cache(block_group);
8137 
8138 	spin_lock(&block_group->space_info->lock);
8139 	block_group->space_info->total_bytes -= block_group->key.offset;
8140 	block_group->space_info->bytes_readonly -= block_group->key.offset;
8141 	spin_unlock(&block_group->space_info->lock);
8142 
8143 	btrfs_clear_space_info_full(root->fs_info);
8144 
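	/*
	 * editor's note: two puts on purpose - one for the lookup
	 * above, one for the reference the block group cache rb tree
	 * held before the rb_erase.
	 */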
8145 	btrfs_put_block_group(block_group);
8146 	btrfs_put_block_group(block_group);
8147 
8148 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8149 	if (ret > 0)
8150 		ret = -EIO;
8151 	if (ret < 0)
8152 		goto out;
8153 
8154 	ret = btrfs_del_item(trans, root, path);
8155 out:
8156 	btrfs_free_path(path);
8157 	return ret;
8158 }
8159