xref: /openbmc/linux/fs/btrfs/extent-tree.c (revision 63dc02bd)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include "compat.h"
28 #include "hash.h"
29 #include "ctree.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "transaction.h"
33 #include "volumes.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 
37 /*
38  * control flags for do_chunk_alloc's force field
39  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
40  * if we really need one.
41  *
42  * CHUNK_ALLOC_LIMITED means to only try and allocate one
43  * if we have very few chunks already allocated.  This is
44  * used as part of the clustering code to help make sure
45  * we have a good pool of storage to cluster in, without
46  * filling the FS with empty chunks
47  *
48  * CHUNK_ALLOC_FORCE means it must try to allocate one
49  *
50  */
51 enum {
52 	CHUNK_ALLOC_NO_FORCE = 0,
53 	CHUNK_ALLOC_LIMITED = 1,
54 	CHUNK_ALLOC_FORCE = 2,
55 };
56 
57 /*
58  * Control how reservations are dealt with.
59  *
60  * RESERVE_FREE - freeing a reservation.
61  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
62  *   ENOSPC accounting
63  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
64  *   bytes_may_use as the ENOSPC accounting is done elsewhere
65  */
66 enum {
67 	RESERVE_FREE = 0,
68 	RESERVE_ALLOC = 1,
69 	RESERVE_ALLOC_NO_ACCOUNT = 2,
70 };
71 
72 static int update_block_group(struct btrfs_trans_handle *trans,
73 			      struct btrfs_root *root,
74 			      u64 bytenr, u64 num_bytes, int alloc);
75 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
76 				struct btrfs_root *root,
77 				u64 bytenr, u64 num_bytes, u64 parent,
78 				u64 root_objectid, u64 owner_objectid,
79 				u64 owner_offset, int refs_to_drop,
80 				struct btrfs_delayed_extent_op *extra_op);
81 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
82 				    struct extent_buffer *leaf,
83 				    struct btrfs_extent_item *ei);
84 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
85 				      struct btrfs_root *root,
86 				      u64 parent, u64 root_objectid,
87 				      u64 flags, u64 owner, u64 offset,
88 				      struct btrfs_key *ins, int ref_mod);
89 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
90 				     struct btrfs_root *root,
91 				     u64 parent, u64 root_objectid,
92 				     u64 flags, struct btrfs_disk_key *key,
93 				     int level, struct btrfs_key *ins);
94 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
95 			  struct btrfs_root *extent_root, u64 alloc_bytes,
96 			  u64 flags, int force);
97 static int find_next_key(struct btrfs_path *path, int level,
98 			 struct btrfs_key *key);
99 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
100 			    int dump_block_groups);
101 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
102 				       u64 num_bytes, int reserve);
103 
104 static noinline int
105 block_group_cache_done(struct btrfs_block_group_cache *cache)
106 {
107 	smp_mb();
108 	return cache->cached == BTRFS_CACHE_FINISHED;
109 }
110 
111 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
112 {
113 	return (cache->flags & bits) == bits;
114 }
115 
116 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
117 {
118 	atomic_inc(&cache->count);
119 }
120 
121 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
122 {
123 	if (atomic_dec_and_test(&cache->count)) {
124 		WARN_ON(cache->pinned > 0);
125 		WARN_ON(cache->reserved > 0);
126 		kfree(cache->free_space_ctl);
127 		kfree(cache);
128 	}
129 }
130 
131 /*
132  * this adds the block group to the fs_info rb tree for the block group
133  * cache
134  */
135 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
136 				struct btrfs_block_group_cache *block_group)
137 {
138 	struct rb_node **p;
139 	struct rb_node *parent = NULL;
140 	struct btrfs_block_group_cache *cache;
141 
142 	spin_lock(&info->block_group_cache_lock);
143 	p = &info->block_group_cache_tree.rb_node;
144 
145 	while (*p) {
146 		parent = *p;
147 		cache = rb_entry(parent, struct btrfs_block_group_cache,
148 				 cache_node);
149 		if (block_group->key.objectid < cache->key.objectid) {
150 			p = &(*p)->rb_left;
151 		} else if (block_group->key.objectid > cache->key.objectid) {
152 			p = &(*p)->rb_right;
153 		} else {
154 			spin_unlock(&info->block_group_cache_lock);
155 			return -EEXIST;
156 		}
157 	}
158 
159 	rb_link_node(&block_group->cache_node, parent, p);
160 	rb_insert_color(&block_group->cache_node,
161 			&info->block_group_cache_tree);
162 	spin_unlock(&info->block_group_cache_lock);
163 
164 	return 0;
165 }
166 
167 /*
168  * This will return the block group at or after bytenr if contains is 0, else
169  * it will return the block group that contains the bytenr
170  */
171 static struct btrfs_block_group_cache *
172 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
173 			      int contains)
174 {
175 	struct btrfs_block_group_cache *cache, *ret = NULL;
176 	struct rb_node *n;
177 	u64 end, start;
178 
179 	spin_lock(&info->block_group_cache_lock);
180 	n = info->block_group_cache_tree.rb_node;
181 
182 	while (n) {
183 		cache = rb_entry(n, struct btrfs_block_group_cache,
184 				 cache_node);
185 		end = cache->key.objectid + cache->key.offset - 1;
186 		start = cache->key.objectid;
187 
188 		if (bytenr < start) {
189 			if (!contains && (!ret || start < ret->key.objectid))
190 				ret = cache;
191 			n = n->rb_left;
192 		} else if (bytenr > start) {
193 			if (contains && bytenr <= end) {
194 				ret = cache;
195 				break;
196 			}
197 			n = n->rb_right;
198 		} else {
199 			ret = cache;
200 			break;
201 		}
202 	}
203 	if (ret)
204 		btrfs_get_block_group(ret);
205 	spin_unlock(&info->block_group_cache_lock);
206 
207 	return ret;
208 }
209 
210 static int add_excluded_extent(struct btrfs_root *root,
211 			       u64 start, u64 num_bytes)
212 {
213 	u64 end = start + num_bytes - 1;
214 	set_extent_bits(&root->fs_info->freed_extents[0],
215 			start, end, EXTENT_UPTODATE, GFP_NOFS);
216 	set_extent_bits(&root->fs_info->freed_extents[1],
217 			start, end, EXTENT_UPTODATE, GFP_NOFS);
218 	return 0;
219 }
220 
221 static void free_excluded_extents(struct btrfs_root *root,
222 				  struct btrfs_block_group_cache *cache)
223 {
224 	u64 start, end;
225 
226 	start = cache->key.objectid;
227 	end = start + cache->key.offset - 1;
228 
229 	clear_extent_bits(&root->fs_info->freed_extents[0],
230 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
231 	clear_extent_bits(&root->fs_info->freed_extents[1],
232 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
233 }
234 
235 static int exclude_super_stripes(struct btrfs_root *root,
236 				 struct btrfs_block_group_cache *cache)
237 {
238 	u64 bytenr;
239 	u64 *logical;
240 	int stripe_len;
241 	int i, nr, ret;
242 
243 	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
244 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
245 		cache->bytes_super += stripe_len;
246 		ret = add_excluded_extent(root, cache->key.objectid,
247 					  stripe_len);
248 		BUG_ON(ret); /* -ENOMEM */
249 	}
250 
251 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
252 		bytenr = btrfs_sb_offset(i);
253 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
254 				       cache->key.objectid, bytenr,
255 				       0, &logical, &nr, &stripe_len);
256 		BUG_ON(ret); /* -ENOMEM */
257 
258 		while (nr--) {
259 			cache->bytes_super += stripe_len;
260 			ret = add_excluded_extent(root, logical[nr],
261 						  stripe_len);
262 			BUG_ON(ret); /* -ENOMEM */
263 		}
264 
265 		kfree(logical);
266 	}
267 	return 0;
268 }
269 
270 static struct btrfs_caching_control *
271 get_caching_control(struct btrfs_block_group_cache *cache)
272 {
273 	struct btrfs_caching_control *ctl;
274 
275 	spin_lock(&cache->lock);
276 	if (cache->cached != BTRFS_CACHE_STARTED) {
277 		spin_unlock(&cache->lock);
278 		return NULL;
279 	}
280 
281 	/* We're loading it the fast way, so we don't have a caching_ctl. */
282 	if (!cache->caching_ctl) {
283 		spin_unlock(&cache->lock);
284 		return NULL;
285 	}
286 
287 	ctl = cache->caching_ctl;
288 	atomic_inc(&ctl->count);
289 	spin_unlock(&cache->lock);
290 	return ctl;
291 }
292 
293 static void put_caching_control(struct btrfs_caching_control *ctl)
294 {
295 	if (atomic_dec_and_test(&ctl->count))
296 		kfree(ctl);
297 }
298 
299 /*
300  * this is only called by cache_block_group, since we could have freed extents
301  * we need to check the pinned_extents for any extents that can't be used yet
302  * since their free space will be released as soon as the transaction commits.
303  */
304 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
305 			      struct btrfs_fs_info *info, u64 start, u64 end)
306 {
307 	u64 extent_start, extent_end, size, total_added = 0;
308 	int ret;
309 
310 	while (start < end) {
311 		ret = find_first_extent_bit(info->pinned_extents, start,
312 					    &extent_start, &extent_end,
313 					    EXTENT_DIRTY | EXTENT_UPTODATE);
314 		if (ret)
315 			break;
316 
317 		if (extent_start <= start) {
318 			start = extent_end + 1;
319 		} else if (extent_start > start && extent_start < end) {
320 			size = extent_start - start;
321 			total_added += size;
322 			ret = btrfs_add_free_space(block_group, start,
323 						   size);
324 			BUG_ON(ret); /* -ENOMEM or logic error */
325 			start = extent_end + 1;
326 		} else {
327 			break;
328 		}
329 	}
330 
331 	if (start < end) {
332 		size = end - start;
333 		total_added += size;
334 		ret = btrfs_add_free_space(block_group, start, size);
335 		BUG_ON(ret); /* -ENOMEM or logic error */
336 	}
337 
338 	return total_added;
339 }
340 
341 static noinline void caching_thread(struct btrfs_work *work)
342 {
343 	struct btrfs_block_group_cache *block_group;
344 	struct btrfs_fs_info *fs_info;
345 	struct btrfs_caching_control *caching_ctl;
346 	struct btrfs_root *extent_root;
347 	struct btrfs_path *path;
348 	struct extent_buffer *leaf;
349 	struct btrfs_key key;
350 	u64 total_found = 0;
351 	u64 last = 0;
352 	u32 nritems;
353 	int ret = 0;
354 
355 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
356 	block_group = caching_ctl->block_group;
357 	fs_info = block_group->fs_info;
358 	extent_root = fs_info->extent_root;
359 
360 	path = btrfs_alloc_path();
361 	if (!path)
362 		goto out;
363 
364 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
365 
366 	/*
367 	 * We don't want to deadlock with somebody trying to allocate a new
368 	 * extent for the extent root while also trying to search the extent
369 	 * root to add free space.  So we skip locking and search the commit
370 	 * root, since its read-only
371 	 */
372 	path->skip_locking = 1;
373 	path->search_commit_root = 1;
374 	path->reada = 1;
375 
376 	key.objectid = last;
377 	key.offset = 0;
378 	key.type = BTRFS_EXTENT_ITEM_KEY;
379 again:
380 	mutex_lock(&caching_ctl->mutex);
381 	/* need to make sure the commit_root doesn't disappear */
382 	down_read(&fs_info->extent_commit_sem);
383 
384 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
385 	if (ret < 0)
386 		goto err;
387 
388 	leaf = path->nodes[0];
389 	nritems = btrfs_header_nritems(leaf);
390 
391 	while (1) {
392 		if (btrfs_fs_closing(fs_info) > 1) {
393 			last = (u64)-1;
394 			break;
395 		}
396 
397 		if (path->slots[0] < nritems) {
398 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
399 		} else {
400 			ret = find_next_key(path, 0, &key);
401 			if (ret)
402 				break;
403 
404 			if (need_resched() ||
405 			    btrfs_next_leaf(extent_root, path)) {
406 				caching_ctl->progress = last;
407 				btrfs_release_path(path);
408 				up_read(&fs_info->extent_commit_sem);
409 				mutex_unlock(&caching_ctl->mutex);
410 				cond_resched();
411 				goto again;
412 			}
413 			leaf = path->nodes[0];
414 			nritems = btrfs_header_nritems(leaf);
415 			continue;
416 		}
417 
418 		if (key.objectid < block_group->key.objectid) {
419 			path->slots[0]++;
420 			continue;
421 		}
422 
423 		if (key.objectid >= block_group->key.objectid +
424 		    block_group->key.offset)
425 			break;
426 
427 		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
428 			total_found += add_new_free_space(block_group,
429 							  fs_info, last,
430 							  key.objectid);
431 			last = key.objectid + key.offset;
432 
433 			if (total_found > (1024 * 1024 * 2)) {
434 				total_found = 0;
435 				wake_up(&caching_ctl->wait);
436 			}
437 		}
438 		path->slots[0]++;
439 	}
440 	ret = 0;
441 
442 	total_found += add_new_free_space(block_group, fs_info, last,
443 					  block_group->key.objectid +
444 					  block_group->key.offset);
445 	caching_ctl->progress = (u64)-1;
446 
447 	spin_lock(&block_group->lock);
448 	block_group->caching_ctl = NULL;
449 	block_group->cached = BTRFS_CACHE_FINISHED;
450 	spin_unlock(&block_group->lock);
451 
452 err:
453 	btrfs_free_path(path);
454 	up_read(&fs_info->extent_commit_sem);
455 
456 	free_excluded_extents(extent_root, block_group);
457 
458 	mutex_unlock(&caching_ctl->mutex);
459 out:
460 	wake_up(&caching_ctl->wait);
461 
462 	put_caching_control(caching_ctl);
463 	btrfs_put_block_group(block_group);
464 }
465 
466 static int cache_block_group(struct btrfs_block_group_cache *cache,
467 			     struct btrfs_trans_handle *trans,
468 			     struct btrfs_root *root,
469 			     int load_cache_only)
470 {
471 	DEFINE_WAIT(wait);
472 	struct btrfs_fs_info *fs_info = cache->fs_info;
473 	struct btrfs_caching_control *caching_ctl;
474 	int ret = 0;
475 
476 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
477 	if (!caching_ctl)
478 		return -ENOMEM;
479 
480 	INIT_LIST_HEAD(&caching_ctl->list);
481 	mutex_init(&caching_ctl->mutex);
482 	init_waitqueue_head(&caching_ctl->wait);
483 	caching_ctl->block_group = cache;
484 	caching_ctl->progress = cache->key.objectid;
485 	atomic_set(&caching_ctl->count, 1);
486 	caching_ctl->work.func = caching_thread;
487 
488 	spin_lock(&cache->lock);
489 	/*
490 	 * This should be a rare occasion, but this could happen I think in the
491 	 * case where one thread starts to load the space cache info, and then
492 	 * some other thread starts a transaction commit which tries to do an
493 	 * allocation while the other thread is still loading the space cache
494 	 * info.  The previous loop should have kept us from choosing this block
495 	 * group, but if we've moved to the state where we will wait on caching
496 	 * block groups we need to first check if we're doing a fast load here,
497 	 * so we can wait for it to finish, otherwise we could end up allocating
498 	 * from a block group who's cache gets evicted for one reason or
499 	 * another.
500 	 */
501 	while (cache->cached == BTRFS_CACHE_FAST) {
502 		struct btrfs_caching_control *ctl;
503 
504 		ctl = cache->caching_ctl;
505 		atomic_inc(&ctl->count);
506 		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
507 		spin_unlock(&cache->lock);
508 
509 		schedule();
510 
511 		finish_wait(&ctl->wait, &wait);
512 		put_caching_control(ctl);
513 		spin_lock(&cache->lock);
514 	}
515 
516 	if (cache->cached != BTRFS_CACHE_NO) {
517 		spin_unlock(&cache->lock);
518 		kfree(caching_ctl);
519 		return 0;
520 	}
521 	WARN_ON(cache->caching_ctl);
522 	cache->caching_ctl = caching_ctl;
523 	cache->cached = BTRFS_CACHE_FAST;
524 	spin_unlock(&cache->lock);
525 
526 	/*
527 	 * We can't do the read from on-disk cache during a commit since we need
528 	 * to have the normal tree locking.  Also if we are currently trying to
529 	 * allocate blocks for the tree root we can't do the fast caching since
530 	 * we likely hold important locks.
531 	 */
532 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
533 		ret = load_free_space_cache(fs_info, cache);
534 
535 		spin_lock(&cache->lock);
536 		if (ret == 1) {
537 			cache->caching_ctl = NULL;
538 			cache->cached = BTRFS_CACHE_FINISHED;
539 			cache->last_byte_to_unpin = (u64)-1;
540 		} else {
541 			if (load_cache_only) {
542 				cache->caching_ctl = NULL;
543 				cache->cached = BTRFS_CACHE_NO;
544 			} else {
545 				cache->cached = BTRFS_CACHE_STARTED;
546 			}
547 		}
548 		spin_unlock(&cache->lock);
549 		wake_up(&caching_ctl->wait);
550 		if (ret == 1) {
551 			put_caching_control(caching_ctl);
552 			free_excluded_extents(fs_info->extent_root, cache);
553 			return 0;
554 		}
555 	} else {
556 		/*
557 		 * We are not going to do the fast caching, set cached to the
558 		 * appropriate value and wakeup any waiters.
559 		 */
560 		spin_lock(&cache->lock);
561 		if (load_cache_only) {
562 			cache->caching_ctl = NULL;
563 			cache->cached = BTRFS_CACHE_NO;
564 		} else {
565 			cache->cached = BTRFS_CACHE_STARTED;
566 		}
567 		spin_unlock(&cache->lock);
568 		wake_up(&caching_ctl->wait);
569 	}
570 
571 	if (load_cache_only) {
572 		put_caching_control(caching_ctl);
573 		return 0;
574 	}
575 
576 	down_write(&fs_info->extent_commit_sem);
577 	atomic_inc(&caching_ctl->count);
578 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
579 	up_write(&fs_info->extent_commit_sem);
580 
581 	btrfs_get_block_group(cache);
582 
583 	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
584 
585 	return ret;
586 }
587 
588 /*
589  * return the block group that starts at or after bytenr
590  */
591 static struct btrfs_block_group_cache *
592 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
593 {
594 	struct btrfs_block_group_cache *cache;
595 
596 	cache = block_group_cache_tree_search(info, bytenr, 0);
597 
598 	return cache;
599 }
600 
601 /*
602  * return the block group that contains the given bytenr
603  */
604 struct btrfs_block_group_cache *btrfs_lookup_block_group(
605 						 struct btrfs_fs_info *info,
606 						 u64 bytenr)
607 {
608 	struct btrfs_block_group_cache *cache;
609 
610 	cache = block_group_cache_tree_search(info, bytenr, 1);
611 
612 	return cache;
613 }
614 
615 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
616 						  u64 flags)
617 {
618 	struct list_head *head = &info->space_info;
619 	struct btrfs_space_info *found;
620 
621 	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
622 
623 	rcu_read_lock();
624 	list_for_each_entry_rcu(found, head, list) {
625 		if (found->flags & flags) {
626 			rcu_read_unlock();
627 			return found;
628 		}
629 	}
630 	rcu_read_unlock();
631 	return NULL;
632 }
633 
634 /*
635  * after adding space to the filesystem, we need to clear the full flags
636  * on all the space infos.
637  */
638 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
639 {
640 	struct list_head *head = &info->space_info;
641 	struct btrfs_space_info *found;
642 
643 	rcu_read_lock();
644 	list_for_each_entry_rcu(found, head, list)
645 		found->full = 0;
646 	rcu_read_unlock();
647 }
648 
649 static u64 div_factor(u64 num, int factor)
650 {
651 	if (factor == 10)
652 		return num;
653 	num *= factor;
654 	do_div(num, 10);
655 	return num;
656 }
657 
658 static u64 div_factor_fine(u64 num, int factor)
659 {
660 	if (factor == 100)
661 		return num;
662 	num *= factor;
663 	do_div(num, 100);
664 	return num;
665 }
666 
667 u64 btrfs_find_block_group(struct btrfs_root *root,
668 			   u64 search_start, u64 search_hint, int owner)
669 {
670 	struct btrfs_block_group_cache *cache;
671 	u64 used;
672 	u64 last = max(search_hint, search_start);
673 	u64 group_start = 0;
674 	int full_search = 0;
675 	int factor = 9;
676 	int wrapped = 0;
677 again:
678 	while (1) {
679 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
680 		if (!cache)
681 			break;
682 
683 		spin_lock(&cache->lock);
684 		last = cache->key.objectid + cache->key.offset;
685 		used = btrfs_block_group_used(&cache->item);
686 
687 		if ((full_search || !cache->ro) &&
688 		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
689 			if (used + cache->pinned + cache->reserved <
690 			    div_factor(cache->key.offset, factor)) {
691 				group_start = cache->key.objectid;
692 				spin_unlock(&cache->lock);
693 				btrfs_put_block_group(cache);
694 				goto found;
695 			}
696 		}
697 		spin_unlock(&cache->lock);
698 		btrfs_put_block_group(cache);
699 		cond_resched();
700 	}
701 	if (!wrapped) {
702 		last = search_start;
703 		wrapped = 1;
704 		goto again;
705 	}
706 	if (!full_search && factor < 10) {
707 		last = search_start;
708 		full_search = 1;
709 		factor = 10;
710 		goto again;
711 	}
712 found:
713 	return group_start;
714 }
715 
716 /* simple helper to search for an existing extent at a given offset */
717 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
718 {
719 	int ret;
720 	struct btrfs_key key;
721 	struct btrfs_path *path;
722 
723 	path = btrfs_alloc_path();
724 	if (!path)
725 		return -ENOMEM;
726 
727 	key.objectid = start;
728 	key.offset = len;
729 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
730 	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
731 				0, 0);
732 	btrfs_free_path(path);
733 	return ret;
734 }
735 
736 /*
737  * helper function to lookup reference count and flags of extent.
738  *
739  * the head node for delayed ref is used to store the sum of all the
740  * reference count modifications queued up in the rbtree. the head
741  * node may also store the extent flags to set. This way you can check
742  * to see what the reference count and extent flags would be if all of
743  * the delayed refs are not processed.
744  */
745 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
746 			     struct btrfs_root *root, u64 bytenr,
747 			     u64 num_bytes, u64 *refs, u64 *flags)
748 {
749 	struct btrfs_delayed_ref_head *head;
750 	struct btrfs_delayed_ref_root *delayed_refs;
751 	struct btrfs_path *path;
752 	struct btrfs_extent_item *ei;
753 	struct extent_buffer *leaf;
754 	struct btrfs_key key;
755 	u32 item_size;
756 	u64 num_refs;
757 	u64 extent_flags;
758 	int ret;
759 
760 	path = btrfs_alloc_path();
761 	if (!path)
762 		return -ENOMEM;
763 
764 	key.objectid = bytenr;
765 	key.type = BTRFS_EXTENT_ITEM_KEY;
766 	key.offset = num_bytes;
767 	if (!trans) {
768 		path->skip_locking = 1;
769 		path->search_commit_root = 1;
770 	}
771 again:
772 	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
773 				&key, path, 0, 0);
774 	if (ret < 0)
775 		goto out_free;
776 
777 	if (ret == 0) {
778 		leaf = path->nodes[0];
779 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
780 		if (item_size >= sizeof(*ei)) {
781 			ei = btrfs_item_ptr(leaf, path->slots[0],
782 					    struct btrfs_extent_item);
783 			num_refs = btrfs_extent_refs(leaf, ei);
784 			extent_flags = btrfs_extent_flags(leaf, ei);
785 		} else {
786 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
787 			struct btrfs_extent_item_v0 *ei0;
788 			BUG_ON(item_size != sizeof(*ei0));
789 			ei0 = btrfs_item_ptr(leaf, path->slots[0],
790 					     struct btrfs_extent_item_v0);
791 			num_refs = btrfs_extent_refs_v0(leaf, ei0);
792 			/* FIXME: this isn't correct for data */
793 			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
794 #else
795 			BUG();
796 #endif
797 		}
798 		BUG_ON(num_refs == 0);
799 	} else {
800 		num_refs = 0;
801 		extent_flags = 0;
802 		ret = 0;
803 	}
804 
805 	if (!trans)
806 		goto out;
807 
808 	delayed_refs = &trans->transaction->delayed_refs;
809 	spin_lock(&delayed_refs->lock);
810 	head = btrfs_find_delayed_ref_head(trans, bytenr);
811 	if (head) {
812 		if (!mutex_trylock(&head->mutex)) {
813 			atomic_inc(&head->node.refs);
814 			spin_unlock(&delayed_refs->lock);
815 
816 			btrfs_release_path(path);
817 
818 			/*
819 			 * Mutex was contended, block until it's released and try
820 			 * again
821 			 */
822 			mutex_lock(&head->mutex);
823 			mutex_unlock(&head->mutex);
824 			btrfs_put_delayed_ref(&head->node);
825 			goto again;
826 		}
827 		if (head->extent_op && head->extent_op->update_flags)
828 			extent_flags |= head->extent_op->flags_to_set;
829 		else
830 			BUG_ON(num_refs == 0);
831 
832 		num_refs += head->node.ref_mod;
833 		mutex_unlock(&head->mutex);
834 	}
835 	spin_unlock(&delayed_refs->lock);
836 out:
837 	WARN_ON(num_refs == 0);
838 	if (refs)
839 		*refs = num_refs;
840 	if (flags)
841 		*flags = extent_flags;
842 out_free:
843 	btrfs_free_path(path);
844 	return ret;
845 }
846 
847 /*
848  * Back reference rules.  Back refs have three main goals:
849  *
850  * 1) differentiate between all holders of references to an extent so that
851  *    when a reference is dropped we can make sure it was a valid reference
852  *    before freeing the extent.
853  *
854  * 2) Provide enough information to quickly find the holders of an extent
855  *    if we notice a given block is corrupted or bad.
856  *
857  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
858  *    maintenance.  This is actually the same as #2, but with a slightly
859  *    different use case.
860  *
861  * There are two kinds of back refs. The implicit back refs is optimized
862  * for pointers in non-shared tree blocks. For a given pointer in a block,
863  * back refs of this kind provide information about the block's owner tree
864  * and the pointer's key. These information allow us to find the block by
865  * b-tree searching. The full back refs is for pointers in tree blocks not
866  * referenced by their owner trees. The location of tree block is recorded
867  * in the back refs. Actually the full back refs is generic, and can be
868  * used in all cases the implicit back refs is used. The major shortcoming
869  * of the full back refs is its overhead. Every time a tree block gets
870  * COWed, we have to update back refs entry for all pointers in it.
871  *
872  * For a newly allocated tree block, we use implicit back refs for
873  * pointers in it. This means most tree related operations only involve
874  * implicit back refs. For a tree block created in old transaction, the
875  * only way to drop a reference to it is COW it. So we can detect the
876  * event that tree block loses its owner tree's reference and do the
877  * back refs conversion.
878  *
879  * When a tree block is COW'd through a tree, there are four cases:
880  *
881  * The reference count of the block is one and the tree is the block's
882  * owner tree. Nothing to do in this case.
883  *
884  * The reference count of the block is one and the tree is not the
885  * block's owner tree. In this case, full back refs is used for pointers
886  * in the block. Remove these full back refs, add implicit back refs for
887  * every pointers in the new block.
888  *
889  * The reference count of the block is greater than one and the tree is
890  * the block's owner tree. In this case, implicit back refs is used for
891  * pointers in the block. Add full back refs for every pointers in the
892  * block, increase lower level extents' reference counts. The original
893  * implicit back refs are entailed to the new block.
894  *
895  * The reference count of the block is greater than one and the tree is
896  * not the block's owner tree. Add implicit back refs for every pointer in
897  * the new block, increase lower level extents' reference count.
898  *
899  * Back Reference Key composing:
900  *
901  * The key objectid corresponds to the first byte in the extent,
902  * The key type is used to differentiate between types of back refs.
903  * There are different meanings of the key offset for different types
904  * of back refs.
905  *
906  * File extents can be referenced by:
907  *
908  * - multiple snapshots, subvolumes, or different generations in one subvol
909  * - different files inside a single subvolume
910  * - different offsets inside a file (bookend extents in file.c)
911  *
912  * The extent ref structure for the implicit back refs has fields for:
913  *
914  * - Objectid of the subvolume root
915  * - objectid of the file holding the reference
916  * - original offset in the file
917  * - how many bookend extents
918  *
919  * The key offset for the implicit back refs is hash of the first
920  * three fields.
921  *
922  * The extent ref structure for the full back refs has field for:
923  *
924  * - number of pointers in the tree leaf
925  *
926  * The key offset for the implicit back refs is the first byte of
927  * the tree leaf
928  *
929  * When a file extent is allocated, The implicit back refs is used.
930  * the fields are filled in:
931  *
932  *     (root_key.objectid, inode objectid, offset in file, 1)
933  *
934  * When a file extent is removed file truncation, we find the
935  * corresponding implicit back refs and check the following fields:
936  *
937  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
938  *
939  * Btree extents can be referenced by:
940  *
941  * - Different subvolumes
942  *
943  * Both the implicit back refs and the full back refs for tree blocks
944  * only consist of key. The key offset for the implicit back refs is
945  * objectid of block's owner tree. The key offset for the full back refs
946  * is the first byte of parent block.
947  *
948  * When implicit back refs is used, information about the lowest key and
949  * level of the tree block are required. These information are stored in
950  * tree block info structure.
951  */
952 
953 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
954 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
955 				  struct btrfs_root *root,
956 				  struct btrfs_path *path,
957 				  u64 owner, u32 extra_size)
958 {
959 	struct btrfs_extent_item *item;
960 	struct btrfs_extent_item_v0 *ei0;
961 	struct btrfs_extent_ref_v0 *ref0;
962 	struct btrfs_tree_block_info *bi;
963 	struct extent_buffer *leaf;
964 	struct btrfs_key key;
965 	struct btrfs_key found_key;
966 	u32 new_size = sizeof(*item);
967 	u64 refs;
968 	int ret;
969 
970 	leaf = path->nodes[0];
971 	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
972 
973 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
974 	ei0 = btrfs_item_ptr(leaf, path->slots[0],
975 			     struct btrfs_extent_item_v0);
976 	refs = btrfs_extent_refs_v0(leaf, ei0);
977 
978 	if (owner == (u64)-1) {
979 		while (1) {
980 			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
981 				ret = btrfs_next_leaf(root, path);
982 				if (ret < 0)
983 					return ret;
984 				BUG_ON(ret > 0); /* Corruption */
985 				leaf = path->nodes[0];
986 			}
987 			btrfs_item_key_to_cpu(leaf, &found_key,
988 					      path->slots[0]);
989 			BUG_ON(key.objectid != found_key.objectid);
990 			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
991 				path->slots[0]++;
992 				continue;
993 			}
994 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
995 					      struct btrfs_extent_ref_v0);
996 			owner = btrfs_ref_objectid_v0(leaf, ref0);
997 			break;
998 		}
999 	}
1000 	btrfs_release_path(path);
1001 
1002 	if (owner < BTRFS_FIRST_FREE_OBJECTID)
1003 		new_size += sizeof(*bi);
1004 
1005 	new_size -= sizeof(*ei0);
1006 	ret = btrfs_search_slot(trans, root, &key, path,
1007 				new_size + extra_size, 1);
1008 	if (ret < 0)
1009 		return ret;
1010 	BUG_ON(ret); /* Corruption */
1011 
1012 	btrfs_extend_item(trans, root, path, new_size);
1013 
1014 	leaf = path->nodes[0];
1015 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1016 	btrfs_set_extent_refs(leaf, item, refs);
1017 	/* FIXME: get real generation */
1018 	btrfs_set_extent_generation(leaf, item, 0);
1019 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1020 		btrfs_set_extent_flags(leaf, item,
1021 				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
1022 				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
1023 		bi = (struct btrfs_tree_block_info *)(item + 1);
1024 		/* FIXME: get first key of the block */
1025 		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1026 		btrfs_set_tree_block_level(leaf, bi, (int)owner);
1027 	} else {
1028 		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1029 	}
1030 	btrfs_mark_buffer_dirty(leaf);
1031 	return 0;
1032 }
1033 #endif
1034 
1035 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1036 {
1037 	u32 high_crc = ~(u32)0;
1038 	u32 low_crc = ~(u32)0;
1039 	__le64 lenum;
1040 
1041 	lenum = cpu_to_le64(root_objectid);
1042 	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1043 	lenum = cpu_to_le64(owner);
1044 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1045 	lenum = cpu_to_le64(offset);
1046 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1047 
1048 	return ((u64)high_crc << 31) ^ (u64)low_crc;
1049 }
1050 
1051 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1052 				     struct btrfs_extent_data_ref *ref)
1053 {
1054 	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1055 				    btrfs_extent_data_ref_objectid(leaf, ref),
1056 				    btrfs_extent_data_ref_offset(leaf, ref));
1057 }
1058 
1059 static int match_extent_data_ref(struct extent_buffer *leaf,
1060 				 struct btrfs_extent_data_ref *ref,
1061 				 u64 root_objectid, u64 owner, u64 offset)
1062 {
1063 	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1064 	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1065 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
1066 		return 0;
1067 	return 1;
1068 }
1069 
1070 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1071 					   struct btrfs_root *root,
1072 					   struct btrfs_path *path,
1073 					   u64 bytenr, u64 parent,
1074 					   u64 root_objectid,
1075 					   u64 owner, u64 offset)
1076 {
1077 	struct btrfs_key key;
1078 	struct btrfs_extent_data_ref *ref;
1079 	struct extent_buffer *leaf;
1080 	u32 nritems;
1081 	int ret;
1082 	int recow;
1083 	int err = -ENOENT;
1084 
1085 	key.objectid = bytenr;
1086 	if (parent) {
1087 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1088 		key.offset = parent;
1089 	} else {
1090 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1091 		key.offset = hash_extent_data_ref(root_objectid,
1092 						  owner, offset);
1093 	}
1094 again:
1095 	recow = 0;
1096 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1097 	if (ret < 0) {
1098 		err = ret;
1099 		goto fail;
1100 	}
1101 
1102 	if (parent) {
1103 		if (!ret)
1104 			return 0;
1105 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1106 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1107 		btrfs_release_path(path);
1108 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1109 		if (ret < 0) {
1110 			err = ret;
1111 			goto fail;
1112 		}
1113 		if (!ret)
1114 			return 0;
1115 #endif
1116 		goto fail;
1117 	}
1118 
1119 	leaf = path->nodes[0];
1120 	nritems = btrfs_header_nritems(leaf);
1121 	while (1) {
1122 		if (path->slots[0] >= nritems) {
1123 			ret = btrfs_next_leaf(root, path);
1124 			if (ret < 0)
1125 				err = ret;
1126 			if (ret)
1127 				goto fail;
1128 
1129 			leaf = path->nodes[0];
1130 			nritems = btrfs_header_nritems(leaf);
1131 			recow = 1;
1132 		}
1133 
1134 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1135 		if (key.objectid != bytenr ||
1136 		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
1137 			goto fail;
1138 
1139 		ref = btrfs_item_ptr(leaf, path->slots[0],
1140 				     struct btrfs_extent_data_ref);
1141 
1142 		if (match_extent_data_ref(leaf, ref, root_objectid,
1143 					  owner, offset)) {
1144 			if (recow) {
1145 				btrfs_release_path(path);
1146 				goto again;
1147 			}
1148 			err = 0;
1149 			break;
1150 		}
1151 		path->slots[0]++;
1152 	}
1153 fail:
1154 	return err;
1155 }
1156 
1157 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1158 					   struct btrfs_root *root,
1159 					   struct btrfs_path *path,
1160 					   u64 bytenr, u64 parent,
1161 					   u64 root_objectid, u64 owner,
1162 					   u64 offset, int refs_to_add)
1163 {
1164 	struct btrfs_key key;
1165 	struct extent_buffer *leaf;
1166 	u32 size;
1167 	u32 num_refs;
1168 	int ret;
1169 
1170 	key.objectid = bytenr;
1171 	if (parent) {
1172 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1173 		key.offset = parent;
1174 		size = sizeof(struct btrfs_shared_data_ref);
1175 	} else {
1176 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1177 		key.offset = hash_extent_data_ref(root_objectid,
1178 						  owner, offset);
1179 		size = sizeof(struct btrfs_extent_data_ref);
1180 	}
1181 
1182 	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1183 	if (ret && ret != -EEXIST)
1184 		goto fail;
1185 
1186 	leaf = path->nodes[0];
1187 	if (parent) {
1188 		struct btrfs_shared_data_ref *ref;
1189 		ref = btrfs_item_ptr(leaf, path->slots[0],
1190 				     struct btrfs_shared_data_ref);
1191 		if (ret == 0) {
1192 			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1193 		} else {
1194 			num_refs = btrfs_shared_data_ref_count(leaf, ref);
1195 			num_refs += refs_to_add;
1196 			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1197 		}
1198 	} else {
1199 		struct btrfs_extent_data_ref *ref;
1200 		while (ret == -EEXIST) {
1201 			ref = btrfs_item_ptr(leaf, path->slots[0],
1202 					     struct btrfs_extent_data_ref);
1203 			if (match_extent_data_ref(leaf, ref, root_objectid,
1204 						  owner, offset))
1205 				break;
1206 			btrfs_release_path(path);
1207 			key.offset++;
1208 			ret = btrfs_insert_empty_item(trans, root, path, &key,
1209 						      size);
1210 			if (ret && ret != -EEXIST)
1211 				goto fail;
1212 
1213 			leaf = path->nodes[0];
1214 		}
1215 		ref = btrfs_item_ptr(leaf, path->slots[0],
1216 				     struct btrfs_extent_data_ref);
1217 		if (ret == 0) {
1218 			btrfs_set_extent_data_ref_root(leaf, ref,
1219 						       root_objectid);
1220 			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1221 			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1222 			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1223 		} else {
1224 			num_refs = btrfs_extent_data_ref_count(leaf, ref);
1225 			num_refs += refs_to_add;
1226 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1227 		}
1228 	}
1229 	btrfs_mark_buffer_dirty(leaf);
1230 	ret = 0;
1231 fail:
1232 	btrfs_release_path(path);
1233 	return ret;
1234 }
1235 
1236 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1237 					   struct btrfs_root *root,
1238 					   struct btrfs_path *path,
1239 					   int refs_to_drop)
1240 {
1241 	struct btrfs_key key;
1242 	struct btrfs_extent_data_ref *ref1 = NULL;
1243 	struct btrfs_shared_data_ref *ref2 = NULL;
1244 	struct extent_buffer *leaf;
1245 	u32 num_refs = 0;
1246 	int ret = 0;
1247 
1248 	leaf = path->nodes[0];
1249 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1250 
1251 	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1252 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1253 				      struct btrfs_extent_data_ref);
1254 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1255 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1256 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1257 				      struct btrfs_shared_data_ref);
1258 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1259 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1260 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1261 		struct btrfs_extent_ref_v0 *ref0;
1262 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1263 				      struct btrfs_extent_ref_v0);
1264 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1265 #endif
1266 	} else {
1267 		BUG();
1268 	}
1269 
1270 	BUG_ON(num_refs < refs_to_drop);
1271 	num_refs -= refs_to_drop;
1272 
1273 	if (num_refs == 0) {
1274 		ret = btrfs_del_item(trans, root, path);
1275 	} else {
1276 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1277 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1278 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1279 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1280 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1281 		else {
1282 			struct btrfs_extent_ref_v0 *ref0;
1283 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1284 					struct btrfs_extent_ref_v0);
1285 			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1286 		}
1287 #endif
1288 		btrfs_mark_buffer_dirty(leaf);
1289 	}
1290 	return ret;
1291 }
1292 
1293 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1294 					  struct btrfs_path *path,
1295 					  struct btrfs_extent_inline_ref *iref)
1296 {
1297 	struct btrfs_key key;
1298 	struct extent_buffer *leaf;
1299 	struct btrfs_extent_data_ref *ref1;
1300 	struct btrfs_shared_data_ref *ref2;
1301 	u32 num_refs = 0;
1302 
1303 	leaf = path->nodes[0];
1304 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1305 	if (iref) {
1306 		if (btrfs_extent_inline_ref_type(leaf, iref) ==
1307 		    BTRFS_EXTENT_DATA_REF_KEY) {
1308 			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1309 			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1310 		} else {
1311 			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1312 			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1313 		}
1314 	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1315 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1316 				      struct btrfs_extent_data_ref);
1317 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1318 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1319 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1320 				      struct btrfs_shared_data_ref);
1321 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1322 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1323 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1324 		struct btrfs_extent_ref_v0 *ref0;
1325 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1326 				      struct btrfs_extent_ref_v0);
1327 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1328 #endif
1329 	} else {
1330 		WARN_ON(1);
1331 	}
1332 	return num_refs;
1333 }
1334 
1335 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1336 					  struct btrfs_root *root,
1337 					  struct btrfs_path *path,
1338 					  u64 bytenr, u64 parent,
1339 					  u64 root_objectid)
1340 {
1341 	struct btrfs_key key;
1342 	int ret;
1343 
1344 	key.objectid = bytenr;
1345 	if (parent) {
1346 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1347 		key.offset = parent;
1348 	} else {
1349 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1350 		key.offset = root_objectid;
1351 	}
1352 
1353 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1354 	if (ret > 0)
1355 		ret = -ENOENT;
1356 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1357 	if (ret == -ENOENT && parent) {
1358 		btrfs_release_path(path);
1359 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1360 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1361 		if (ret > 0)
1362 			ret = -ENOENT;
1363 	}
1364 #endif
1365 	return ret;
1366 }
1367 
1368 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1369 					  struct btrfs_root *root,
1370 					  struct btrfs_path *path,
1371 					  u64 bytenr, u64 parent,
1372 					  u64 root_objectid)
1373 {
1374 	struct btrfs_key key;
1375 	int ret;
1376 
1377 	key.objectid = bytenr;
1378 	if (parent) {
1379 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1380 		key.offset = parent;
1381 	} else {
1382 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1383 		key.offset = root_objectid;
1384 	}
1385 
1386 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1387 	btrfs_release_path(path);
1388 	return ret;
1389 }
1390 
1391 static inline int extent_ref_type(u64 parent, u64 owner)
1392 {
1393 	int type;
1394 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1395 		if (parent > 0)
1396 			type = BTRFS_SHARED_BLOCK_REF_KEY;
1397 		else
1398 			type = BTRFS_TREE_BLOCK_REF_KEY;
1399 	} else {
1400 		if (parent > 0)
1401 			type = BTRFS_SHARED_DATA_REF_KEY;
1402 		else
1403 			type = BTRFS_EXTENT_DATA_REF_KEY;
1404 	}
1405 	return type;
1406 }
1407 
1408 static int find_next_key(struct btrfs_path *path, int level,
1409 			 struct btrfs_key *key)
1410 
1411 {
1412 	for (; level < BTRFS_MAX_LEVEL; level++) {
1413 		if (!path->nodes[level])
1414 			break;
1415 		if (path->slots[level] + 1 >=
1416 		    btrfs_header_nritems(path->nodes[level]))
1417 			continue;
1418 		if (level == 0)
1419 			btrfs_item_key_to_cpu(path->nodes[level], key,
1420 					      path->slots[level] + 1);
1421 		else
1422 			btrfs_node_key_to_cpu(path->nodes[level], key,
1423 					      path->slots[level] + 1);
1424 		return 0;
1425 	}
1426 	return 1;
1427 }
1428 
1429 /*
1430  * look for inline back ref. if back ref is found, *ref_ret is set
1431  * to the address of inline back ref, and 0 is returned.
1432  *
1433  * if back ref isn't found, *ref_ret is set to the address where it
1434  * should be inserted, and -ENOENT is returned.
1435  *
1436  * if insert is true and there are too many inline back refs, the path
1437  * points to the extent item, and -EAGAIN is returned.
1438  *
1439  * NOTE: inline back refs are ordered in the same way that back ref
1440  *	 items in the tree are ordered.
1441  */
1442 static noinline_for_stack
1443 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1444 				 struct btrfs_root *root,
1445 				 struct btrfs_path *path,
1446 				 struct btrfs_extent_inline_ref **ref_ret,
1447 				 u64 bytenr, u64 num_bytes,
1448 				 u64 parent, u64 root_objectid,
1449 				 u64 owner, u64 offset, int insert)
1450 {
1451 	struct btrfs_key key;
1452 	struct extent_buffer *leaf;
1453 	struct btrfs_extent_item *ei;
1454 	struct btrfs_extent_inline_ref *iref;
1455 	u64 flags;
1456 	u64 item_size;
1457 	unsigned long ptr;
1458 	unsigned long end;
1459 	int extra_size;
1460 	int type;
1461 	int want;
1462 	int ret;
1463 	int err = 0;
1464 
1465 	key.objectid = bytenr;
1466 	key.type = BTRFS_EXTENT_ITEM_KEY;
1467 	key.offset = num_bytes;
1468 
1469 	want = extent_ref_type(parent, owner);
1470 	if (insert) {
1471 		extra_size = btrfs_extent_inline_ref_size(want);
1472 		path->keep_locks = 1;
1473 	} else
1474 		extra_size = -1;
1475 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1476 	if (ret < 0) {
1477 		err = ret;
1478 		goto out;
1479 	}
1480 	if (ret && !insert) {
1481 		err = -ENOENT;
1482 		goto out;
1483 	}
1484 	BUG_ON(ret); /* Corruption */
1485 
1486 	leaf = path->nodes[0];
1487 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1488 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1489 	if (item_size < sizeof(*ei)) {
1490 		if (!insert) {
1491 			err = -ENOENT;
1492 			goto out;
1493 		}
1494 		ret = convert_extent_item_v0(trans, root, path, owner,
1495 					     extra_size);
1496 		if (ret < 0) {
1497 			err = ret;
1498 			goto out;
1499 		}
1500 		leaf = path->nodes[0];
1501 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1502 	}
1503 #endif
1504 	BUG_ON(item_size < sizeof(*ei));
1505 
1506 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1507 	flags = btrfs_extent_flags(leaf, ei);
1508 
1509 	ptr = (unsigned long)(ei + 1);
1510 	end = (unsigned long)ei + item_size;
1511 
1512 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1513 		ptr += sizeof(struct btrfs_tree_block_info);
1514 		BUG_ON(ptr > end);
1515 	} else {
1516 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1517 	}
1518 
1519 	err = -ENOENT;
1520 	while (1) {
1521 		if (ptr >= end) {
1522 			WARN_ON(ptr > end);
1523 			break;
1524 		}
1525 		iref = (struct btrfs_extent_inline_ref *)ptr;
1526 		type = btrfs_extent_inline_ref_type(leaf, iref);
1527 		if (want < type)
1528 			break;
1529 		if (want > type) {
1530 			ptr += btrfs_extent_inline_ref_size(type);
1531 			continue;
1532 		}
1533 
1534 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1535 			struct btrfs_extent_data_ref *dref;
1536 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1537 			if (match_extent_data_ref(leaf, dref, root_objectid,
1538 						  owner, offset)) {
1539 				err = 0;
1540 				break;
1541 			}
1542 			if (hash_extent_data_ref_item(leaf, dref) <
1543 			    hash_extent_data_ref(root_objectid, owner, offset))
1544 				break;
1545 		} else {
1546 			u64 ref_offset;
1547 			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1548 			if (parent > 0) {
1549 				if (parent == ref_offset) {
1550 					err = 0;
1551 					break;
1552 				}
1553 				if (ref_offset < parent)
1554 					break;
1555 			} else {
1556 				if (root_objectid == ref_offset) {
1557 					err = 0;
1558 					break;
1559 				}
1560 				if (ref_offset < root_objectid)
1561 					break;
1562 			}
1563 		}
1564 		ptr += btrfs_extent_inline_ref_size(type);
1565 	}
1566 	if (err == -ENOENT && insert) {
1567 		if (item_size + extra_size >=
1568 		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1569 			err = -EAGAIN;
1570 			goto out;
1571 		}
1572 		/*
1573 		 * To add new inline back ref, we have to make sure
1574 		 * there is no corresponding back ref item.
1575 		 * For simplicity, we just do not add new inline back
1576 		 * ref if there is any kind of item for this block
1577 		 */
1578 		if (find_next_key(path, 0, &key) == 0 &&
1579 		    key.objectid == bytenr &&
1580 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1581 			err = -EAGAIN;
1582 			goto out;
1583 		}
1584 	}
1585 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1586 out:
1587 	if (insert) {
1588 		path->keep_locks = 0;
1589 		btrfs_unlock_up_safe(path, 1);
1590 	}
1591 	return err;
1592 }
1593 
1594 /*
1595  * helper to add new inline back ref
1596  */
1597 static noinline_for_stack
1598 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1599 				 struct btrfs_root *root,
1600 				 struct btrfs_path *path,
1601 				 struct btrfs_extent_inline_ref *iref,
1602 				 u64 parent, u64 root_objectid,
1603 				 u64 owner, u64 offset, int refs_to_add,
1604 				 struct btrfs_delayed_extent_op *extent_op)
1605 {
1606 	struct extent_buffer *leaf;
1607 	struct btrfs_extent_item *ei;
1608 	unsigned long ptr;
1609 	unsigned long end;
1610 	unsigned long item_offset;
1611 	u64 refs;
1612 	int size;
1613 	int type;
1614 
1615 	leaf = path->nodes[0];
1616 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1617 	item_offset = (unsigned long)iref - (unsigned long)ei;
1618 
1619 	type = extent_ref_type(parent, owner);
1620 	size = btrfs_extent_inline_ref_size(type);
1621 
1622 	btrfs_extend_item(trans, root, path, size);
1623 
1624 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1625 	refs = btrfs_extent_refs(leaf, ei);
1626 	refs += refs_to_add;
1627 	btrfs_set_extent_refs(leaf, ei, refs);
1628 	if (extent_op)
1629 		__run_delayed_extent_op(extent_op, leaf, ei);
1630 
1631 	ptr = (unsigned long)ei + item_offset;
1632 	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1633 	if (ptr < end - size)
1634 		memmove_extent_buffer(leaf, ptr + size, ptr,
1635 				      end - size - ptr);
1636 
1637 	iref = (struct btrfs_extent_inline_ref *)ptr;
1638 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1639 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1640 		struct btrfs_extent_data_ref *dref;
1641 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1642 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1643 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1644 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1645 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1646 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1647 		struct btrfs_shared_data_ref *sref;
1648 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1649 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1650 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1651 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1652 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1653 	} else {
1654 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1655 	}
1656 	btrfs_mark_buffer_dirty(leaf);
1657 }
1658 
1659 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1660 				 struct btrfs_root *root,
1661 				 struct btrfs_path *path,
1662 				 struct btrfs_extent_inline_ref **ref_ret,
1663 				 u64 bytenr, u64 num_bytes, u64 parent,
1664 				 u64 root_objectid, u64 owner, u64 offset)
1665 {
1666 	int ret;
1667 
1668 	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1669 					   bytenr, num_bytes, parent,
1670 					   root_objectid, owner, offset, 0);
1671 	if (ret != -ENOENT)
1672 		return ret;
1673 
1674 	btrfs_release_path(path);
1675 	*ref_ret = NULL;
1676 
1677 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1678 		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1679 					    root_objectid);
1680 	} else {
1681 		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1682 					     root_objectid, owner, offset);
1683 	}
1684 	return ret;
1685 }
1686 
1687 /*
1688  * helper to update/remove inline back ref
1689  */
1690 static noinline_for_stack
1691 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1692 				  struct btrfs_root *root,
1693 				  struct btrfs_path *path,
1694 				  struct btrfs_extent_inline_ref *iref,
1695 				  int refs_to_mod,
1696 				  struct btrfs_delayed_extent_op *extent_op)
1697 {
1698 	struct extent_buffer *leaf;
1699 	struct btrfs_extent_item *ei;
1700 	struct btrfs_extent_data_ref *dref = NULL;
1701 	struct btrfs_shared_data_ref *sref = NULL;
1702 	unsigned long ptr;
1703 	unsigned long end;
1704 	u32 item_size;
1705 	int size;
1706 	int type;
1707 	u64 refs;
1708 
1709 	leaf = path->nodes[0];
1710 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1711 	refs = btrfs_extent_refs(leaf, ei);
1712 	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1713 	refs += refs_to_mod;
1714 	btrfs_set_extent_refs(leaf, ei, refs);
1715 	if (extent_op)
1716 		__run_delayed_extent_op(extent_op, leaf, ei);
1717 
1718 	type = btrfs_extent_inline_ref_type(leaf, iref);
1719 
1720 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1721 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1722 		refs = btrfs_extent_data_ref_count(leaf, dref);
1723 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1724 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1725 		refs = btrfs_shared_data_ref_count(leaf, sref);
1726 	} else {
1727 		refs = 1;
1728 		BUG_ON(refs_to_mod != -1);
1729 	}
1730 
1731 	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1732 	refs += refs_to_mod;
1733 
1734 	if (refs > 0) {
1735 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1736 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1737 		else
1738 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1739 	} else {
1740 		size =  btrfs_extent_inline_ref_size(type);
1741 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1742 		ptr = (unsigned long)iref;
1743 		end = (unsigned long)ei + item_size;
1744 		if (ptr + size < end)
1745 			memmove_extent_buffer(leaf, ptr, ptr + size,
1746 					      end - ptr - size);
1747 		item_size -= size;
1748 		btrfs_truncate_item(trans, root, path, item_size, 1);
1749 	}
1750 	btrfs_mark_buffer_dirty(leaf);
1751 }
1752 
1753 static noinline_for_stack
1754 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1755 				 struct btrfs_root *root,
1756 				 struct btrfs_path *path,
1757 				 u64 bytenr, u64 num_bytes, u64 parent,
1758 				 u64 root_objectid, u64 owner,
1759 				 u64 offset, int refs_to_add,
1760 				 struct btrfs_delayed_extent_op *extent_op)
1761 {
1762 	struct btrfs_extent_inline_ref *iref;
1763 	int ret;
1764 
1765 	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1766 					   bytenr, num_bytes, parent,
1767 					   root_objectid, owner, offset, 1);
1768 	if (ret == 0) {
1769 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1770 		update_inline_extent_backref(trans, root, path, iref,
1771 					     refs_to_add, extent_op);
1772 	} else if (ret == -ENOENT) {
1773 		setup_inline_extent_backref(trans, root, path, iref, parent,
1774 					    root_objectid, owner, offset,
1775 					    refs_to_add, extent_op);
1776 		ret = 0;
1777 	}
1778 	return ret;
1779 }
1780 
1781 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1782 				 struct btrfs_root *root,
1783 				 struct btrfs_path *path,
1784 				 u64 bytenr, u64 parent, u64 root_objectid,
1785 				 u64 owner, u64 offset, int refs_to_add)
1786 {
1787 	int ret;
1788 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1789 		BUG_ON(refs_to_add != 1);
1790 		ret = insert_tree_block_ref(trans, root, path, bytenr,
1791 					    parent, root_objectid);
1792 	} else {
1793 		ret = insert_extent_data_ref(trans, root, path, bytenr,
1794 					     parent, root_objectid,
1795 					     owner, offset, refs_to_add);
1796 	}
1797 	return ret;
1798 }
1799 
1800 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1801 				 struct btrfs_root *root,
1802 				 struct btrfs_path *path,
1803 				 struct btrfs_extent_inline_ref *iref,
1804 				 int refs_to_drop, int is_data)
1805 {
1806 	int ret = 0;
1807 
1808 	BUG_ON(!is_data && refs_to_drop != 1);
1809 	if (iref) {
1810 		update_inline_extent_backref(trans, root, path, iref,
1811 					     -refs_to_drop, NULL);
1812 	} else if (is_data) {
1813 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1814 	} else {
1815 		ret = btrfs_del_item(trans, root, path);
1816 	}
1817 	return ret;
1818 }
1819 
1820 static int btrfs_issue_discard(struct block_device *bdev,
1821 				u64 start, u64 len)
1822 {
1823 	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1824 }
1825 
1826 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1827 				u64 num_bytes, u64 *actual_bytes)
1828 {
1829 	int ret;
1830 	u64 discarded_bytes = 0;
1831 	struct btrfs_bio *bbio = NULL;
1832 
1833 
1834 	/* Tell the block device(s) that the sectors can be discarded */
1835 	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1836 			      bytenr, &num_bytes, &bbio, 0);
1837 	/* Error condition is -ENOMEM */
1838 	if (!ret) {
1839 		struct btrfs_bio_stripe *stripe = bbio->stripes;
1840 		int i;
1841 
1842 
1843 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1844 			if (!stripe->dev->can_discard)
1845 				continue;
1846 
1847 			ret = btrfs_issue_discard(stripe->dev->bdev,
1848 						  stripe->physical,
1849 						  stripe->length);
1850 			if (!ret)
1851 				discarded_bytes += stripe->length;
1852 			else if (ret != -EOPNOTSUPP)
1853 				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1854 
1855 			/*
1856 			 * Just in case we get back EOPNOTSUPP for some reason,
1857 			 * just ignore the return value so we don't screw up
1858 			 * people calling discard_extent.
1859 			 */
1860 			ret = 0;
1861 		}
1862 		kfree(bbio);
1863 	}
1864 
1865 	if (actual_bytes)
1866 		*actual_bytes = discarded_bytes;
1867 
1868 
1869 	return ret;
1870 }
1871 
1872 /* Can return -ENOMEM */
1873 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1874 			 struct btrfs_root *root,
1875 			 u64 bytenr, u64 num_bytes, u64 parent,
1876 			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
1877 {
1878 	int ret;
1879 	struct btrfs_fs_info *fs_info = root->fs_info;
1880 
1881 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1882 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
1883 
1884 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1885 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1886 					num_bytes,
1887 					parent, root_objectid, (int)owner,
1888 					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1889 	} else {
1890 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1891 					num_bytes,
1892 					parent, root_objectid, owner, offset,
1893 					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1894 	}
1895 	return ret;
1896 }
1897 
1898 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1899 				  struct btrfs_root *root,
1900 				  u64 bytenr, u64 num_bytes,
1901 				  u64 parent, u64 root_objectid,
1902 				  u64 owner, u64 offset, int refs_to_add,
1903 				  struct btrfs_delayed_extent_op *extent_op)
1904 {
1905 	struct btrfs_path *path;
1906 	struct extent_buffer *leaf;
1907 	struct btrfs_extent_item *item;
1908 	u64 refs;
1909 	int ret;
1910 	int err = 0;
1911 
1912 	path = btrfs_alloc_path();
1913 	if (!path)
1914 		return -ENOMEM;
1915 
1916 	path->reada = 1;
1917 	path->leave_spinning = 1;
1918 	/* this will setup the path even if it fails to insert the back ref */
1919 	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1920 					   path, bytenr, num_bytes, parent,
1921 					   root_objectid, owner, offset,
1922 					   refs_to_add, extent_op);
1923 	if (ret == 0)
1924 		goto out;
1925 
1926 	if (ret != -EAGAIN) {
1927 		err = ret;
1928 		goto out;
1929 	}
1930 
1931 	leaf = path->nodes[0];
1932 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1933 	refs = btrfs_extent_refs(leaf, item);
1934 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1935 	if (extent_op)
1936 		__run_delayed_extent_op(extent_op, leaf, item);
1937 
1938 	btrfs_mark_buffer_dirty(leaf);
1939 	btrfs_release_path(path);
1940 
1941 	path->reada = 1;
1942 	path->leave_spinning = 1;
1943 
1944 	/* now insert the actual backref */
1945 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
1946 				    path, bytenr, parent, root_objectid,
1947 				    owner, offset, refs_to_add);
1948 	if (ret)
1949 		btrfs_abort_transaction(trans, root, ret);
1950 out:
1951 	btrfs_free_path(path);
1952 	return err;
1953 }
1954 
1955 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1956 				struct btrfs_root *root,
1957 				struct btrfs_delayed_ref_node *node,
1958 				struct btrfs_delayed_extent_op *extent_op,
1959 				int insert_reserved)
1960 {
1961 	int ret = 0;
1962 	struct btrfs_delayed_data_ref *ref;
1963 	struct btrfs_key ins;
1964 	u64 parent = 0;
1965 	u64 ref_root = 0;
1966 	u64 flags = 0;
1967 
1968 	ins.objectid = node->bytenr;
1969 	ins.offset = node->num_bytes;
1970 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1971 
1972 	ref = btrfs_delayed_node_to_data_ref(node);
1973 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1974 		parent = ref->parent;
1975 	else
1976 		ref_root = ref->root;
1977 
1978 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1979 		if (extent_op) {
1980 			BUG_ON(extent_op->update_key);
1981 			flags |= extent_op->flags_to_set;
1982 		}
1983 		ret = alloc_reserved_file_extent(trans, root,
1984 						 parent, ref_root, flags,
1985 						 ref->objectid, ref->offset,
1986 						 &ins, node->ref_mod);
1987 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1988 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1989 					     node->num_bytes, parent,
1990 					     ref_root, ref->objectid,
1991 					     ref->offset, node->ref_mod,
1992 					     extent_op);
1993 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1994 		ret = __btrfs_free_extent(trans, root, node->bytenr,
1995 					  node->num_bytes, parent,
1996 					  ref_root, ref->objectid,
1997 					  ref->offset, node->ref_mod,
1998 					  extent_op);
1999 	} else {
2000 		BUG();
2001 	}
2002 	return ret;
2003 }
2004 
2005 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2006 				    struct extent_buffer *leaf,
2007 				    struct btrfs_extent_item *ei)
2008 {
2009 	u64 flags = btrfs_extent_flags(leaf, ei);
2010 	if (extent_op->update_flags) {
2011 		flags |= extent_op->flags_to_set;
2012 		btrfs_set_extent_flags(leaf, ei, flags);
2013 	}
2014 
2015 	if (extent_op->update_key) {
2016 		struct btrfs_tree_block_info *bi;
2017 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2018 		bi = (struct btrfs_tree_block_info *)(ei + 1);
2019 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2020 	}
2021 }
2022 
2023 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2024 				 struct btrfs_root *root,
2025 				 struct btrfs_delayed_ref_node *node,
2026 				 struct btrfs_delayed_extent_op *extent_op)
2027 {
2028 	struct btrfs_key key;
2029 	struct btrfs_path *path;
2030 	struct btrfs_extent_item *ei;
2031 	struct extent_buffer *leaf;
2032 	u32 item_size;
2033 	int ret;
2034 	int err = 0;
2035 
2036 	if (trans->aborted)
2037 		return 0;
2038 
2039 	path = btrfs_alloc_path();
2040 	if (!path)
2041 		return -ENOMEM;
2042 
2043 	key.objectid = node->bytenr;
2044 	key.type = BTRFS_EXTENT_ITEM_KEY;
2045 	key.offset = node->num_bytes;
2046 
2047 	path->reada = 1;
2048 	path->leave_spinning = 1;
2049 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2050 				path, 0, 1);
2051 	if (ret < 0) {
2052 		err = ret;
2053 		goto out;
2054 	}
2055 	if (ret > 0) {
2056 		err = -EIO;
2057 		goto out;
2058 	}
2059 
2060 	leaf = path->nodes[0];
2061 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2062 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2063 	if (item_size < sizeof(*ei)) {
2064 		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2065 					     path, (u64)-1, 0);
2066 		if (ret < 0) {
2067 			err = ret;
2068 			goto out;
2069 		}
2070 		leaf = path->nodes[0];
2071 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2072 	}
2073 #endif
2074 	BUG_ON(item_size < sizeof(*ei));
2075 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2076 	__run_delayed_extent_op(extent_op, leaf, ei);
2077 
2078 	btrfs_mark_buffer_dirty(leaf);
2079 out:
2080 	btrfs_free_path(path);
2081 	return err;
2082 }
2083 
2084 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2085 				struct btrfs_root *root,
2086 				struct btrfs_delayed_ref_node *node,
2087 				struct btrfs_delayed_extent_op *extent_op,
2088 				int insert_reserved)
2089 {
2090 	int ret = 0;
2091 	struct btrfs_delayed_tree_ref *ref;
2092 	struct btrfs_key ins;
2093 	u64 parent = 0;
2094 	u64 ref_root = 0;
2095 
2096 	ins.objectid = node->bytenr;
2097 	ins.offset = node->num_bytes;
2098 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2099 
2100 	ref = btrfs_delayed_node_to_tree_ref(node);
2101 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2102 		parent = ref->parent;
2103 	else
2104 		ref_root = ref->root;
2105 
2106 	BUG_ON(node->ref_mod != 1);
2107 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2108 		BUG_ON(!extent_op || !extent_op->update_flags ||
2109 		       !extent_op->update_key);
2110 		ret = alloc_reserved_tree_block(trans, root,
2111 						parent, ref_root,
2112 						extent_op->flags_to_set,
2113 						&extent_op->key,
2114 						ref->level, &ins);
2115 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2116 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2117 					     node->num_bytes, parent, ref_root,
2118 					     ref->level, 0, 1, extent_op);
2119 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2120 		ret = __btrfs_free_extent(trans, root, node->bytenr,
2121 					  node->num_bytes, parent, ref_root,
2122 					  ref->level, 0, 1, extent_op);
2123 	} else {
2124 		BUG();
2125 	}
2126 	return ret;
2127 }
2128 
2129 /* helper function to actually process a single delayed ref entry */
2130 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2131 			       struct btrfs_root *root,
2132 			       struct btrfs_delayed_ref_node *node,
2133 			       struct btrfs_delayed_extent_op *extent_op,
2134 			       int insert_reserved)
2135 {
2136 	int ret = 0;
2137 
2138 	if (trans->aborted)
2139 		return 0;
2140 
2141 	if (btrfs_delayed_ref_is_head(node)) {
2142 		struct btrfs_delayed_ref_head *head;
2143 		/*
2144 		 * we've hit the end of the chain and we were supposed
2145 		 * to insert this extent into the tree.  But, it got
2146 		 * deleted before we ever needed to insert it, so all
2147 		 * we have to do is clean up the accounting
2148 		 */
2149 		BUG_ON(extent_op);
2150 		head = btrfs_delayed_node_to_head(node);
2151 		if (insert_reserved) {
2152 			btrfs_pin_extent(root, node->bytenr,
2153 					 node->num_bytes, 1);
2154 			if (head->is_data) {
2155 				ret = btrfs_del_csums(trans, root,
2156 						      node->bytenr,
2157 						      node->num_bytes);
2158 			}
2159 		}
2160 		mutex_unlock(&head->mutex);
2161 		return ret;
2162 	}
2163 
2164 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2165 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2166 		ret = run_delayed_tree_ref(trans, root, node, extent_op,
2167 					   insert_reserved);
2168 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2169 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2170 		ret = run_delayed_data_ref(trans, root, node, extent_op,
2171 					   insert_reserved);
2172 	else
2173 		BUG();
2174 	return ret;
2175 }
2176 
2177 static noinline struct btrfs_delayed_ref_node *
2178 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2179 {
2180 	struct rb_node *node;
2181 	struct btrfs_delayed_ref_node *ref;
2182 	int action = BTRFS_ADD_DELAYED_REF;
2183 again:
2184 	/*
2185 	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2186 	 * this prevents ref count from going down to zero when
2187 	 * there still are pending delayed ref.
2188 	 */
2189 	node = rb_prev(&head->node.rb_node);
2190 	while (1) {
2191 		if (!node)
2192 			break;
2193 		ref = rb_entry(node, struct btrfs_delayed_ref_node,
2194 				rb_node);
2195 		if (ref->bytenr != head->node.bytenr)
2196 			break;
2197 		if (ref->action == action)
2198 			return ref;
2199 		node = rb_prev(node);
2200 	}
2201 	if (action == BTRFS_ADD_DELAYED_REF) {
2202 		action = BTRFS_DROP_DELAYED_REF;
2203 		goto again;
2204 	}
2205 	return NULL;
2206 }
2207 
2208 /*
2209  * Returns 0 on success or if called with an already aborted transaction.
2210  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2211  */
2212 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2213 				       struct btrfs_root *root,
2214 				       struct list_head *cluster)
2215 {
2216 	struct btrfs_delayed_ref_root *delayed_refs;
2217 	struct btrfs_delayed_ref_node *ref;
2218 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2219 	struct btrfs_delayed_extent_op *extent_op;
2220 	int ret;
2221 	int count = 0;
2222 	int must_insert_reserved = 0;
2223 
2224 	delayed_refs = &trans->transaction->delayed_refs;
2225 	while (1) {
2226 		if (!locked_ref) {
2227 			/* pick a new head ref from the cluster list */
2228 			if (list_empty(cluster))
2229 				break;
2230 
2231 			locked_ref = list_entry(cluster->next,
2232 				     struct btrfs_delayed_ref_head, cluster);
2233 
2234 			/* grab the lock that says we are going to process
2235 			 * all the refs for this head */
2236 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2237 
2238 			/*
2239 			 * we may have dropped the spin lock to get the head
2240 			 * mutex lock, and that might have given someone else
2241 			 * time to free the head.  If that's true, it has been
2242 			 * removed from our list and we can move on.
2243 			 */
2244 			if (ret == -EAGAIN) {
2245 				locked_ref = NULL;
2246 				count++;
2247 				continue;
2248 			}
2249 		}
2250 
2251 		/*
2252 		 * locked_ref is the head node, so we have to go one
2253 		 * node back for any delayed ref updates
2254 		 */
2255 		ref = select_delayed_ref(locked_ref);
2256 
2257 		if (ref && ref->seq &&
2258 		    btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
2259 			/*
2260 			 * there are still refs with lower seq numbers in the
2261 			 * process of being added. Don't run this ref yet.
2262 			 */
2263 			list_del_init(&locked_ref->cluster);
2264 			mutex_unlock(&locked_ref->mutex);
2265 			locked_ref = NULL;
2266 			delayed_refs->num_heads_ready++;
2267 			spin_unlock(&delayed_refs->lock);
2268 			cond_resched();
2269 			spin_lock(&delayed_refs->lock);
2270 			continue;
2271 		}
2272 
2273 		/*
2274 		 * record the must insert reserved flag before we
2275 		 * drop the spin lock.
2276 		 */
2277 		must_insert_reserved = locked_ref->must_insert_reserved;
2278 		locked_ref->must_insert_reserved = 0;
2279 
2280 		extent_op = locked_ref->extent_op;
2281 		locked_ref->extent_op = NULL;
2282 
2283 		if (!ref) {
2284 			/* All delayed refs have been processed, Go ahead
2285 			 * and send the head node to run_one_delayed_ref,
2286 			 * so that any accounting fixes can happen
2287 			 */
2288 			ref = &locked_ref->node;
2289 
2290 			if (extent_op && must_insert_reserved) {
2291 				kfree(extent_op);
2292 				extent_op = NULL;
2293 			}
2294 
2295 			if (extent_op) {
2296 				spin_unlock(&delayed_refs->lock);
2297 
2298 				ret = run_delayed_extent_op(trans, root,
2299 							    ref, extent_op);
2300 				kfree(extent_op);
2301 
2302 				if (ret) {
2303 					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2304 					spin_lock(&delayed_refs->lock);
2305 					return ret;
2306 				}
2307 
2308 				goto next;
2309 			}
2310 
2311 			list_del_init(&locked_ref->cluster);
2312 			locked_ref = NULL;
2313 		}
2314 
2315 		ref->in_tree = 0;
2316 		rb_erase(&ref->rb_node, &delayed_refs->root);
2317 		delayed_refs->num_entries--;
2318 		/*
2319 		 * we modified num_entries, but as we're currently running
2320 		 * delayed refs, skip
2321 		 *     wake_up(&delayed_refs->seq_wait);
2322 		 * here.
2323 		 */
2324 		spin_unlock(&delayed_refs->lock);
2325 
2326 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
2327 					  must_insert_reserved);
2328 
2329 		btrfs_put_delayed_ref(ref);
2330 		kfree(extent_op);
2331 		count++;
2332 
2333 		if (ret) {
2334 			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2335 			spin_lock(&delayed_refs->lock);
2336 			return ret;
2337 		}
2338 
2339 next:
2340 		do_chunk_alloc(trans, root->fs_info->extent_root,
2341 			       2 * 1024 * 1024,
2342 			       btrfs_get_alloc_profile(root, 0),
2343 			       CHUNK_ALLOC_NO_FORCE);
2344 		cond_resched();
2345 		spin_lock(&delayed_refs->lock);
2346 	}
2347 	return count;
2348 }
2349 
2350 
2351 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2352 			unsigned long num_refs)
2353 {
2354 	struct list_head *first_seq = delayed_refs->seq_head.next;
2355 
2356 	spin_unlock(&delayed_refs->lock);
2357 	pr_debug("waiting for more refs (num %ld, first %p)\n",
2358 		 num_refs, first_seq);
2359 	wait_event(delayed_refs->seq_wait,
2360 		   num_refs != delayed_refs->num_entries ||
2361 		   delayed_refs->seq_head.next != first_seq);
2362 	pr_debug("done waiting for more refs (num %ld, first %p)\n",
2363 		 delayed_refs->num_entries, delayed_refs->seq_head.next);
2364 	spin_lock(&delayed_refs->lock);
2365 }
2366 
2367 /*
2368  * this starts processing the delayed reference count updates and
2369  * extent insertions we have queued up so far.  count can be
2370  * 0, which means to process everything in the tree at the start
2371  * of the run (but not newly added entries), or it can be some target
2372  * number you'd like to process.
2373  *
2374  * Returns 0 on success or if called with an aborted transaction
2375  * Returns <0 on error and aborts the transaction
2376  */
2377 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2378 			   struct btrfs_root *root, unsigned long count)
2379 {
2380 	struct rb_node *node;
2381 	struct btrfs_delayed_ref_root *delayed_refs;
2382 	struct btrfs_delayed_ref_node *ref;
2383 	struct list_head cluster;
2384 	int ret;
2385 	u64 delayed_start;
2386 	int run_all = count == (unsigned long)-1;
2387 	int run_most = 0;
2388 	unsigned long num_refs = 0;
2389 	int consider_waiting;
2390 
2391 	/* We'll clean this up in btrfs_cleanup_transaction */
2392 	if (trans->aborted)
2393 		return 0;
2394 
2395 	if (root == root->fs_info->extent_root)
2396 		root = root->fs_info->tree_root;
2397 
2398 	do_chunk_alloc(trans, root->fs_info->extent_root,
2399 		       2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
2400 		       CHUNK_ALLOC_NO_FORCE);
2401 
2402 	delayed_refs = &trans->transaction->delayed_refs;
2403 	INIT_LIST_HEAD(&cluster);
2404 again:
2405 	consider_waiting = 0;
2406 	spin_lock(&delayed_refs->lock);
2407 	if (count == 0) {
2408 		count = delayed_refs->num_entries * 2;
2409 		run_most = 1;
2410 	}
2411 	while (1) {
2412 		if (!(run_all || run_most) &&
2413 		    delayed_refs->num_heads_ready < 64)
2414 			break;
2415 
2416 		/*
2417 		 * go find something we can process in the rbtree.  We start at
2418 		 * the beginning of the tree, and then build a cluster
2419 		 * of refs to process starting at the first one we are able to
2420 		 * lock
2421 		 */
2422 		delayed_start = delayed_refs->run_delayed_start;
2423 		ret = btrfs_find_ref_cluster(trans, &cluster,
2424 					     delayed_refs->run_delayed_start);
2425 		if (ret)
2426 			break;
2427 
2428 		if (delayed_start >= delayed_refs->run_delayed_start) {
2429 			if (consider_waiting == 0) {
2430 				/*
2431 				 * btrfs_find_ref_cluster looped. let's do one
2432 				 * more cycle. if we don't run any delayed ref
2433 				 * during that cycle (because we can't because
2434 				 * all of them are blocked) and if the number of
2435 				 * refs doesn't change, we avoid busy waiting.
2436 				 */
2437 				consider_waiting = 1;
2438 				num_refs = delayed_refs->num_entries;
2439 			} else {
2440 				wait_for_more_refs(delayed_refs, num_refs);
2441 				/*
2442 				 * after waiting, things have changed. we
2443 				 * dropped the lock and someone else might have
2444 				 * run some refs, built new clusters and so on.
2445 				 * therefore, we restart staleness detection.
2446 				 */
2447 				consider_waiting = 0;
2448 			}
2449 		}
2450 
2451 		ret = run_clustered_refs(trans, root, &cluster);
2452 		if (ret < 0) {
2453 			spin_unlock(&delayed_refs->lock);
2454 			btrfs_abort_transaction(trans, root, ret);
2455 			return ret;
2456 		}
2457 
2458 		count -= min_t(unsigned long, ret, count);
2459 
2460 		if (count == 0)
2461 			break;
2462 
2463 		if (ret || delayed_refs->run_delayed_start == 0) {
2464 			/* refs were run, let's reset staleness detection */
2465 			consider_waiting = 0;
2466 		}
2467 	}
2468 
2469 	if (run_all) {
2470 		node = rb_first(&delayed_refs->root);
2471 		if (!node)
2472 			goto out;
2473 		count = (unsigned long)-1;
2474 
2475 		while (node) {
2476 			ref = rb_entry(node, struct btrfs_delayed_ref_node,
2477 				       rb_node);
2478 			if (btrfs_delayed_ref_is_head(ref)) {
2479 				struct btrfs_delayed_ref_head *head;
2480 
2481 				head = btrfs_delayed_node_to_head(ref);
2482 				atomic_inc(&ref->refs);
2483 
2484 				spin_unlock(&delayed_refs->lock);
2485 				/*
2486 				 * Mutex was contended, block until it's
2487 				 * released and try again
2488 				 */
2489 				mutex_lock(&head->mutex);
2490 				mutex_unlock(&head->mutex);
2491 
2492 				btrfs_put_delayed_ref(ref);
2493 				cond_resched();
2494 				goto again;
2495 			}
2496 			node = rb_next(node);
2497 		}
2498 		spin_unlock(&delayed_refs->lock);
2499 		schedule_timeout(1);
2500 		goto again;
2501 	}
2502 out:
2503 	spin_unlock(&delayed_refs->lock);
2504 	return 0;
2505 }
2506 
2507 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2508 				struct btrfs_root *root,
2509 				u64 bytenr, u64 num_bytes, u64 flags,
2510 				int is_data)
2511 {
2512 	struct btrfs_delayed_extent_op *extent_op;
2513 	int ret;
2514 
2515 	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2516 	if (!extent_op)
2517 		return -ENOMEM;
2518 
2519 	extent_op->flags_to_set = flags;
2520 	extent_op->update_flags = 1;
2521 	extent_op->update_key = 0;
2522 	extent_op->is_data = is_data ? 1 : 0;
2523 
2524 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2525 					  num_bytes, extent_op);
2526 	if (ret)
2527 		kfree(extent_op);
2528 	return ret;
2529 }
2530 
2531 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2532 				      struct btrfs_root *root,
2533 				      struct btrfs_path *path,
2534 				      u64 objectid, u64 offset, u64 bytenr)
2535 {
2536 	struct btrfs_delayed_ref_head *head;
2537 	struct btrfs_delayed_ref_node *ref;
2538 	struct btrfs_delayed_data_ref *data_ref;
2539 	struct btrfs_delayed_ref_root *delayed_refs;
2540 	struct rb_node *node;
2541 	int ret = 0;
2542 
2543 	ret = -ENOENT;
2544 	delayed_refs = &trans->transaction->delayed_refs;
2545 	spin_lock(&delayed_refs->lock);
2546 	head = btrfs_find_delayed_ref_head(trans, bytenr);
2547 	if (!head)
2548 		goto out;
2549 
2550 	if (!mutex_trylock(&head->mutex)) {
2551 		atomic_inc(&head->node.refs);
2552 		spin_unlock(&delayed_refs->lock);
2553 
2554 		btrfs_release_path(path);
2555 
2556 		/*
2557 		 * Mutex was contended, block until it's released and let
2558 		 * caller try again
2559 		 */
2560 		mutex_lock(&head->mutex);
2561 		mutex_unlock(&head->mutex);
2562 		btrfs_put_delayed_ref(&head->node);
2563 		return -EAGAIN;
2564 	}
2565 
2566 	node = rb_prev(&head->node.rb_node);
2567 	if (!node)
2568 		goto out_unlock;
2569 
2570 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2571 
2572 	if (ref->bytenr != bytenr)
2573 		goto out_unlock;
2574 
2575 	ret = 1;
2576 	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2577 		goto out_unlock;
2578 
2579 	data_ref = btrfs_delayed_node_to_data_ref(ref);
2580 
2581 	node = rb_prev(node);
2582 	if (node) {
2583 		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2584 		if (ref->bytenr == bytenr)
2585 			goto out_unlock;
2586 	}
2587 
2588 	if (data_ref->root != root->root_key.objectid ||
2589 	    data_ref->objectid != objectid || data_ref->offset != offset)
2590 		goto out_unlock;
2591 
2592 	ret = 0;
2593 out_unlock:
2594 	mutex_unlock(&head->mutex);
2595 out:
2596 	spin_unlock(&delayed_refs->lock);
2597 	return ret;
2598 }
2599 
2600 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2601 					struct btrfs_root *root,
2602 					struct btrfs_path *path,
2603 					u64 objectid, u64 offset, u64 bytenr)
2604 {
2605 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2606 	struct extent_buffer *leaf;
2607 	struct btrfs_extent_data_ref *ref;
2608 	struct btrfs_extent_inline_ref *iref;
2609 	struct btrfs_extent_item *ei;
2610 	struct btrfs_key key;
2611 	u32 item_size;
2612 	int ret;
2613 
2614 	key.objectid = bytenr;
2615 	key.offset = (u64)-1;
2616 	key.type = BTRFS_EXTENT_ITEM_KEY;
2617 
2618 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2619 	if (ret < 0)
2620 		goto out;
2621 	BUG_ON(ret == 0); /* Corruption */
2622 
2623 	ret = -ENOENT;
2624 	if (path->slots[0] == 0)
2625 		goto out;
2626 
2627 	path->slots[0]--;
2628 	leaf = path->nodes[0];
2629 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2630 
2631 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2632 		goto out;
2633 
2634 	ret = 1;
2635 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2636 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2637 	if (item_size < sizeof(*ei)) {
2638 		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2639 		goto out;
2640 	}
2641 #endif
2642 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2643 
2644 	if (item_size != sizeof(*ei) +
2645 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2646 		goto out;
2647 
2648 	if (btrfs_extent_generation(leaf, ei) <=
2649 	    btrfs_root_last_snapshot(&root->root_item))
2650 		goto out;
2651 
2652 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2653 	if (btrfs_extent_inline_ref_type(leaf, iref) !=
2654 	    BTRFS_EXTENT_DATA_REF_KEY)
2655 		goto out;
2656 
2657 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2658 	if (btrfs_extent_refs(leaf, ei) !=
2659 	    btrfs_extent_data_ref_count(leaf, ref) ||
2660 	    btrfs_extent_data_ref_root(leaf, ref) !=
2661 	    root->root_key.objectid ||
2662 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2663 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2664 		goto out;
2665 
2666 	ret = 0;
2667 out:
2668 	return ret;
2669 }
2670 
2671 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2672 			  struct btrfs_root *root,
2673 			  u64 objectid, u64 offset, u64 bytenr)
2674 {
2675 	struct btrfs_path *path;
2676 	int ret;
2677 	int ret2;
2678 
2679 	path = btrfs_alloc_path();
2680 	if (!path)
2681 		return -ENOENT;
2682 
2683 	do {
2684 		ret = check_committed_ref(trans, root, path, objectid,
2685 					  offset, bytenr);
2686 		if (ret && ret != -ENOENT)
2687 			goto out;
2688 
2689 		ret2 = check_delayed_ref(trans, root, path, objectid,
2690 					 offset, bytenr);
2691 	} while (ret2 == -EAGAIN);
2692 
2693 	if (ret2 && ret2 != -ENOENT) {
2694 		ret = ret2;
2695 		goto out;
2696 	}
2697 
2698 	if (ret != -ENOENT || ret2 != -ENOENT)
2699 		ret = 0;
2700 out:
2701 	btrfs_free_path(path);
2702 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2703 		WARN_ON(ret > 0);
2704 	return ret;
2705 }
2706 
2707 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2708 			   struct btrfs_root *root,
2709 			   struct extent_buffer *buf,
2710 			   int full_backref, int inc, int for_cow)
2711 {
2712 	u64 bytenr;
2713 	u64 num_bytes;
2714 	u64 parent;
2715 	u64 ref_root;
2716 	u32 nritems;
2717 	struct btrfs_key key;
2718 	struct btrfs_file_extent_item *fi;
2719 	int i;
2720 	int level;
2721 	int ret = 0;
2722 	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2723 			    u64, u64, u64, u64, u64, u64, int);
2724 
2725 	ref_root = btrfs_header_owner(buf);
2726 	nritems = btrfs_header_nritems(buf);
2727 	level = btrfs_header_level(buf);
2728 
2729 	if (!root->ref_cows && level == 0)
2730 		return 0;
2731 
2732 	if (inc)
2733 		process_func = btrfs_inc_extent_ref;
2734 	else
2735 		process_func = btrfs_free_extent;
2736 
2737 	if (full_backref)
2738 		parent = buf->start;
2739 	else
2740 		parent = 0;
2741 
2742 	for (i = 0; i < nritems; i++) {
2743 		if (level == 0) {
2744 			btrfs_item_key_to_cpu(buf, &key, i);
2745 			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2746 				continue;
2747 			fi = btrfs_item_ptr(buf, i,
2748 					    struct btrfs_file_extent_item);
2749 			if (btrfs_file_extent_type(buf, fi) ==
2750 			    BTRFS_FILE_EXTENT_INLINE)
2751 				continue;
2752 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2753 			if (bytenr == 0)
2754 				continue;
2755 
2756 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2757 			key.offset -= btrfs_file_extent_offset(buf, fi);
2758 			ret = process_func(trans, root, bytenr, num_bytes,
2759 					   parent, ref_root, key.objectid,
2760 					   key.offset, for_cow);
2761 			if (ret)
2762 				goto fail;
2763 		} else {
2764 			bytenr = btrfs_node_blockptr(buf, i);
2765 			num_bytes = btrfs_level_size(root, level - 1);
2766 			ret = process_func(trans, root, bytenr, num_bytes,
2767 					   parent, ref_root, level - 1, 0,
2768 					   for_cow);
2769 			if (ret)
2770 				goto fail;
2771 		}
2772 	}
2773 	return 0;
2774 fail:
2775 	return ret;
2776 }
2777 
2778 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2779 		  struct extent_buffer *buf, int full_backref, int for_cow)
2780 {
2781 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2782 }
2783 
2784 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2785 		  struct extent_buffer *buf, int full_backref, int for_cow)
2786 {
2787 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2788 }
2789 
2790 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2791 				 struct btrfs_root *root,
2792 				 struct btrfs_path *path,
2793 				 struct btrfs_block_group_cache *cache)
2794 {
2795 	int ret;
2796 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2797 	unsigned long bi;
2798 	struct extent_buffer *leaf;
2799 
2800 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2801 	if (ret < 0)
2802 		goto fail;
2803 	BUG_ON(ret); /* Corruption */
2804 
2805 	leaf = path->nodes[0];
2806 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2807 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2808 	btrfs_mark_buffer_dirty(leaf);
2809 	btrfs_release_path(path);
2810 fail:
2811 	if (ret) {
2812 		btrfs_abort_transaction(trans, root, ret);
2813 		return ret;
2814 	}
2815 	return 0;
2816 
2817 }
2818 
2819 static struct btrfs_block_group_cache *
2820 next_block_group(struct btrfs_root *root,
2821 		 struct btrfs_block_group_cache *cache)
2822 {
2823 	struct rb_node *node;
2824 	spin_lock(&root->fs_info->block_group_cache_lock);
2825 	node = rb_next(&cache->cache_node);
2826 	btrfs_put_block_group(cache);
2827 	if (node) {
2828 		cache = rb_entry(node, struct btrfs_block_group_cache,
2829 				 cache_node);
2830 		btrfs_get_block_group(cache);
2831 	} else
2832 		cache = NULL;
2833 	spin_unlock(&root->fs_info->block_group_cache_lock);
2834 	return cache;
2835 }
2836 
2837 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2838 			    struct btrfs_trans_handle *trans,
2839 			    struct btrfs_path *path)
2840 {
2841 	struct btrfs_root *root = block_group->fs_info->tree_root;
2842 	struct inode *inode = NULL;
2843 	u64 alloc_hint = 0;
2844 	int dcs = BTRFS_DC_ERROR;
2845 	int num_pages = 0;
2846 	int retries = 0;
2847 	int ret = 0;
2848 
2849 	/*
2850 	 * If this block group is smaller than 100 megs don't bother caching the
2851 	 * block group.
2852 	 */
2853 	if (block_group->key.offset < (100 * 1024 * 1024)) {
2854 		spin_lock(&block_group->lock);
2855 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2856 		spin_unlock(&block_group->lock);
2857 		return 0;
2858 	}
2859 
2860 again:
2861 	inode = lookup_free_space_inode(root, block_group, path);
2862 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2863 		ret = PTR_ERR(inode);
2864 		btrfs_release_path(path);
2865 		goto out;
2866 	}
2867 
2868 	if (IS_ERR(inode)) {
2869 		BUG_ON(retries);
2870 		retries++;
2871 
2872 		if (block_group->ro)
2873 			goto out_free;
2874 
2875 		ret = create_free_space_inode(root, trans, block_group, path);
2876 		if (ret)
2877 			goto out_free;
2878 		goto again;
2879 	}
2880 
2881 	/* We've already setup this transaction, go ahead and exit */
2882 	if (block_group->cache_generation == trans->transid &&
2883 	    i_size_read(inode)) {
2884 		dcs = BTRFS_DC_SETUP;
2885 		goto out_put;
2886 	}
2887 
2888 	/*
2889 	 * We want to set the generation to 0, that way if anything goes wrong
2890 	 * from here on out we know not to trust this cache when we load up next
2891 	 * time.
2892 	 */
2893 	BTRFS_I(inode)->generation = 0;
2894 	ret = btrfs_update_inode(trans, root, inode);
2895 	WARN_ON(ret);
2896 
2897 	if (i_size_read(inode) > 0) {
2898 		ret = btrfs_truncate_free_space_cache(root, trans, path,
2899 						      inode);
2900 		if (ret)
2901 			goto out_put;
2902 	}
2903 
2904 	spin_lock(&block_group->lock);
2905 	if (block_group->cached != BTRFS_CACHE_FINISHED) {
2906 		/* We're not cached, don't bother trying to write stuff out */
2907 		dcs = BTRFS_DC_WRITTEN;
2908 		spin_unlock(&block_group->lock);
2909 		goto out_put;
2910 	}
2911 	spin_unlock(&block_group->lock);
2912 
2913 	num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2914 	if (!num_pages)
2915 		num_pages = 1;
2916 
2917 	/*
2918 	 * Just to make absolutely sure we have enough space, we're going to
2919 	 * preallocate 12 pages worth of space for each block group.  In
2920 	 * practice we ought to use at most 8, but we need extra space so we can
2921 	 * add our header and have a terminator between the extents and the
2922 	 * bitmaps.
2923 	 */
2924 	num_pages *= 16;
2925 	num_pages *= PAGE_CACHE_SIZE;
2926 
2927 	ret = btrfs_check_data_free_space(inode, num_pages);
2928 	if (ret)
2929 		goto out_put;
2930 
2931 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2932 					      num_pages, num_pages,
2933 					      &alloc_hint);
2934 	if (!ret)
2935 		dcs = BTRFS_DC_SETUP;
2936 	btrfs_free_reserved_data_space(inode, num_pages);
2937 
2938 out_put:
2939 	iput(inode);
2940 out_free:
2941 	btrfs_release_path(path);
2942 out:
2943 	spin_lock(&block_group->lock);
2944 	if (!ret && dcs == BTRFS_DC_SETUP)
2945 		block_group->cache_generation = trans->transid;
2946 	block_group->disk_cache_state = dcs;
2947 	spin_unlock(&block_group->lock);
2948 
2949 	return ret;
2950 }
2951 
2952 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2953 				   struct btrfs_root *root)
2954 {
2955 	struct btrfs_block_group_cache *cache;
2956 	int err = 0;
2957 	struct btrfs_path *path;
2958 	u64 last = 0;
2959 
2960 	path = btrfs_alloc_path();
2961 	if (!path)
2962 		return -ENOMEM;
2963 
2964 again:
2965 	while (1) {
2966 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
2967 		while (cache) {
2968 			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2969 				break;
2970 			cache = next_block_group(root, cache);
2971 		}
2972 		if (!cache) {
2973 			if (last == 0)
2974 				break;
2975 			last = 0;
2976 			continue;
2977 		}
2978 		err = cache_save_setup(cache, trans, path);
2979 		last = cache->key.objectid + cache->key.offset;
2980 		btrfs_put_block_group(cache);
2981 	}
2982 
2983 	while (1) {
2984 		if (last == 0) {
2985 			err = btrfs_run_delayed_refs(trans, root,
2986 						     (unsigned long)-1);
2987 			if (err) /* File system offline */
2988 				goto out;
2989 		}
2990 
2991 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
2992 		while (cache) {
2993 			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2994 				btrfs_put_block_group(cache);
2995 				goto again;
2996 			}
2997 
2998 			if (cache->dirty)
2999 				break;
3000 			cache = next_block_group(root, cache);
3001 		}
3002 		if (!cache) {
3003 			if (last == 0)
3004 				break;
3005 			last = 0;
3006 			continue;
3007 		}
3008 
3009 		if (cache->disk_cache_state == BTRFS_DC_SETUP)
3010 			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3011 		cache->dirty = 0;
3012 		last = cache->key.objectid + cache->key.offset;
3013 
3014 		err = write_one_cache_group(trans, root, path, cache);
3015 		if (err) /* File system offline */
3016 			goto out;
3017 
3018 		btrfs_put_block_group(cache);
3019 	}
3020 
3021 	while (1) {
3022 		/*
3023 		 * I don't think this is needed since we're just marking our
3024 		 * preallocated extent as written, but just in case it can't
3025 		 * hurt.
3026 		 */
3027 		if (last == 0) {
3028 			err = btrfs_run_delayed_refs(trans, root,
3029 						     (unsigned long)-1);
3030 			if (err) /* File system offline */
3031 				goto out;
3032 		}
3033 
3034 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3035 		while (cache) {
3036 			/*
3037 			 * Really this shouldn't happen, but it could if we
3038 			 * couldn't write the entire preallocated extent and
3039 			 * splitting the extent resulted in a new block.
3040 			 */
3041 			if (cache->dirty) {
3042 				btrfs_put_block_group(cache);
3043 				goto again;
3044 			}
3045 			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3046 				break;
3047 			cache = next_block_group(root, cache);
3048 		}
3049 		if (!cache) {
3050 			if (last == 0)
3051 				break;
3052 			last = 0;
3053 			continue;
3054 		}
3055 
3056 		err = btrfs_write_out_cache(root, trans, cache, path);
3057 
3058 		/*
3059 		 * If we didn't have an error then the cache state is still
3060 		 * NEED_WRITE, so we can set it to WRITTEN.
3061 		 */
3062 		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3063 			cache->disk_cache_state = BTRFS_DC_WRITTEN;
3064 		last = cache->key.objectid + cache->key.offset;
3065 		btrfs_put_block_group(cache);
3066 	}
3067 out:
3068 
3069 	btrfs_free_path(path);
3070 	return err;
3071 }
3072 
3073 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3074 {
3075 	struct btrfs_block_group_cache *block_group;
3076 	int readonly = 0;
3077 
3078 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3079 	if (!block_group || block_group->ro)
3080 		readonly = 1;
3081 	if (block_group)
3082 		btrfs_put_block_group(block_group);
3083 	return readonly;
3084 }
3085 
3086 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3087 			     u64 total_bytes, u64 bytes_used,
3088 			     struct btrfs_space_info **space_info)
3089 {
3090 	struct btrfs_space_info *found;
3091 	int i;
3092 	int factor;
3093 
3094 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3095 		     BTRFS_BLOCK_GROUP_RAID10))
3096 		factor = 2;
3097 	else
3098 		factor = 1;
3099 
3100 	found = __find_space_info(info, flags);
3101 	if (found) {
3102 		spin_lock(&found->lock);
3103 		found->total_bytes += total_bytes;
3104 		found->disk_total += total_bytes * factor;
3105 		found->bytes_used += bytes_used;
3106 		found->disk_used += bytes_used * factor;
3107 		found->full = 0;
3108 		spin_unlock(&found->lock);
3109 		*space_info = found;
3110 		return 0;
3111 	}
3112 	found = kzalloc(sizeof(*found), GFP_NOFS);
3113 	if (!found)
3114 		return -ENOMEM;
3115 
3116 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3117 		INIT_LIST_HEAD(&found->block_groups[i]);
3118 	init_rwsem(&found->groups_sem);
3119 	spin_lock_init(&found->lock);
3120 	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3121 	found->total_bytes = total_bytes;
3122 	found->disk_total = total_bytes * factor;
3123 	found->bytes_used = bytes_used;
3124 	found->disk_used = bytes_used * factor;
3125 	found->bytes_pinned = 0;
3126 	found->bytes_reserved = 0;
3127 	found->bytes_readonly = 0;
3128 	found->bytes_may_use = 0;
3129 	found->full = 0;
3130 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3131 	found->chunk_alloc = 0;
3132 	found->flush = 0;
3133 	init_waitqueue_head(&found->wait);
3134 	*space_info = found;
3135 	list_add_rcu(&found->list, &info->space_info);
3136 	return 0;
3137 }
3138 
3139 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3140 {
3141 	u64 extra_flags = chunk_to_extended(flags) &
3142 				BTRFS_EXTENDED_PROFILE_MASK;
3143 
3144 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3145 		fs_info->avail_data_alloc_bits |= extra_flags;
3146 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
3147 		fs_info->avail_metadata_alloc_bits |= extra_flags;
3148 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3149 		fs_info->avail_system_alloc_bits |= extra_flags;
3150 }
3151 
3152 /*
3153  * returns target flags in extended format or 0 if restripe for this
3154  * chunk_type is not in progress
3155  *
3156  * should be called with either volume_mutex or balance_lock held
3157  */
3158 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3159 {
3160 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3161 	u64 target = 0;
3162 
3163 	if (!bctl)
3164 		return 0;
3165 
3166 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
3167 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3168 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3169 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3170 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3171 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3172 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3173 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3174 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3175 	}
3176 
3177 	return target;
3178 }
3179 
3180 /*
3181  * @flags: available profiles in extended format (see ctree.h)
3182  *
3183  * Returns reduced profile in chunk format.  If profile changing is in
3184  * progress (either running or paused) picks the target profile (if it's
3185  * already available), otherwise falls back to plain reducing.
3186  */
3187 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3188 {
3189 	/*
3190 	 * we add in the count of missing devices because we want
3191 	 * to make sure that any RAID levels on a degraded FS
3192 	 * continue to be honored.
3193 	 */
3194 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
3195 		root->fs_info->fs_devices->missing_devices;
3196 	u64 target;
3197 
3198 	/*
3199 	 * see if restripe for this chunk_type is in progress, if so
3200 	 * try to reduce to the target profile
3201 	 */
3202 	spin_lock(&root->fs_info->balance_lock);
3203 	target = get_restripe_target(root->fs_info, flags);
3204 	if (target) {
3205 		/* pick target profile only if it's already available */
3206 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3207 			spin_unlock(&root->fs_info->balance_lock);
3208 			return extended_to_chunk(target);
3209 		}
3210 	}
3211 	spin_unlock(&root->fs_info->balance_lock);
3212 
3213 	if (num_devices == 1)
3214 		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3215 	if (num_devices < 4)
3216 		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3217 
3218 	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3219 	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3220 		      BTRFS_BLOCK_GROUP_RAID10))) {
3221 		flags &= ~BTRFS_BLOCK_GROUP_DUP;
3222 	}
3223 
3224 	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3225 	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3226 		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3227 	}
3228 
3229 	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3230 	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3231 	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
3232 	     (flags & BTRFS_BLOCK_GROUP_DUP))) {
3233 		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3234 	}
3235 
3236 	return extended_to_chunk(flags);
3237 }
3238 
3239 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3240 {
3241 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3242 		flags |= root->fs_info->avail_data_alloc_bits;
3243 	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3244 		flags |= root->fs_info->avail_system_alloc_bits;
3245 	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3246 		flags |= root->fs_info->avail_metadata_alloc_bits;
3247 
3248 	return btrfs_reduce_alloc_profile(root, flags);
3249 }
3250 
3251 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3252 {
3253 	u64 flags;
3254 
3255 	if (data)
3256 		flags = BTRFS_BLOCK_GROUP_DATA;
3257 	else if (root == root->fs_info->chunk_root)
3258 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
3259 	else
3260 		flags = BTRFS_BLOCK_GROUP_METADATA;
3261 
3262 	return get_alloc_profile(root, flags);
3263 }
3264 
3265 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3266 {
3267 	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3268 						       BTRFS_BLOCK_GROUP_DATA);
3269 }
3270 
3271 /*
3272  * This will check the space that the inode allocates from to make sure we have
3273  * enough space for bytes.
3274  */
3275 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3276 {
3277 	struct btrfs_space_info *data_sinfo;
3278 	struct btrfs_root *root = BTRFS_I(inode)->root;
3279 	u64 used;
3280 	int ret = 0, committed = 0, alloc_chunk = 1;
3281 
3282 	/* make sure bytes are sectorsize aligned */
3283 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3284 
3285 	if (root == root->fs_info->tree_root ||
3286 	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3287 		alloc_chunk = 0;
3288 		committed = 1;
3289 	}
3290 
3291 	data_sinfo = BTRFS_I(inode)->space_info;
3292 	if (!data_sinfo)
3293 		goto alloc;
3294 
3295 again:
3296 	/* make sure we have enough space to handle the data first */
3297 	spin_lock(&data_sinfo->lock);
3298 	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3299 		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3300 		data_sinfo->bytes_may_use;
3301 
3302 	if (used + bytes > data_sinfo->total_bytes) {
3303 		struct btrfs_trans_handle *trans;
3304 
3305 		/*
3306 		 * if we don't have enough free bytes in this space then we need
3307 		 * to alloc a new chunk.
3308 		 */
3309 		if (!data_sinfo->full && alloc_chunk) {
3310 			u64 alloc_target;
3311 
3312 			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3313 			spin_unlock(&data_sinfo->lock);
3314 alloc:
3315 			alloc_target = btrfs_get_alloc_profile(root, 1);
3316 			trans = btrfs_join_transaction(root);
3317 			if (IS_ERR(trans))
3318 				return PTR_ERR(trans);
3319 
3320 			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3321 					     bytes + 2 * 1024 * 1024,
3322 					     alloc_target,
3323 					     CHUNK_ALLOC_NO_FORCE);
3324 			btrfs_end_transaction(trans, root);
3325 			if (ret < 0) {
3326 				if (ret != -ENOSPC)
3327 					return ret;
3328 				else
3329 					goto commit_trans;
3330 			}
3331 
3332 			if (!data_sinfo) {
3333 				btrfs_set_inode_space_info(root, inode);
3334 				data_sinfo = BTRFS_I(inode)->space_info;
3335 			}
3336 			goto again;
3337 		}
3338 
3339 		/*
3340 		 * If we have less pinned bytes than we want to allocate then
3341 		 * don't bother committing the transaction, it won't help us.
3342 		 */
3343 		if (data_sinfo->bytes_pinned < bytes)
3344 			committed = 1;
3345 		spin_unlock(&data_sinfo->lock);
3346 
3347 		/* commit the current transaction and try again */
3348 commit_trans:
3349 		if (!committed &&
3350 		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
3351 			committed = 1;
3352 			trans = btrfs_join_transaction(root);
3353 			if (IS_ERR(trans))
3354 				return PTR_ERR(trans);
3355 			ret = btrfs_commit_transaction(trans, root);
3356 			if (ret)
3357 				return ret;
3358 			goto again;
3359 		}
3360 
3361 		return -ENOSPC;
3362 	}
3363 	data_sinfo->bytes_may_use += bytes;
3364 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3365 				      data_sinfo->flags, bytes, 1);
3366 	spin_unlock(&data_sinfo->lock);
3367 
3368 	return 0;
3369 }
3370 
3371 /*
3372  * Called if we need to clear a data reservation for this inode.
3373  */
3374 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3375 {
3376 	struct btrfs_root *root = BTRFS_I(inode)->root;
3377 	struct btrfs_space_info *data_sinfo;
3378 
3379 	/* make sure bytes are sectorsize aligned */
3380 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3381 
3382 	data_sinfo = BTRFS_I(inode)->space_info;
3383 	spin_lock(&data_sinfo->lock);
3384 	data_sinfo->bytes_may_use -= bytes;
3385 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3386 				      data_sinfo->flags, bytes, 0);
3387 	spin_unlock(&data_sinfo->lock);
3388 }
3389 
3390 static void force_metadata_allocation(struct btrfs_fs_info *info)
3391 {
3392 	struct list_head *head = &info->space_info;
3393 	struct btrfs_space_info *found;
3394 
3395 	rcu_read_lock();
3396 	list_for_each_entry_rcu(found, head, list) {
3397 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3398 			found->force_alloc = CHUNK_ALLOC_FORCE;
3399 	}
3400 	rcu_read_unlock();
3401 }
3402 
3403 static int should_alloc_chunk(struct btrfs_root *root,
3404 			      struct btrfs_space_info *sinfo, u64 alloc_bytes,
3405 			      int force)
3406 {
3407 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3408 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3409 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3410 	u64 thresh;
3411 
3412 	if (force == CHUNK_ALLOC_FORCE)
3413 		return 1;
3414 
3415 	/*
3416 	 * We need to take into account the global rsv because for all intents
3417 	 * and purposes it's used space.  Don't worry about locking the
3418 	 * global_rsv, it doesn't change except when the transaction commits.
3419 	 */
3420 	num_allocated += global_rsv->size;
3421 
3422 	/*
3423 	 * in limited mode, we want to have some free space up to
3424 	 * about 1% of the FS size.
3425 	 */
3426 	if (force == CHUNK_ALLOC_LIMITED) {
3427 		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3428 		thresh = max_t(u64, 64 * 1024 * 1024,
3429 			       div_factor_fine(thresh, 1));
3430 
3431 		if (num_bytes - num_allocated < thresh)
3432 			return 1;
3433 	}
3434 	thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3435 
3436 	/* 256MB or 2% of the FS */
3437 	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
3438 	/* system chunks need a much small threshold */
3439 	if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
3440 		thresh = 32 * 1024 * 1024;
3441 
3442 	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
3443 		return 0;
3444 	return 1;
3445 }
3446 
3447 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3448 {
3449 	u64 num_dev;
3450 
3451 	if (type & BTRFS_BLOCK_GROUP_RAID10 ||
3452 	    type & BTRFS_BLOCK_GROUP_RAID0)
3453 		num_dev = root->fs_info->fs_devices->rw_devices;
3454 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
3455 		num_dev = 2;
3456 	else
3457 		num_dev = 1;	/* DUP or single */
3458 
3459 	/* metadata for updaing devices and chunk tree */
3460 	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3461 }
3462 
3463 static void check_system_chunk(struct btrfs_trans_handle *trans,
3464 			       struct btrfs_root *root, u64 type)
3465 {
3466 	struct btrfs_space_info *info;
3467 	u64 left;
3468 	u64 thresh;
3469 
3470 	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3471 	spin_lock(&info->lock);
3472 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3473 		info->bytes_reserved - info->bytes_readonly;
3474 	spin_unlock(&info->lock);
3475 
3476 	thresh = get_system_chunk_thresh(root, type);
3477 	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3478 		printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3479 		       left, thresh, type);
3480 		dump_space_info(info, 0, 0);
3481 	}
3482 
3483 	if (left < thresh) {
3484 		u64 flags;
3485 
3486 		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3487 		btrfs_alloc_chunk(trans, root, flags);
3488 	}
3489 }
3490 
3491 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3492 			  struct btrfs_root *extent_root, u64 alloc_bytes,
3493 			  u64 flags, int force)
3494 {
3495 	struct btrfs_space_info *space_info;
3496 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
3497 	int wait_for_alloc = 0;
3498 	int ret = 0;
3499 
3500 	space_info = __find_space_info(extent_root->fs_info, flags);
3501 	if (!space_info) {
3502 		ret = update_space_info(extent_root->fs_info, flags,
3503 					0, 0, &space_info);
3504 		BUG_ON(ret); /* -ENOMEM */
3505 	}
3506 	BUG_ON(!space_info); /* Logic error */
3507 
3508 again:
3509 	spin_lock(&space_info->lock);
3510 	if (force < space_info->force_alloc)
3511 		force = space_info->force_alloc;
3512 	if (space_info->full) {
3513 		spin_unlock(&space_info->lock);
3514 		return 0;
3515 	}
3516 
3517 	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3518 		spin_unlock(&space_info->lock);
3519 		return 0;
3520 	} else if (space_info->chunk_alloc) {
3521 		wait_for_alloc = 1;
3522 	} else {
3523 		space_info->chunk_alloc = 1;
3524 	}
3525 
3526 	spin_unlock(&space_info->lock);
3527 
3528 	mutex_lock(&fs_info->chunk_mutex);
3529 
3530 	/*
3531 	 * The chunk_mutex is held throughout the entirety of a chunk
3532 	 * allocation, so once we've acquired the chunk_mutex we know that the
3533 	 * other guy is done and we need to recheck and see if we should
3534 	 * allocate.
3535 	 */
3536 	if (wait_for_alloc) {
3537 		mutex_unlock(&fs_info->chunk_mutex);
3538 		wait_for_alloc = 0;
3539 		goto again;
3540 	}
3541 
3542 	/*
3543 	 * If we have mixed data/metadata chunks we want to make sure we keep
3544 	 * allocating mixed chunks instead of individual chunks.
3545 	 */
3546 	if (btrfs_mixed_space_info(space_info))
3547 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3548 
3549 	/*
3550 	 * if we're doing a data chunk, go ahead and make sure that
3551 	 * we keep a reasonable number of metadata chunks allocated in the
3552 	 * FS as well.
3553 	 */
3554 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3555 		fs_info->data_chunk_allocations++;
3556 		if (!(fs_info->data_chunk_allocations %
3557 		      fs_info->metadata_ratio))
3558 			force_metadata_allocation(fs_info);
3559 	}
3560 
3561 	/*
3562 	 * Check if we have enough space in SYSTEM chunk because we may need
3563 	 * to update devices.
3564 	 */
3565 	check_system_chunk(trans, extent_root, flags);
3566 
3567 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
3568 	if (ret < 0 && ret != -ENOSPC)
3569 		goto out;
3570 
3571 	spin_lock(&space_info->lock);
3572 	if (ret)
3573 		space_info->full = 1;
3574 	else
3575 		ret = 1;
3576 
3577 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3578 	space_info->chunk_alloc = 0;
3579 	spin_unlock(&space_info->lock);
3580 out:
3581 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
3582 	return ret;
3583 }
3584 
3585 /*
3586  * shrink metadata reservation for delalloc
3587  */
3588 static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
3589 			   bool wait_ordered)
3590 {
3591 	struct btrfs_block_rsv *block_rsv;
3592 	struct btrfs_space_info *space_info;
3593 	struct btrfs_trans_handle *trans;
3594 	u64 reserved;
3595 	u64 max_reclaim;
3596 	u64 reclaimed = 0;
3597 	long time_left;
3598 	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3599 	int loops = 0;
3600 	unsigned long progress;
3601 
3602 	trans = (struct btrfs_trans_handle *)current->journal_info;
3603 	block_rsv = &root->fs_info->delalloc_block_rsv;
3604 	space_info = block_rsv->space_info;
3605 
3606 	smp_mb();
3607 	reserved = space_info->bytes_may_use;
3608 	progress = space_info->reservation_progress;
3609 
3610 	if (reserved == 0)
3611 		return 0;
3612 
3613 	smp_mb();
3614 	if (root->fs_info->delalloc_bytes == 0) {
3615 		if (trans)
3616 			return 0;
3617 		btrfs_wait_ordered_extents(root, 0, 0);
3618 		return 0;
3619 	}
3620 
3621 	max_reclaim = min(reserved, to_reclaim);
3622 	nr_pages = max_t(unsigned long, nr_pages,
3623 			 max_reclaim >> PAGE_CACHE_SHIFT);
3624 	while (loops < 1024) {
3625 		/* have the flusher threads jump in and do some IO */
3626 		smp_mb();
3627 		nr_pages = min_t(unsigned long, nr_pages,
3628 		       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3629 		writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3630 						WB_REASON_FS_FREE_SPACE);
3631 
3632 		spin_lock(&space_info->lock);
3633 		if (reserved > space_info->bytes_may_use)
3634 			reclaimed += reserved - space_info->bytes_may_use;
3635 		reserved = space_info->bytes_may_use;
3636 		spin_unlock(&space_info->lock);
3637 
3638 		loops++;
3639 
3640 		if (reserved == 0 || reclaimed >= max_reclaim)
3641 			break;
3642 
3643 		if (trans && trans->transaction->blocked)
3644 			return -EAGAIN;
3645 
3646 		if (wait_ordered && !trans) {
3647 			btrfs_wait_ordered_extents(root, 0, 0);
3648 		} else {
3649 			time_left = schedule_timeout_interruptible(1);
3650 
3651 			/* We were interrupted, exit */
3652 			if (time_left)
3653 				break;
3654 		}
3655 
3656 		/* we've kicked the IO a few times, if anything has been freed,
3657 		 * exit.  There is no sense in looping here for a long time
3658 		 * when we really need to commit the transaction, or there are
3659 		 * just too many writers without enough free space
3660 		 */
3661 
3662 		if (loops > 3) {
3663 			smp_mb();
3664 			if (progress != space_info->reservation_progress)
3665 				break;
3666 		}
3667 
3668 	}
3669 
3670 	return reclaimed >= to_reclaim;
3671 }
3672 
3673 /**
3674  * maybe_commit_transaction - possibly commit the transaction if its ok to
3675  * @root - the root we're allocating for
3676  * @bytes - the number of bytes we want to reserve
3677  * @force - force the commit
3678  *
3679  * This will check to make sure that committing the transaction will actually
3680  * get us somewhere and then commit the transaction if it does.  Otherwise it
3681  * will return -ENOSPC.
3682  */
3683 static int may_commit_transaction(struct btrfs_root *root,
3684 				  struct btrfs_space_info *space_info,
3685 				  u64 bytes, int force)
3686 {
3687 	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3688 	struct btrfs_trans_handle *trans;
3689 
3690 	trans = (struct btrfs_trans_handle *)current->journal_info;
3691 	if (trans)
3692 		return -EAGAIN;
3693 
3694 	if (force)
3695 		goto commit;
3696 
3697 	/* See if there is enough pinned space to make this reservation */
3698 	spin_lock(&space_info->lock);
3699 	if (space_info->bytes_pinned >= bytes) {
3700 		spin_unlock(&space_info->lock);
3701 		goto commit;
3702 	}
3703 	spin_unlock(&space_info->lock);
3704 
3705 	/*
3706 	 * See if there is some space in the delayed insertion reservation for
3707 	 * this reservation.
3708 	 */
3709 	if (space_info != delayed_rsv->space_info)
3710 		return -ENOSPC;
3711 
3712 	spin_lock(&space_info->lock);
3713 	spin_lock(&delayed_rsv->lock);
3714 	if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3715 		spin_unlock(&delayed_rsv->lock);
3716 		spin_unlock(&space_info->lock);
3717 		return -ENOSPC;
3718 	}
3719 	spin_unlock(&delayed_rsv->lock);
3720 	spin_unlock(&space_info->lock);
3721 
3722 commit:
3723 	trans = btrfs_join_transaction(root);
3724 	if (IS_ERR(trans))
3725 		return -ENOSPC;
3726 
3727 	return btrfs_commit_transaction(trans, root);
3728 }
3729 
3730 /**
3731  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3732  * @root - the root we're allocating for
3733  * @block_rsv - the block_rsv we're allocating for
3734  * @orig_bytes - the number of bytes we want
3735  * @flush - wether or not we can flush to make our reservation
3736  *
3737  * This will reserve orgi_bytes number of bytes from the space info associated
3738  * with the block_rsv.  If there is not enough space it will make an attempt to
3739  * flush out space to make room.  It will do this by flushing delalloc if
3740  * possible or committing the transaction.  If flush is 0 then no attempts to
3741  * regain reservations will be made and this will fail if there is not enough
3742  * space already.
3743  */
3744 static int reserve_metadata_bytes(struct btrfs_root *root,
3745 				  struct btrfs_block_rsv *block_rsv,
3746 				  u64 orig_bytes, int flush)
3747 {
3748 	struct btrfs_space_info *space_info = block_rsv->space_info;
3749 	u64 used;
3750 	u64 num_bytes = orig_bytes;
3751 	int retries = 0;
3752 	int ret = 0;
3753 	bool committed = false;
3754 	bool flushing = false;
3755 	bool wait_ordered = false;
3756 
3757 again:
3758 	ret = 0;
3759 	spin_lock(&space_info->lock);
3760 	/*
3761 	 * We only want to wait if somebody other than us is flushing and we are
3762 	 * actually alloed to flush.
3763 	 */
3764 	while (flush && !flushing && space_info->flush) {
3765 		spin_unlock(&space_info->lock);
3766 		/*
3767 		 * If we have a trans handle we can't wait because the flusher
3768 		 * may have to commit the transaction, which would mean we would
3769 		 * deadlock since we are waiting for the flusher to finish, but
3770 		 * hold the current transaction open.
3771 		 */
3772 		if (current->journal_info)
3773 			return -EAGAIN;
3774 		ret = wait_event_killable(space_info->wait, !space_info->flush);
3775 		/* Must have been killed, return */
3776 		if (ret)
3777 			return -EINTR;
3778 
3779 		spin_lock(&space_info->lock);
3780 	}
3781 
3782 	ret = -ENOSPC;
3783 	used = space_info->bytes_used + space_info->bytes_reserved +
3784 		space_info->bytes_pinned + space_info->bytes_readonly +
3785 		space_info->bytes_may_use;
3786 
3787 	/*
3788 	 * The idea here is that we've not already over-reserved the block group
3789 	 * then we can go ahead and save our reservation first and then start
3790 	 * flushing if we need to.  Otherwise if we've already overcommitted
3791 	 * lets start flushing stuff first and then come back and try to make
3792 	 * our reservation.
3793 	 */
3794 	if (used <= space_info->total_bytes) {
3795 		if (used + orig_bytes <= space_info->total_bytes) {
3796 			space_info->bytes_may_use += orig_bytes;
3797 			trace_btrfs_space_reservation(root->fs_info,
3798 				"space_info", space_info->flags, orig_bytes, 1);
3799 			ret = 0;
3800 		} else {
3801 			/*
3802 			 * Ok set num_bytes to orig_bytes since we aren't
3803 			 * overocmmitted, this way we only try and reclaim what
3804 			 * we need.
3805 			 */
3806 			num_bytes = orig_bytes;
3807 		}
3808 	} else {
3809 		/*
3810 		 * Ok we're over committed, set num_bytes to the overcommitted
3811 		 * amount plus the amount of bytes that we need for this
3812 		 * reservation.
3813 		 */
3814 		wait_ordered = true;
3815 		num_bytes = used - space_info->total_bytes +
3816 			(orig_bytes * (retries + 1));
3817 	}
3818 
3819 	if (ret) {
3820 		u64 profile = btrfs_get_alloc_profile(root, 0);
3821 		u64 avail;
3822 
3823 		/*
3824 		 * If we have a lot of space that's pinned, don't bother doing
3825 		 * the overcommit dance yet and just commit the transaction.
3826 		 */
3827 		avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3828 		do_div(avail, 10);
3829 		if (space_info->bytes_pinned >= avail && flush && !committed) {
3830 			space_info->flush = 1;
3831 			flushing = true;
3832 			spin_unlock(&space_info->lock);
3833 			ret = may_commit_transaction(root, space_info,
3834 						     orig_bytes, 1);
3835 			if (ret)
3836 				goto out;
3837 			committed = true;
3838 			goto again;
3839 		}
3840 
3841 		spin_lock(&root->fs_info->free_chunk_lock);
3842 		avail = root->fs_info->free_chunk_space;
3843 
3844 		/*
3845 		 * If we have dup, raid1 or raid10 then only half of the free
3846 		 * space is actually useable.
3847 		 */
3848 		if (profile & (BTRFS_BLOCK_GROUP_DUP |
3849 			       BTRFS_BLOCK_GROUP_RAID1 |
3850 			       BTRFS_BLOCK_GROUP_RAID10))
3851 			avail >>= 1;
3852 
3853 		/*
3854 		 * If we aren't flushing don't let us overcommit too much, say
3855 		 * 1/8th of the space.  If we can flush, let it overcommit up to
3856 		 * 1/2 of the space.
3857 		 */
3858 		if (flush)
3859 			avail >>= 3;
3860 		else
3861 			avail >>= 1;
3862 		 spin_unlock(&root->fs_info->free_chunk_lock);
3863 
3864 		if (used + num_bytes < space_info->total_bytes + avail) {
3865 			space_info->bytes_may_use += orig_bytes;
3866 			trace_btrfs_space_reservation(root->fs_info,
3867 				"space_info", space_info->flags, orig_bytes, 1);
3868 			ret = 0;
3869 		} else {
3870 			wait_ordered = true;
3871 		}
3872 	}
3873 
3874 	/*
3875 	 * Couldn't make our reservation, save our place so while we're trying
3876 	 * to reclaim space we can actually use it instead of somebody else
3877 	 * stealing it from us.
3878 	 */
3879 	if (ret && flush) {
3880 		flushing = true;
3881 		space_info->flush = 1;
3882 	}
3883 
3884 	spin_unlock(&space_info->lock);
3885 
3886 	if (!ret || !flush)
3887 		goto out;
3888 
3889 	/*
3890 	 * We do synchronous shrinking since we don't actually unreserve
3891 	 * metadata until after the IO is completed.
3892 	 */
3893 	ret = shrink_delalloc(root, num_bytes, wait_ordered);
3894 	if (ret < 0)
3895 		goto out;
3896 
3897 	ret = 0;
3898 
3899 	/*
3900 	 * So if we were overcommitted it's possible that somebody else flushed
3901 	 * out enough space and we simply didn't have enough space to reclaim,
3902 	 * so go back around and try again.
3903 	 */
3904 	if (retries < 2) {
3905 		wait_ordered = true;
3906 		retries++;
3907 		goto again;
3908 	}
3909 
3910 	ret = -ENOSPC;
3911 	if (committed)
3912 		goto out;
3913 
3914 	ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3915 	if (!ret) {
3916 		committed = true;
3917 		goto again;
3918 	}
3919 
3920 out:
3921 	if (flushing) {
3922 		spin_lock(&space_info->lock);
3923 		space_info->flush = 0;
3924 		wake_up_all(&space_info->wait);
3925 		spin_unlock(&space_info->lock);
3926 	}
3927 	return ret;
3928 }
3929 
3930 static struct btrfs_block_rsv *get_block_rsv(
3931 					const struct btrfs_trans_handle *trans,
3932 					const struct btrfs_root *root)
3933 {
3934 	struct btrfs_block_rsv *block_rsv = NULL;
3935 
3936 	if (root->ref_cows || root == root->fs_info->csum_root)
3937 		block_rsv = trans->block_rsv;
3938 
3939 	if (!block_rsv)
3940 		block_rsv = root->block_rsv;
3941 
3942 	if (!block_rsv)
3943 		block_rsv = &root->fs_info->empty_block_rsv;
3944 
3945 	return block_rsv;
3946 }
3947 
3948 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3949 			       u64 num_bytes)
3950 {
3951 	int ret = -ENOSPC;
3952 	spin_lock(&block_rsv->lock);
3953 	if (block_rsv->reserved >= num_bytes) {
3954 		block_rsv->reserved -= num_bytes;
3955 		if (block_rsv->reserved < block_rsv->size)
3956 			block_rsv->full = 0;
3957 		ret = 0;
3958 	}
3959 	spin_unlock(&block_rsv->lock);
3960 	return ret;
3961 }
3962 
3963 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3964 				u64 num_bytes, int update_size)
3965 {
3966 	spin_lock(&block_rsv->lock);
3967 	block_rsv->reserved += num_bytes;
3968 	if (update_size)
3969 		block_rsv->size += num_bytes;
3970 	else if (block_rsv->reserved >= block_rsv->size)
3971 		block_rsv->full = 1;
3972 	spin_unlock(&block_rsv->lock);
3973 }
3974 
3975 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
3976 				    struct btrfs_block_rsv *block_rsv,
3977 				    struct btrfs_block_rsv *dest, u64 num_bytes)
3978 {
3979 	struct btrfs_space_info *space_info = block_rsv->space_info;
3980 
3981 	spin_lock(&block_rsv->lock);
3982 	if (num_bytes == (u64)-1)
3983 		num_bytes = block_rsv->size;
3984 	block_rsv->size -= num_bytes;
3985 	if (block_rsv->reserved >= block_rsv->size) {
3986 		num_bytes = block_rsv->reserved - block_rsv->size;
3987 		block_rsv->reserved = block_rsv->size;
3988 		block_rsv->full = 1;
3989 	} else {
3990 		num_bytes = 0;
3991 	}
3992 	spin_unlock(&block_rsv->lock);
3993 
3994 	if (num_bytes > 0) {
3995 		if (dest) {
3996 			spin_lock(&dest->lock);
3997 			if (!dest->full) {
3998 				u64 bytes_to_add;
3999 
4000 				bytes_to_add = dest->size - dest->reserved;
4001 				bytes_to_add = min(num_bytes, bytes_to_add);
4002 				dest->reserved += bytes_to_add;
4003 				if (dest->reserved >= dest->size)
4004 					dest->full = 1;
4005 				num_bytes -= bytes_to_add;
4006 			}
4007 			spin_unlock(&dest->lock);
4008 		}
4009 		if (num_bytes) {
4010 			spin_lock(&space_info->lock);
4011 			space_info->bytes_may_use -= num_bytes;
4012 			trace_btrfs_space_reservation(fs_info, "space_info",
4013 					space_info->flags, num_bytes, 0);
4014 			space_info->reservation_progress++;
4015 			spin_unlock(&space_info->lock);
4016 		}
4017 	}
4018 }
4019 
4020 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4021 				   struct btrfs_block_rsv *dst, u64 num_bytes)
4022 {
4023 	int ret;
4024 
4025 	ret = block_rsv_use_bytes(src, num_bytes);
4026 	if (ret)
4027 		return ret;
4028 
4029 	block_rsv_add_bytes(dst, num_bytes, 1);
4030 	return 0;
4031 }
4032 
4033 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
4034 {
4035 	memset(rsv, 0, sizeof(*rsv));
4036 	spin_lock_init(&rsv->lock);
4037 }
4038 
4039 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
4040 {
4041 	struct btrfs_block_rsv *block_rsv;
4042 	struct btrfs_fs_info *fs_info = root->fs_info;
4043 
4044 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4045 	if (!block_rsv)
4046 		return NULL;
4047 
4048 	btrfs_init_block_rsv(block_rsv);
4049 	block_rsv->space_info = __find_space_info(fs_info,
4050 						  BTRFS_BLOCK_GROUP_METADATA);
4051 	return block_rsv;
4052 }
4053 
4054 void btrfs_free_block_rsv(struct btrfs_root *root,
4055 			  struct btrfs_block_rsv *rsv)
4056 {
4057 	btrfs_block_rsv_release(root, rsv, (u64)-1);
4058 	kfree(rsv);
4059 }
4060 
4061 static inline int __block_rsv_add(struct btrfs_root *root,
4062 				  struct btrfs_block_rsv *block_rsv,
4063 				  u64 num_bytes, int flush)
4064 {
4065 	int ret;
4066 
4067 	if (num_bytes == 0)
4068 		return 0;
4069 
4070 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4071 	if (!ret) {
4072 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
4073 		return 0;
4074 	}
4075 
4076 	return ret;
4077 }
4078 
4079 int btrfs_block_rsv_add(struct btrfs_root *root,
4080 			struct btrfs_block_rsv *block_rsv,
4081 			u64 num_bytes)
4082 {
4083 	return __block_rsv_add(root, block_rsv, num_bytes, 1);
4084 }
4085 
4086 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
4087 				struct btrfs_block_rsv *block_rsv,
4088 				u64 num_bytes)
4089 {
4090 	return __block_rsv_add(root, block_rsv, num_bytes, 0);
4091 }
4092 
4093 int btrfs_block_rsv_check(struct btrfs_root *root,
4094 			  struct btrfs_block_rsv *block_rsv, int min_factor)
4095 {
4096 	u64 num_bytes = 0;
4097 	int ret = -ENOSPC;
4098 
4099 	if (!block_rsv)
4100 		return 0;
4101 
4102 	spin_lock(&block_rsv->lock);
4103 	num_bytes = div_factor(block_rsv->size, min_factor);
4104 	if (block_rsv->reserved >= num_bytes)
4105 		ret = 0;
4106 	spin_unlock(&block_rsv->lock);
4107 
4108 	return ret;
4109 }
4110 
4111 static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
4112 					   struct btrfs_block_rsv *block_rsv,
4113 					   u64 min_reserved, int flush)
4114 {
4115 	u64 num_bytes = 0;
4116 	int ret = -ENOSPC;
4117 
4118 	if (!block_rsv)
4119 		return 0;
4120 
4121 	spin_lock(&block_rsv->lock);
4122 	num_bytes = min_reserved;
4123 	if (block_rsv->reserved >= num_bytes)
4124 		ret = 0;
4125 	else
4126 		num_bytes -= block_rsv->reserved;
4127 	spin_unlock(&block_rsv->lock);
4128 
4129 	if (!ret)
4130 		return 0;
4131 
4132 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4133 	if (!ret) {
4134 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
4135 		return 0;
4136 	}
4137 
4138 	return ret;
4139 }
4140 
4141 int btrfs_block_rsv_refill(struct btrfs_root *root,
4142 			   struct btrfs_block_rsv *block_rsv,
4143 			   u64 min_reserved)
4144 {
4145 	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4146 }
4147 
4148 int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4149 				   struct btrfs_block_rsv *block_rsv,
4150 				   u64 min_reserved)
4151 {
4152 	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4153 }
4154 
4155 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4156 			    struct btrfs_block_rsv *dst_rsv,
4157 			    u64 num_bytes)
4158 {
4159 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4160 }
4161 
4162 void btrfs_block_rsv_release(struct btrfs_root *root,
4163 			     struct btrfs_block_rsv *block_rsv,
4164 			     u64 num_bytes)
4165 {
4166 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4167 	if (global_rsv->full || global_rsv == block_rsv ||
4168 	    block_rsv->space_info != global_rsv->space_info)
4169 		global_rsv = NULL;
4170 	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4171 				num_bytes);
4172 }
4173 
4174 /*
4175  * helper to calculate size of global block reservation.
4176  * the desired value is sum of space used by extent tree,
4177  * checksum tree and root tree
4178  */
4179 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4180 {
4181 	struct btrfs_space_info *sinfo;
4182 	u64 num_bytes;
4183 	u64 meta_used;
4184 	u64 data_used;
4185 	int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4186 
4187 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4188 	spin_lock(&sinfo->lock);
4189 	data_used = sinfo->bytes_used;
4190 	spin_unlock(&sinfo->lock);
4191 
4192 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4193 	spin_lock(&sinfo->lock);
4194 	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4195 		data_used = 0;
4196 	meta_used = sinfo->bytes_used;
4197 	spin_unlock(&sinfo->lock);
4198 
4199 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4200 		    csum_size * 2;
4201 	num_bytes += div64_u64(data_used + meta_used, 50);
4202 
4203 	if (num_bytes * 3 > meta_used)
4204 		num_bytes = div64_u64(meta_used, 3);
4205 
4206 	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4207 }
4208 
4209 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4210 {
4211 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4212 	struct btrfs_space_info *sinfo = block_rsv->space_info;
4213 	u64 num_bytes;
4214 
4215 	num_bytes = calc_global_metadata_size(fs_info);
4216 
4217 	spin_lock(&sinfo->lock);
4218 	spin_lock(&block_rsv->lock);
4219 
4220 	block_rsv->size = num_bytes;
4221 
4222 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4223 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
4224 		    sinfo->bytes_may_use;
4225 
4226 	if (sinfo->total_bytes > num_bytes) {
4227 		num_bytes = sinfo->total_bytes - num_bytes;
4228 		block_rsv->reserved += num_bytes;
4229 		sinfo->bytes_may_use += num_bytes;
4230 		trace_btrfs_space_reservation(fs_info, "space_info",
4231 				      sinfo->flags, num_bytes, 1);
4232 	}
4233 
4234 	if (block_rsv->reserved >= block_rsv->size) {
4235 		num_bytes = block_rsv->reserved - block_rsv->size;
4236 		sinfo->bytes_may_use -= num_bytes;
4237 		trace_btrfs_space_reservation(fs_info, "space_info",
4238 				      sinfo->flags, num_bytes, 0);
4239 		sinfo->reservation_progress++;
4240 		block_rsv->reserved = block_rsv->size;
4241 		block_rsv->full = 1;
4242 	}
4243 
4244 	spin_unlock(&block_rsv->lock);
4245 	spin_unlock(&sinfo->lock);
4246 }
4247 
4248 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4249 {
4250 	struct btrfs_space_info *space_info;
4251 
4252 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4253 	fs_info->chunk_block_rsv.space_info = space_info;
4254 
4255 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4256 	fs_info->global_block_rsv.space_info = space_info;
4257 	fs_info->delalloc_block_rsv.space_info = space_info;
4258 	fs_info->trans_block_rsv.space_info = space_info;
4259 	fs_info->empty_block_rsv.space_info = space_info;
4260 	fs_info->delayed_block_rsv.space_info = space_info;
4261 
4262 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4263 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4264 	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4265 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4266 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4267 
4268 	update_global_block_rsv(fs_info);
4269 }
4270 
4271 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4272 {
4273 	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4274 				(u64)-1);
4275 	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4276 	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4277 	WARN_ON(fs_info->trans_block_rsv.size > 0);
4278 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4279 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
4280 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4281 	WARN_ON(fs_info->delayed_block_rsv.size > 0);
4282 	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4283 }
4284 
4285 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4286 				  struct btrfs_root *root)
4287 {
4288 	if (!trans->bytes_reserved)
4289 		return;
4290 
4291 	trace_btrfs_space_reservation(root->fs_info, "transaction",
4292 				      trans->transid, trans->bytes_reserved, 0);
4293 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4294 	trans->bytes_reserved = 0;
4295 }
4296 
4297 /* Can only return 0 or -ENOSPC */
4298 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4299 				  struct inode *inode)
4300 {
4301 	struct btrfs_root *root = BTRFS_I(inode)->root;
4302 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4303 	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4304 
4305 	/*
4306 	 * We need to hold space in order to delete our orphan item once we've
4307 	 * added it, so this takes the reservation so we can release it later
4308 	 * when we are truly done with the orphan item.
4309 	 */
4310 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4311 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4312 				      btrfs_ino(inode), num_bytes, 1);
4313 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4314 }
4315 
4316 void btrfs_orphan_release_metadata(struct inode *inode)
4317 {
4318 	struct btrfs_root *root = BTRFS_I(inode)->root;
4319 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4320 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4321 				      btrfs_ino(inode), num_bytes, 0);
4322 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4323 }
4324 
4325 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4326 				struct btrfs_pending_snapshot *pending)
4327 {
4328 	struct btrfs_root *root = pending->root;
4329 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4330 	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4331 	/*
4332 	 * two for root back/forward refs, two for directory entries
4333 	 * and one for root of the snapshot.
4334 	 */
4335 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4336 	dst_rsv->space_info = src_rsv->space_info;
4337 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4338 }
4339 
4340 /**
4341  * drop_outstanding_extent - drop an outstanding extent
4342  * @inode: the inode we're dropping the extent for
4343  *
4344  * This is called when we are freeing up an outstanding extent, either called
4345  * after an error or after an extent is written.  This will return the number of
4346  * reserved extents that need to be freed.  This must be called with
4347  * BTRFS_I(inode)->lock held.
4348  */
4349 static unsigned drop_outstanding_extent(struct inode *inode)
4350 {
4351 	unsigned drop_inode_space = 0;
4352 	unsigned dropped_extents = 0;
4353 
4354 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4355 	BTRFS_I(inode)->outstanding_extents--;
4356 
4357 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
4358 	    BTRFS_I(inode)->delalloc_meta_reserved) {
4359 		drop_inode_space = 1;
4360 		BTRFS_I(inode)->delalloc_meta_reserved = 0;
4361 	}
4362 
4363 	/*
4364 	 * If we have more or the same amount of outsanding extents than we have
4365 	 * reserved then we need to leave the reserved extents count alone.
4366 	 */
4367 	if (BTRFS_I(inode)->outstanding_extents >=
4368 	    BTRFS_I(inode)->reserved_extents)
4369 		return drop_inode_space;
4370 
4371 	dropped_extents = BTRFS_I(inode)->reserved_extents -
4372 		BTRFS_I(inode)->outstanding_extents;
4373 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
4374 	return dropped_extents + drop_inode_space;
4375 }
4376 
4377 /**
4378  * calc_csum_metadata_size - return the amount of metada space that must be
4379  *	reserved/free'd for the given bytes.
4380  * @inode: the inode we're manipulating
4381  * @num_bytes: the number of bytes in question
4382  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4383  *
4384  * This adjusts the number of csum_bytes in the inode and then returns the
4385  * correct amount of metadata that must either be reserved or freed.  We
4386  * calculate how many checksums we can fit into one leaf and then divide the
4387  * number of bytes that will need to be checksumed by this value to figure out
4388  * how many checksums will be required.  If we are adding bytes then the number
4389  * may go up and we will return the number of additional bytes that must be
4390  * reserved.  If it is going down we will return the number of bytes that must
4391  * be freed.
4392  *
4393  * This must be called with BTRFS_I(inode)->lock held.
4394  */
4395 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4396 				   int reserve)
4397 {
4398 	struct btrfs_root *root = BTRFS_I(inode)->root;
4399 	u64 csum_size;
4400 	int num_csums_per_leaf;
4401 	int num_csums;
4402 	int old_csums;
4403 
4404 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4405 	    BTRFS_I(inode)->csum_bytes == 0)
4406 		return 0;
4407 
4408 	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4409 	if (reserve)
4410 		BTRFS_I(inode)->csum_bytes += num_bytes;
4411 	else
4412 		BTRFS_I(inode)->csum_bytes -= num_bytes;
4413 	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4414 	num_csums_per_leaf = (int)div64_u64(csum_size,
4415 					    sizeof(struct btrfs_csum_item) +
4416 					    sizeof(struct btrfs_disk_key));
4417 	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4418 	num_csums = num_csums + num_csums_per_leaf - 1;
4419 	num_csums = num_csums / num_csums_per_leaf;
4420 
4421 	old_csums = old_csums + num_csums_per_leaf - 1;
4422 	old_csums = old_csums / num_csums_per_leaf;
4423 
4424 	/* No change, no need to reserve more */
4425 	if (old_csums == num_csums)
4426 		return 0;
4427 
4428 	if (reserve)
4429 		return btrfs_calc_trans_metadata_size(root,
4430 						      num_csums - old_csums);
4431 
4432 	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4433 }
4434 
4435 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4436 {
4437 	struct btrfs_root *root = BTRFS_I(inode)->root;
4438 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4439 	u64 to_reserve = 0;
4440 	u64 csum_bytes;
4441 	unsigned nr_extents = 0;
4442 	int extra_reserve = 0;
4443 	int flush = 1;
4444 	int ret;
4445 
4446 	/* Need to be holding the i_mutex here if we aren't free space cache */
4447 	if (btrfs_is_free_space_inode(root, inode))
4448 		flush = 0;
4449 
4450 	if (flush && btrfs_transaction_in_commit(root->fs_info))
4451 		schedule_timeout(1);
4452 
4453 	mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4454 	num_bytes = ALIGN(num_bytes, root->sectorsize);
4455 
4456 	spin_lock(&BTRFS_I(inode)->lock);
4457 	BTRFS_I(inode)->outstanding_extents++;
4458 
4459 	if (BTRFS_I(inode)->outstanding_extents >
4460 	    BTRFS_I(inode)->reserved_extents)
4461 		nr_extents = BTRFS_I(inode)->outstanding_extents -
4462 			BTRFS_I(inode)->reserved_extents;
4463 
4464 	/*
4465 	 * Add an item to reserve for updating the inode when we complete the
4466 	 * delalloc io.
4467 	 */
4468 	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4469 		nr_extents++;
4470 		extra_reserve = 1;
4471 	}
4472 
4473 	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4474 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4475 	csum_bytes = BTRFS_I(inode)->csum_bytes;
4476 	spin_unlock(&BTRFS_I(inode)->lock);
4477 
4478 	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4479 	if (ret) {
4480 		u64 to_free = 0;
4481 		unsigned dropped;
4482 
4483 		spin_lock(&BTRFS_I(inode)->lock);
4484 		dropped = drop_outstanding_extent(inode);
4485 		/*
4486 		 * If the inodes csum_bytes is the same as the original
4487 		 * csum_bytes then we know we haven't raced with any free()ers
4488 		 * so we can just reduce our inodes csum bytes and carry on.
4489 		 * Otherwise we have to do the normal free thing to account for
4490 		 * the case that the free side didn't free up its reserve
4491 		 * because of this outstanding reservation.
4492 		 */
4493 		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4494 			calc_csum_metadata_size(inode, num_bytes, 0);
4495 		else
4496 			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4497 		spin_unlock(&BTRFS_I(inode)->lock);
4498 		if (dropped)
4499 			to_free += btrfs_calc_trans_metadata_size(root, dropped);
4500 
4501 		if (to_free) {
4502 			btrfs_block_rsv_release(root, block_rsv, to_free);
4503 			trace_btrfs_space_reservation(root->fs_info,
4504 						      "delalloc",
4505 						      btrfs_ino(inode),
4506 						      to_free, 0);
4507 		}
4508 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4509 		return ret;
4510 	}
4511 
4512 	spin_lock(&BTRFS_I(inode)->lock);
4513 	if (extra_reserve) {
4514 		BTRFS_I(inode)->delalloc_meta_reserved = 1;
4515 		nr_extents--;
4516 	}
4517 	BTRFS_I(inode)->reserved_extents += nr_extents;
4518 	spin_unlock(&BTRFS_I(inode)->lock);
4519 	mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4520 
4521 	if (to_reserve)
4522 		trace_btrfs_space_reservation(root->fs_info,"delalloc",
4523 					      btrfs_ino(inode), to_reserve, 1);
4524 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
4525 
4526 	return 0;
4527 }
4528 
4529 /**
4530  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4531  * @inode: the inode to release the reservation for
4532  * @num_bytes: the number of bytes we're releasing
4533  *
4534  * This will release the metadata reservation for an inode.  This can be called
4535  * once we complete IO for a given set of bytes to release their metadata
4536  * reservations.
4537  */
4538 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4539 {
4540 	struct btrfs_root *root = BTRFS_I(inode)->root;
4541 	u64 to_free = 0;
4542 	unsigned dropped;
4543 
4544 	num_bytes = ALIGN(num_bytes, root->sectorsize);
4545 	spin_lock(&BTRFS_I(inode)->lock);
4546 	dropped = drop_outstanding_extent(inode);
4547 
4548 	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4549 	spin_unlock(&BTRFS_I(inode)->lock);
4550 	if (dropped > 0)
4551 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
4552 
4553 	trace_btrfs_space_reservation(root->fs_info, "delalloc",
4554 				      btrfs_ino(inode), to_free, 0);
4555 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4556 				to_free);
4557 }
4558 
4559 /**
4560  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4561  * @inode: inode we're writing to
4562  * @num_bytes: the number of bytes we want to allocate
4563  *
4564  * This will do the following things
4565  *
4566  * o reserve space in the data space info for num_bytes
4567  * o reserve space in the metadata space info based on number of outstanding
4568  *   extents and how much csums will be needed
4569  * o add to the inodes ->delalloc_bytes
4570  * o add it to the fs_info's delalloc inodes list.
4571  *
4572  * This will return 0 for success and -ENOSPC if there is no space left.
4573  */
4574 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4575 {
4576 	int ret;
4577 
4578 	ret = btrfs_check_data_free_space(inode, num_bytes);
4579 	if (ret)
4580 		return ret;
4581 
4582 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4583 	if (ret) {
4584 		btrfs_free_reserved_data_space(inode, num_bytes);
4585 		return ret;
4586 	}
4587 
4588 	return 0;
4589 }
4590 
4591 /**
4592  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4593  * @inode: inode we're releasing space for
4594  * @num_bytes: the number of bytes we want to free up
4595  *
4596  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4597  * called in the case that we don't need the metadata AND data reservations
4598  * anymore.  So if there is an error or we insert an inline extent.
4599  *
4600  * This function will release the metadata space that was not used and will
4601  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4602  * list if there are no delalloc bytes left.
4603  */
4604 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4605 {
4606 	btrfs_delalloc_release_metadata(inode, num_bytes);
4607 	btrfs_free_reserved_data_space(inode, num_bytes);
4608 }
4609 
4610 static int update_block_group(struct btrfs_trans_handle *trans,
4611 			      struct btrfs_root *root,
4612 			      u64 bytenr, u64 num_bytes, int alloc)
4613 {
4614 	struct btrfs_block_group_cache *cache = NULL;
4615 	struct btrfs_fs_info *info = root->fs_info;
4616 	u64 total = num_bytes;
4617 	u64 old_val;
4618 	u64 byte_in_group;
4619 	int factor;
4620 
4621 	/* block accounting for super block */
4622 	spin_lock(&info->delalloc_lock);
4623 	old_val = btrfs_super_bytes_used(info->super_copy);
4624 	if (alloc)
4625 		old_val += num_bytes;
4626 	else
4627 		old_val -= num_bytes;
4628 	btrfs_set_super_bytes_used(info->super_copy, old_val);
4629 	spin_unlock(&info->delalloc_lock);
4630 
4631 	while (total) {
4632 		cache = btrfs_lookup_block_group(info, bytenr);
4633 		if (!cache)
4634 			return -ENOENT;
4635 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4636 				    BTRFS_BLOCK_GROUP_RAID1 |
4637 				    BTRFS_BLOCK_GROUP_RAID10))
4638 			factor = 2;
4639 		else
4640 			factor = 1;
4641 		/*
4642 		 * If this block group has free space cache written out, we
4643 		 * need to make sure to load it if we are removing space.  This
4644 		 * is because we need the unpinning stage to actually add the
4645 		 * space back to the block group, otherwise we will leak space.
4646 		 */
4647 		if (!alloc && cache->cached == BTRFS_CACHE_NO)
4648 			cache_block_group(cache, trans, NULL, 1);
4649 
4650 		byte_in_group = bytenr - cache->key.objectid;
4651 		WARN_ON(byte_in_group > cache->key.offset);
4652 
4653 		spin_lock(&cache->space_info->lock);
4654 		spin_lock(&cache->lock);
4655 
4656 		if (btrfs_test_opt(root, SPACE_CACHE) &&
4657 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
4658 			cache->disk_cache_state = BTRFS_DC_CLEAR;
4659 
4660 		cache->dirty = 1;
4661 		old_val = btrfs_block_group_used(&cache->item);
4662 		num_bytes = min(total, cache->key.offset - byte_in_group);
4663 		if (alloc) {
4664 			old_val += num_bytes;
4665 			btrfs_set_block_group_used(&cache->item, old_val);
4666 			cache->reserved -= num_bytes;
4667 			cache->space_info->bytes_reserved -= num_bytes;
4668 			cache->space_info->bytes_used += num_bytes;
4669 			cache->space_info->disk_used += num_bytes * factor;
4670 			spin_unlock(&cache->lock);
4671 			spin_unlock(&cache->space_info->lock);
4672 		} else {
4673 			old_val -= num_bytes;
4674 			btrfs_set_block_group_used(&cache->item, old_val);
4675 			cache->pinned += num_bytes;
4676 			cache->space_info->bytes_pinned += num_bytes;
4677 			cache->space_info->bytes_used -= num_bytes;
4678 			cache->space_info->disk_used -= num_bytes * factor;
4679 			spin_unlock(&cache->lock);
4680 			spin_unlock(&cache->space_info->lock);
4681 
4682 			set_extent_dirty(info->pinned_extents,
4683 					 bytenr, bytenr + num_bytes - 1,
4684 					 GFP_NOFS | __GFP_NOFAIL);
4685 		}
4686 		btrfs_put_block_group(cache);
4687 		total -= num_bytes;
4688 		bytenr += num_bytes;
4689 	}
4690 	return 0;
4691 }
4692 
4693 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4694 {
4695 	struct btrfs_block_group_cache *cache;
4696 	u64 bytenr;
4697 
4698 	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4699 	if (!cache)
4700 		return 0;
4701 
4702 	bytenr = cache->key.objectid;
4703 	btrfs_put_block_group(cache);
4704 
4705 	return bytenr;
4706 }
4707 
4708 static int pin_down_extent(struct btrfs_root *root,
4709 			   struct btrfs_block_group_cache *cache,
4710 			   u64 bytenr, u64 num_bytes, int reserved)
4711 {
4712 	spin_lock(&cache->space_info->lock);
4713 	spin_lock(&cache->lock);
4714 	cache->pinned += num_bytes;
4715 	cache->space_info->bytes_pinned += num_bytes;
4716 	if (reserved) {
4717 		cache->reserved -= num_bytes;
4718 		cache->space_info->bytes_reserved -= num_bytes;
4719 	}
4720 	spin_unlock(&cache->lock);
4721 	spin_unlock(&cache->space_info->lock);
4722 
4723 	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4724 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4725 	return 0;
4726 }
4727 
4728 /*
4729  * this function must be called within transaction
4730  */
4731 int btrfs_pin_extent(struct btrfs_root *root,
4732 		     u64 bytenr, u64 num_bytes, int reserved)
4733 {
4734 	struct btrfs_block_group_cache *cache;
4735 
4736 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4737 	BUG_ON(!cache); /* Logic error */
4738 
4739 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4740 
4741 	btrfs_put_block_group(cache);
4742 	return 0;
4743 }
4744 
4745 /*
4746  * this function must be called within transaction
4747  */
4748 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4749 				    struct btrfs_root *root,
4750 				    u64 bytenr, u64 num_bytes)
4751 {
4752 	struct btrfs_block_group_cache *cache;
4753 
4754 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4755 	BUG_ON(!cache); /* Logic error */
4756 
4757 	/*
4758 	 * pull in the free space cache (if any) so that our pin
4759 	 * removes the free space from the cache.  We have load_only set
4760 	 * to one because the slow code to read in the free extents does check
4761 	 * the pinned extents.
4762 	 */
4763 	cache_block_group(cache, trans, root, 1);
4764 
4765 	pin_down_extent(root, cache, bytenr, num_bytes, 0);
4766 
4767 	/* remove us from the free space cache (if we're there at all) */
4768 	btrfs_remove_free_space(cache, bytenr, num_bytes);
4769 	btrfs_put_block_group(cache);
4770 	return 0;
4771 }
4772 
4773 /**
4774  * btrfs_update_reserved_bytes - update the block_group and space info counters
4775  * @cache:	The cache we are manipulating
4776  * @num_bytes:	The number of bytes in question
4777  * @reserve:	One of the reservation enums
4778  *
4779  * This is called by the allocator when it reserves space, or by somebody who is
4780  * freeing space that was never actually used on disk.  For example if you
4781  * reserve some space for a new leaf in transaction A and before transaction A
4782  * commits you free that leaf, you call this with reserve set to 0 in order to
4783  * clear the reservation.
4784  *
4785  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4786  * ENOSPC accounting.  For data we handle the reservation through clearing the
4787  * delalloc bits in the io_tree.  We have to do this since we could end up
4788  * allocating less disk space for the amount of data we have reserved in the
4789  * case of compression.
4790  *
4791  * If this is a reservation and the block group has become read only we cannot
4792  * make the reservation and return -EAGAIN, otherwise this function always
4793  * succeeds.
4794  */
4795 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4796 				       u64 num_bytes, int reserve)
4797 {
4798 	struct btrfs_space_info *space_info = cache->space_info;
4799 	int ret = 0;
4800 
4801 	spin_lock(&space_info->lock);
4802 	spin_lock(&cache->lock);
4803 	if (reserve != RESERVE_FREE) {
4804 		if (cache->ro) {
4805 			ret = -EAGAIN;
4806 		} else {
4807 			cache->reserved += num_bytes;
4808 			space_info->bytes_reserved += num_bytes;
4809 			if (reserve == RESERVE_ALLOC) {
4810 				trace_btrfs_space_reservation(cache->fs_info,
4811 						"space_info", space_info->flags,
4812 						num_bytes, 0);
4813 				space_info->bytes_may_use -= num_bytes;
4814 			}
4815 		}
4816 	} else {
4817 		if (cache->ro)
4818 			space_info->bytes_readonly += num_bytes;
4819 		cache->reserved -= num_bytes;
4820 		space_info->bytes_reserved -= num_bytes;
4821 		space_info->reservation_progress++;
4822 	}
4823 	spin_unlock(&cache->lock);
4824 	spin_unlock(&space_info->lock);
4825 	return ret;
4826 }
4827 
4828 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4829 				struct btrfs_root *root)
4830 {
4831 	struct btrfs_fs_info *fs_info = root->fs_info;
4832 	struct btrfs_caching_control *next;
4833 	struct btrfs_caching_control *caching_ctl;
4834 	struct btrfs_block_group_cache *cache;
4835 
4836 	down_write(&fs_info->extent_commit_sem);
4837 
4838 	list_for_each_entry_safe(caching_ctl, next,
4839 				 &fs_info->caching_block_groups, list) {
4840 		cache = caching_ctl->block_group;
4841 		if (block_group_cache_done(cache)) {
4842 			cache->last_byte_to_unpin = (u64)-1;
4843 			list_del_init(&caching_ctl->list);
4844 			put_caching_control(caching_ctl);
4845 		} else {
4846 			cache->last_byte_to_unpin = caching_ctl->progress;
4847 		}
4848 	}
4849 
4850 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4851 		fs_info->pinned_extents = &fs_info->freed_extents[1];
4852 	else
4853 		fs_info->pinned_extents = &fs_info->freed_extents[0];
4854 
4855 	up_write(&fs_info->extent_commit_sem);
4856 
4857 	update_global_block_rsv(fs_info);
4858 }
4859 
4860 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4861 {
4862 	struct btrfs_fs_info *fs_info = root->fs_info;
4863 	struct btrfs_block_group_cache *cache = NULL;
4864 	u64 len;
4865 
4866 	while (start <= end) {
4867 		if (!cache ||
4868 		    start >= cache->key.objectid + cache->key.offset) {
4869 			if (cache)
4870 				btrfs_put_block_group(cache);
4871 			cache = btrfs_lookup_block_group(fs_info, start);
4872 			BUG_ON(!cache); /* Logic error */
4873 		}
4874 
4875 		len = cache->key.objectid + cache->key.offset - start;
4876 		len = min(len, end + 1 - start);
4877 
4878 		if (start < cache->last_byte_to_unpin) {
4879 			len = min(len, cache->last_byte_to_unpin - start);
4880 			btrfs_add_free_space(cache, start, len);
4881 		}
4882 
4883 		start += len;
4884 
4885 		spin_lock(&cache->space_info->lock);
4886 		spin_lock(&cache->lock);
4887 		cache->pinned -= len;
4888 		cache->space_info->bytes_pinned -= len;
4889 		if (cache->ro)
4890 			cache->space_info->bytes_readonly += len;
4891 		spin_unlock(&cache->lock);
4892 		spin_unlock(&cache->space_info->lock);
4893 	}
4894 
4895 	if (cache)
4896 		btrfs_put_block_group(cache);
4897 	return 0;
4898 }
4899 
4900 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4901 			       struct btrfs_root *root)
4902 {
4903 	struct btrfs_fs_info *fs_info = root->fs_info;
4904 	struct extent_io_tree *unpin;
4905 	u64 start;
4906 	u64 end;
4907 	int ret;
4908 
4909 	if (trans->aborted)
4910 		return 0;
4911 
4912 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4913 		unpin = &fs_info->freed_extents[1];
4914 	else
4915 		unpin = &fs_info->freed_extents[0];
4916 
4917 	while (1) {
4918 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4919 					    EXTENT_DIRTY);
4920 		if (ret)
4921 			break;
4922 
4923 		if (btrfs_test_opt(root, DISCARD))
4924 			ret = btrfs_discard_extent(root, start,
4925 						   end + 1 - start, NULL);
4926 
4927 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
4928 		unpin_extent_range(root, start, end);
4929 		cond_resched();
4930 	}
4931 
4932 	return 0;
4933 }
4934 
4935 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4936 				struct btrfs_root *root,
4937 				u64 bytenr, u64 num_bytes, u64 parent,
4938 				u64 root_objectid, u64 owner_objectid,
4939 				u64 owner_offset, int refs_to_drop,
4940 				struct btrfs_delayed_extent_op *extent_op)
4941 {
4942 	struct btrfs_key key;
4943 	struct btrfs_path *path;
4944 	struct btrfs_fs_info *info = root->fs_info;
4945 	struct btrfs_root *extent_root = info->extent_root;
4946 	struct extent_buffer *leaf;
4947 	struct btrfs_extent_item *ei;
4948 	struct btrfs_extent_inline_ref *iref;
4949 	int ret;
4950 	int is_data;
4951 	int extent_slot = 0;
4952 	int found_extent = 0;
4953 	int num_to_del = 1;
4954 	u32 item_size;
4955 	u64 refs;
4956 
4957 	path = btrfs_alloc_path();
4958 	if (!path)
4959 		return -ENOMEM;
4960 
4961 	path->reada = 1;
4962 	path->leave_spinning = 1;
4963 
4964 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4965 	BUG_ON(!is_data && refs_to_drop != 1);
4966 
4967 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
4968 				    bytenr, num_bytes, parent,
4969 				    root_objectid, owner_objectid,
4970 				    owner_offset);
4971 	if (ret == 0) {
4972 		extent_slot = path->slots[0];
4973 		while (extent_slot >= 0) {
4974 			btrfs_item_key_to_cpu(path->nodes[0], &key,
4975 					      extent_slot);
4976 			if (key.objectid != bytenr)
4977 				break;
4978 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4979 			    key.offset == num_bytes) {
4980 				found_extent = 1;
4981 				break;
4982 			}
4983 			if (path->slots[0] - extent_slot > 5)
4984 				break;
4985 			extent_slot--;
4986 		}
4987 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4988 		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4989 		if (found_extent && item_size < sizeof(*ei))
4990 			found_extent = 0;
4991 #endif
4992 		if (!found_extent) {
4993 			BUG_ON(iref);
4994 			ret = remove_extent_backref(trans, extent_root, path,
4995 						    NULL, refs_to_drop,
4996 						    is_data);
4997 			if (ret)
4998 				goto abort;
4999 			btrfs_release_path(path);
5000 			path->leave_spinning = 1;
5001 
5002 			key.objectid = bytenr;
5003 			key.type = BTRFS_EXTENT_ITEM_KEY;
5004 			key.offset = num_bytes;
5005 
5006 			ret = btrfs_search_slot(trans, extent_root,
5007 						&key, path, -1, 1);
5008 			if (ret) {
5009 				printk(KERN_ERR "umm, got %d back from search"
5010 				       ", was looking for %llu\n", ret,
5011 				       (unsigned long long)bytenr);
5012 				if (ret > 0)
5013 					btrfs_print_leaf(extent_root,
5014 							 path->nodes[0]);
5015 			}
5016 			if (ret < 0)
5017 				goto abort;
5018 			extent_slot = path->slots[0];
5019 		}
5020 	} else if (ret == -ENOENT) {
5021 		btrfs_print_leaf(extent_root, path->nodes[0]);
5022 		WARN_ON(1);
5023 		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5024 		       "parent %llu root %llu  owner %llu offset %llu\n",
5025 		       (unsigned long long)bytenr,
5026 		       (unsigned long long)parent,
5027 		       (unsigned long long)root_objectid,
5028 		       (unsigned long long)owner_objectid,
5029 		       (unsigned long long)owner_offset);
5030 	} else {
5031 		goto abort;
5032 	}
5033 
5034 	leaf = path->nodes[0];
5035 	item_size = btrfs_item_size_nr(leaf, extent_slot);
5036 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5037 	if (item_size < sizeof(*ei)) {
5038 		BUG_ON(found_extent || extent_slot != path->slots[0]);
5039 		ret = convert_extent_item_v0(trans, extent_root, path,
5040 					     owner_objectid, 0);
5041 		if (ret < 0)
5042 			goto abort;
5043 
5044 		btrfs_release_path(path);
5045 		path->leave_spinning = 1;
5046 
5047 		key.objectid = bytenr;
5048 		key.type = BTRFS_EXTENT_ITEM_KEY;
5049 		key.offset = num_bytes;
5050 
5051 		ret = btrfs_search_slot(trans, extent_root, &key, path,
5052 					-1, 1);
5053 		if (ret) {
5054 			printk(KERN_ERR "umm, got %d back from search"
5055 			       ", was looking for %llu\n", ret,
5056 			       (unsigned long long)bytenr);
5057 			btrfs_print_leaf(extent_root, path->nodes[0]);
5058 		}
5059 		if (ret < 0)
5060 			goto abort;
5061 		extent_slot = path->slots[0];
5062 		leaf = path->nodes[0];
5063 		item_size = btrfs_item_size_nr(leaf, extent_slot);
5064 	}
5065 #endif
5066 	BUG_ON(item_size < sizeof(*ei));
5067 	ei = btrfs_item_ptr(leaf, extent_slot,
5068 			    struct btrfs_extent_item);
5069 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5070 		struct btrfs_tree_block_info *bi;
5071 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5072 		bi = (struct btrfs_tree_block_info *)(ei + 1);
5073 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5074 	}
5075 
5076 	refs = btrfs_extent_refs(leaf, ei);
5077 	BUG_ON(refs < refs_to_drop);
5078 	refs -= refs_to_drop;
5079 
5080 	if (refs > 0) {
5081 		if (extent_op)
5082 			__run_delayed_extent_op(extent_op, leaf, ei);
5083 		/*
5084 		 * In the case of inline back ref, reference count will
5085 		 * be updated by remove_extent_backref
5086 		 */
5087 		if (iref) {
5088 			BUG_ON(!found_extent);
5089 		} else {
5090 			btrfs_set_extent_refs(leaf, ei, refs);
5091 			btrfs_mark_buffer_dirty(leaf);
5092 		}
5093 		if (found_extent) {
5094 			ret = remove_extent_backref(trans, extent_root, path,
5095 						    iref, refs_to_drop,
5096 						    is_data);
5097 			if (ret)
5098 				goto abort;
5099 		}
5100 	} else {
5101 		if (found_extent) {
5102 			BUG_ON(is_data && refs_to_drop !=
5103 			       extent_data_ref_count(root, path, iref));
5104 			if (iref) {
5105 				BUG_ON(path->slots[0] != extent_slot);
5106 			} else {
5107 				BUG_ON(path->slots[0] != extent_slot + 1);
5108 				path->slots[0] = extent_slot;
5109 				num_to_del = 2;
5110 			}
5111 		}
5112 
5113 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5114 				      num_to_del);
5115 		if (ret)
5116 			goto abort;
5117 		btrfs_release_path(path);
5118 
5119 		if (is_data) {
5120 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5121 			if (ret)
5122 				goto abort;
5123 		}
5124 
5125 		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5126 		if (ret)
5127 			goto abort;
5128 	}
5129 out:
5130 	btrfs_free_path(path);
5131 	return ret;
5132 
5133 abort:
5134 	btrfs_abort_transaction(trans, extent_root, ret);
5135 	goto out;
5136 }
5137 
5138 /*
5139  * when we free an block, it is possible (and likely) that we free the last
5140  * delayed ref for that extent as well.  This searches the delayed ref tree for
5141  * a given extent, and if there are no other delayed refs to be processed, it
5142  * removes it from the tree.
5143  */
5144 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5145 				      struct btrfs_root *root, u64 bytenr)
5146 {
5147 	struct btrfs_delayed_ref_head *head;
5148 	struct btrfs_delayed_ref_root *delayed_refs;
5149 	struct btrfs_delayed_ref_node *ref;
5150 	struct rb_node *node;
5151 	int ret = 0;
5152 
5153 	delayed_refs = &trans->transaction->delayed_refs;
5154 	spin_lock(&delayed_refs->lock);
5155 	head = btrfs_find_delayed_ref_head(trans, bytenr);
5156 	if (!head)
5157 		goto out;
5158 
5159 	node = rb_prev(&head->node.rb_node);
5160 	if (!node)
5161 		goto out;
5162 
5163 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5164 
5165 	/* there are still entries for this ref, we can't drop it */
5166 	if (ref->bytenr == bytenr)
5167 		goto out;
5168 
5169 	if (head->extent_op) {
5170 		if (!head->must_insert_reserved)
5171 			goto out;
5172 		kfree(head->extent_op);
5173 		head->extent_op = NULL;
5174 	}
5175 
5176 	/*
5177 	 * waiting for the lock here would deadlock.  If someone else has it
5178 	 * locked they are already in the process of dropping it anyway
5179 	 */
5180 	if (!mutex_trylock(&head->mutex))
5181 		goto out;
5182 
5183 	/*
5184 	 * at this point we have a head with no other entries.  Go
5185 	 * ahead and process it.
5186 	 */
5187 	head->node.in_tree = 0;
5188 	rb_erase(&head->node.rb_node, &delayed_refs->root);
5189 
5190 	delayed_refs->num_entries--;
5191 	if (waitqueue_active(&delayed_refs->seq_wait))
5192 		wake_up(&delayed_refs->seq_wait);
5193 
5194 	/*
5195 	 * we don't take a ref on the node because we're removing it from the
5196 	 * tree, so we just steal the ref the tree was holding.
5197 	 */
5198 	delayed_refs->num_heads--;
5199 	if (list_empty(&head->cluster))
5200 		delayed_refs->num_heads_ready--;
5201 
5202 	list_del_init(&head->cluster);
5203 	spin_unlock(&delayed_refs->lock);
5204 
5205 	BUG_ON(head->extent_op);
5206 	if (head->must_insert_reserved)
5207 		ret = 1;
5208 
5209 	mutex_unlock(&head->mutex);
5210 	btrfs_put_delayed_ref(&head->node);
5211 	return ret;
5212 out:
5213 	spin_unlock(&delayed_refs->lock);
5214 	return 0;
5215 }
5216 
5217 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5218 			   struct btrfs_root *root,
5219 			   struct extent_buffer *buf,
5220 			   u64 parent, int last_ref, int for_cow)
5221 {
5222 	struct btrfs_block_group_cache *cache = NULL;
5223 	int ret;
5224 
5225 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5226 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5227 					buf->start, buf->len,
5228 					parent, root->root_key.objectid,
5229 					btrfs_header_level(buf),
5230 					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5231 		BUG_ON(ret); /* -ENOMEM */
5232 	}
5233 
5234 	if (!last_ref)
5235 		return;
5236 
5237 	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5238 
5239 	if (btrfs_header_generation(buf) == trans->transid) {
5240 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5241 			ret = check_ref_cleanup(trans, root, buf->start);
5242 			if (!ret)
5243 				goto out;
5244 		}
5245 
5246 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5247 			pin_down_extent(root, cache, buf->start, buf->len, 1);
5248 			goto out;
5249 		}
5250 
5251 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5252 
5253 		btrfs_add_free_space(cache, buf->start, buf->len);
5254 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5255 	}
5256 out:
5257 	/*
5258 	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
5259 	 * anymore.
5260 	 */
5261 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5262 	btrfs_put_block_group(cache);
5263 }
5264 
5265 /* Can return -ENOMEM */
5266 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5267 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5268 		      u64 owner, u64 offset, int for_cow)
5269 {
5270 	int ret;
5271 	struct btrfs_fs_info *fs_info = root->fs_info;
5272 
5273 	/*
5274 	 * tree log blocks never actually go into the extent allocation
5275 	 * tree, just update pinning info and exit early.
5276 	 */
5277 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5278 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5279 		/* unlocks the pinned mutex */
5280 		btrfs_pin_extent(root, bytenr, num_bytes, 1);
5281 		ret = 0;
5282 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5283 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5284 					num_bytes,
5285 					parent, root_objectid, (int)owner,
5286 					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5287 	} else {
5288 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5289 						num_bytes,
5290 						parent, root_objectid, owner,
5291 						offset, BTRFS_DROP_DELAYED_REF,
5292 						NULL, for_cow);
5293 	}
5294 	return ret;
5295 }
5296 
5297 static u64 stripe_align(struct btrfs_root *root, u64 val)
5298 {
5299 	u64 mask = ((u64)root->stripesize - 1);
5300 	u64 ret = (val + mask) & ~mask;
5301 	return ret;
5302 }
5303 
5304 /*
5305  * when we wait for progress in the block group caching, its because
5306  * our allocation attempt failed at least once.  So, we must sleep
5307  * and let some progress happen before we try again.
5308  *
5309  * This function will sleep at least once waiting for new free space to
5310  * show up, and then it will check the block group free space numbers
5311  * for our min num_bytes.  Another option is to have it go ahead
5312  * and look in the rbtree for a free extent of a given size, but this
5313  * is a good start.
5314  */
5315 static noinline int
5316 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5317 				u64 num_bytes)
5318 {
5319 	struct btrfs_caching_control *caching_ctl;
5320 	DEFINE_WAIT(wait);
5321 
5322 	caching_ctl = get_caching_control(cache);
5323 	if (!caching_ctl)
5324 		return 0;
5325 
5326 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5327 		   (cache->free_space_ctl->free_space >= num_bytes));
5328 
5329 	put_caching_control(caching_ctl);
5330 	return 0;
5331 }
5332 
5333 static noinline int
5334 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5335 {
5336 	struct btrfs_caching_control *caching_ctl;
5337 	DEFINE_WAIT(wait);
5338 
5339 	caching_ctl = get_caching_control(cache);
5340 	if (!caching_ctl)
5341 		return 0;
5342 
5343 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
5344 
5345 	put_caching_control(caching_ctl);
5346 	return 0;
5347 }
5348 
5349 static int __get_block_group_index(u64 flags)
5350 {
5351 	int index;
5352 
5353 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
5354 		index = 0;
5355 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5356 		index = 1;
5357 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
5358 		index = 2;
5359 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5360 		index = 3;
5361 	else
5362 		index = 4;
5363 
5364 	return index;
5365 }
5366 
5367 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5368 {
5369 	return __get_block_group_index(cache->flags);
5370 }
5371 
5372 enum btrfs_loop_type {
5373 	LOOP_CACHING_NOWAIT = 0,
5374 	LOOP_CACHING_WAIT = 1,
5375 	LOOP_ALLOC_CHUNK = 2,
5376 	LOOP_NO_EMPTY_SIZE = 3,
5377 };
5378 
5379 /*
5380  * walks the btree of allocated extents and find a hole of a given size.
5381  * The key ins is changed to record the hole:
5382  * ins->objectid == block start
5383  * ins->flags = BTRFS_EXTENT_ITEM_KEY
5384  * ins->offset == number of blocks
5385  * Any available blocks before search_start are skipped.
5386  */
5387 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5388 				     struct btrfs_root *orig_root,
5389 				     u64 num_bytes, u64 empty_size,
5390 				     u64 hint_byte, struct btrfs_key *ins,
5391 				     u64 data)
5392 {
5393 	int ret = 0;
5394 	struct btrfs_root *root = orig_root->fs_info->extent_root;
5395 	struct btrfs_free_cluster *last_ptr = NULL;
5396 	struct btrfs_block_group_cache *block_group = NULL;
5397 	struct btrfs_block_group_cache *used_block_group;
5398 	u64 search_start = 0;
5399 	int empty_cluster = 2 * 1024 * 1024;
5400 	int allowed_chunk_alloc = 0;
5401 	int done_chunk_alloc = 0;
5402 	struct btrfs_space_info *space_info;
5403 	int loop = 0;
5404 	int index = 0;
5405 	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5406 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5407 	bool found_uncached_bg = false;
5408 	bool failed_cluster_refill = false;
5409 	bool failed_alloc = false;
5410 	bool use_cluster = true;
5411 	bool have_caching_bg = false;
5412 
5413 	WARN_ON(num_bytes < root->sectorsize);
5414 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5415 	ins->objectid = 0;
5416 	ins->offset = 0;
5417 
5418 	trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5419 
5420 	space_info = __find_space_info(root->fs_info, data);
5421 	if (!space_info) {
5422 		printk(KERN_ERR "No space info for %llu\n", data);
5423 		return -ENOSPC;
5424 	}
5425 
5426 	/*
5427 	 * If the space info is for both data and metadata it means we have a
5428 	 * small filesystem and we can't use the clustering stuff.
5429 	 */
5430 	if (btrfs_mixed_space_info(space_info))
5431 		use_cluster = false;
5432 
5433 	if (orig_root->ref_cows || empty_size)
5434 		allowed_chunk_alloc = 1;
5435 
5436 	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5437 		last_ptr = &root->fs_info->meta_alloc_cluster;
5438 		if (!btrfs_test_opt(root, SSD))
5439 			empty_cluster = 64 * 1024;
5440 	}
5441 
5442 	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5443 	    btrfs_test_opt(root, SSD)) {
5444 		last_ptr = &root->fs_info->data_alloc_cluster;
5445 	}
5446 
5447 	if (last_ptr) {
5448 		spin_lock(&last_ptr->lock);
5449 		if (last_ptr->block_group)
5450 			hint_byte = last_ptr->window_start;
5451 		spin_unlock(&last_ptr->lock);
5452 	}
5453 
5454 	search_start = max(search_start, first_logical_byte(root, 0));
5455 	search_start = max(search_start, hint_byte);
5456 
5457 	if (!last_ptr)
5458 		empty_cluster = 0;
5459 
5460 	if (search_start == hint_byte) {
5461 		block_group = btrfs_lookup_block_group(root->fs_info,
5462 						       search_start);
5463 		used_block_group = block_group;
5464 		/*
5465 		 * we don't want to use the block group if it doesn't match our
5466 		 * allocation bits, or if its not cached.
5467 		 *
5468 		 * However if we are re-searching with an ideal block group
5469 		 * picked out then we don't care that the block group is cached.
5470 		 */
5471 		if (block_group && block_group_bits(block_group, data) &&
5472 		    block_group->cached != BTRFS_CACHE_NO) {
5473 			down_read(&space_info->groups_sem);
5474 			if (list_empty(&block_group->list) ||
5475 			    block_group->ro) {
5476 				/*
5477 				 * someone is removing this block group,
5478 				 * we can't jump into the have_block_group
5479 				 * target because our list pointers are not
5480 				 * valid
5481 				 */
5482 				btrfs_put_block_group(block_group);
5483 				up_read(&space_info->groups_sem);
5484 			} else {
5485 				index = get_block_group_index(block_group);
5486 				goto have_block_group;
5487 			}
5488 		} else if (block_group) {
5489 			btrfs_put_block_group(block_group);
5490 		}
5491 	}
5492 search:
5493 	have_caching_bg = false;
5494 	down_read(&space_info->groups_sem);
5495 	list_for_each_entry(block_group, &space_info->block_groups[index],
5496 			    list) {
5497 		u64 offset;
5498 		int cached;
5499 
5500 		used_block_group = block_group;
5501 		btrfs_get_block_group(block_group);
5502 		search_start = block_group->key.objectid;
5503 
5504 		/*
5505 		 * this can happen if we end up cycling through all the
5506 		 * raid types, but we want to make sure we only allocate
5507 		 * for the proper type.
5508 		 */
5509 		if (!block_group_bits(block_group, data)) {
5510 		    u64 extra = BTRFS_BLOCK_GROUP_DUP |
5511 				BTRFS_BLOCK_GROUP_RAID1 |
5512 				BTRFS_BLOCK_GROUP_RAID10;
5513 
5514 			/*
5515 			 * if they asked for extra copies and this block group
5516 			 * doesn't provide them, bail.  This does allow us to
5517 			 * fill raid0 from raid1.
5518 			 */
5519 			if ((data & extra) && !(block_group->flags & extra))
5520 				goto loop;
5521 		}
5522 
5523 have_block_group:
5524 		cached = block_group_cache_done(block_group);
5525 		if (unlikely(!cached)) {
5526 			found_uncached_bg = true;
5527 			ret = cache_block_group(block_group, trans,
5528 						orig_root, 0);
5529 			BUG_ON(ret < 0);
5530 			ret = 0;
5531 		}
5532 
5533 		if (unlikely(block_group->ro))
5534 			goto loop;
5535 
5536 		/*
5537 		 * Ok we want to try and use the cluster allocator, so
5538 		 * lets look there
5539 		 */
5540 		if (last_ptr) {
5541 			/*
5542 			 * the refill lock keeps out other
5543 			 * people trying to start a new cluster
5544 			 */
5545 			spin_lock(&last_ptr->refill_lock);
5546 			used_block_group = last_ptr->block_group;
5547 			if (used_block_group != block_group &&
5548 			    (!used_block_group ||
5549 			     used_block_group->ro ||
5550 			     !block_group_bits(used_block_group, data))) {
5551 				used_block_group = block_group;
5552 				goto refill_cluster;
5553 			}
5554 
5555 			if (used_block_group != block_group)
5556 				btrfs_get_block_group(used_block_group);
5557 
5558 			offset = btrfs_alloc_from_cluster(used_block_group,
5559 			  last_ptr, num_bytes, used_block_group->key.objectid);
5560 			if (offset) {
5561 				/* we have a block, we're done */
5562 				spin_unlock(&last_ptr->refill_lock);
5563 				trace_btrfs_reserve_extent_cluster(root,
5564 					block_group, search_start, num_bytes);
5565 				goto checks;
5566 			}
5567 
5568 			WARN_ON(last_ptr->block_group != used_block_group);
5569 			if (used_block_group != block_group) {
5570 				btrfs_put_block_group(used_block_group);
5571 				used_block_group = block_group;
5572 			}
5573 refill_cluster:
5574 			BUG_ON(used_block_group != block_group);
5575 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
5576 			 * set up a new clusters, so lets just skip it
5577 			 * and let the allocator find whatever block
5578 			 * it can find.  If we reach this point, we
5579 			 * will have tried the cluster allocator
5580 			 * plenty of times and not have found
5581 			 * anything, so we are likely way too
5582 			 * fragmented for the clustering stuff to find
5583 			 * anything.
5584 			 *
5585 			 * However, if the cluster is taken from the
5586 			 * current block group, release the cluster
5587 			 * first, so that we stand a better chance of
5588 			 * succeeding in the unclustered
5589 			 * allocation.  */
5590 			if (loop >= LOOP_NO_EMPTY_SIZE &&
5591 			    last_ptr->block_group != block_group) {
5592 				spin_unlock(&last_ptr->refill_lock);
5593 				goto unclustered_alloc;
5594 			}
5595 
5596 			/*
5597 			 * this cluster didn't work out, free it and
5598 			 * start over
5599 			 */
5600 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
5601 
5602 			if (loop >= LOOP_NO_EMPTY_SIZE) {
5603 				spin_unlock(&last_ptr->refill_lock);
5604 				goto unclustered_alloc;
5605 			}
5606 
5607 			/* allocate a cluster in this block group */
5608 			ret = btrfs_find_space_cluster(trans, root,
5609 					       block_group, last_ptr,
5610 					       search_start, num_bytes,
5611 					       empty_cluster + empty_size);
5612 			if (ret == 0) {
5613 				/*
5614 				 * now pull our allocation out of this
5615 				 * cluster
5616 				 */
5617 				offset = btrfs_alloc_from_cluster(block_group,
5618 						  last_ptr, num_bytes,
5619 						  search_start);
5620 				if (offset) {
5621 					/* we found one, proceed */
5622 					spin_unlock(&last_ptr->refill_lock);
5623 					trace_btrfs_reserve_extent_cluster(root,
5624 						block_group, search_start,
5625 						num_bytes);
5626 					goto checks;
5627 				}
5628 			} else if (!cached && loop > LOOP_CACHING_NOWAIT
5629 				   && !failed_cluster_refill) {
5630 				spin_unlock(&last_ptr->refill_lock);
5631 
5632 				failed_cluster_refill = true;
5633 				wait_block_group_cache_progress(block_group,
5634 				       num_bytes + empty_cluster + empty_size);
5635 				goto have_block_group;
5636 			}
5637 
5638 			/*
5639 			 * at this point we either didn't find a cluster
5640 			 * or we weren't able to allocate a block from our
5641 			 * cluster.  Free the cluster we've been trying
5642 			 * to use, and go to the next block group
5643 			 */
5644 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
5645 			spin_unlock(&last_ptr->refill_lock);
5646 			goto loop;
5647 		}
5648 
5649 unclustered_alloc:
5650 		spin_lock(&block_group->free_space_ctl->tree_lock);
5651 		if (cached &&
5652 		    block_group->free_space_ctl->free_space <
5653 		    num_bytes + empty_cluster + empty_size) {
5654 			spin_unlock(&block_group->free_space_ctl->tree_lock);
5655 			goto loop;
5656 		}
5657 		spin_unlock(&block_group->free_space_ctl->tree_lock);
5658 
5659 		offset = btrfs_find_space_for_alloc(block_group, search_start,
5660 						    num_bytes, empty_size);
5661 		/*
5662 		 * If we didn't find a chunk, and we haven't failed on this
5663 		 * block group before, and this block group is in the middle of
5664 		 * caching and we are ok with waiting, then go ahead and wait
5665 		 * for progress to be made, and set failed_alloc to true.
5666 		 *
5667 		 * If failed_alloc is true then we've already waited on this
5668 		 * block group once and should move on to the next block group.
5669 		 */
5670 		if (!offset && !failed_alloc && !cached &&
5671 		    loop > LOOP_CACHING_NOWAIT) {
5672 			wait_block_group_cache_progress(block_group,
5673 						num_bytes + empty_size);
5674 			failed_alloc = true;
5675 			goto have_block_group;
5676 		} else if (!offset) {
5677 			if (!cached)
5678 				have_caching_bg = true;
5679 			goto loop;
5680 		}
5681 checks:
5682 		search_start = stripe_align(root, offset);
5683 
5684 		/* move on to the next group */
5685 		if (search_start + num_bytes >
5686 		    used_block_group->key.objectid + used_block_group->key.offset) {
5687 			btrfs_add_free_space(used_block_group, offset, num_bytes);
5688 			goto loop;
5689 		}
5690 
5691 		if (offset < search_start)
5692 			btrfs_add_free_space(used_block_group, offset,
5693 					     search_start - offset);
5694 		BUG_ON(offset > search_start);
5695 
5696 		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5697 						  alloc_type);
5698 		if (ret == -EAGAIN) {
5699 			btrfs_add_free_space(used_block_group, offset, num_bytes);
5700 			goto loop;
5701 		}
5702 
5703 		/* we are all good, lets return */
5704 		ins->objectid = search_start;
5705 		ins->offset = num_bytes;
5706 
5707 		trace_btrfs_reserve_extent(orig_root, block_group,
5708 					   search_start, num_bytes);
5709 		if (offset < search_start)
5710 			btrfs_add_free_space(used_block_group, offset,
5711 					     search_start - offset);
5712 		BUG_ON(offset > search_start);
5713 		if (used_block_group != block_group)
5714 			btrfs_put_block_group(used_block_group);
5715 		btrfs_put_block_group(block_group);
5716 		break;
5717 loop:
5718 		failed_cluster_refill = false;
5719 		failed_alloc = false;
5720 		BUG_ON(index != get_block_group_index(block_group));
5721 		if (used_block_group != block_group)
5722 			btrfs_put_block_group(used_block_group);
5723 		btrfs_put_block_group(block_group);
5724 	}
5725 	up_read(&space_info->groups_sem);
5726 
5727 	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5728 		goto search;
5729 
5730 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5731 		goto search;
5732 
5733 	/*
5734 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5735 	 *			caching kthreads as we move along
5736 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5737 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5738 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5739 	 *			again
5740 	 */
5741 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5742 		index = 0;
5743 		loop++;
5744 		if (loop == LOOP_ALLOC_CHUNK) {
5745 		       if (allowed_chunk_alloc) {
5746 				ret = do_chunk_alloc(trans, root, num_bytes +
5747 						     2 * 1024 * 1024, data,
5748 						     CHUNK_ALLOC_LIMITED);
5749 				if (ret < 0) {
5750 					btrfs_abort_transaction(trans,
5751 								root, ret);
5752 					goto out;
5753 				}
5754 				allowed_chunk_alloc = 0;
5755 				if (ret == 1)
5756 					done_chunk_alloc = 1;
5757 			} else if (!done_chunk_alloc &&
5758 				   space_info->force_alloc ==
5759 				   CHUNK_ALLOC_NO_FORCE) {
5760 				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5761 			}
5762 
5763 		       /*
5764 			* We didn't allocate a chunk, go ahead and drop the
5765 			* empty size and loop again.
5766 			*/
5767 		       if (!done_chunk_alloc)
5768 			       loop = LOOP_NO_EMPTY_SIZE;
5769 		}
5770 
5771 		if (loop == LOOP_NO_EMPTY_SIZE) {
5772 			empty_size = 0;
5773 			empty_cluster = 0;
5774 		}
5775 
5776 		goto search;
5777 	} else if (!ins->objectid) {
5778 		ret = -ENOSPC;
5779 	} else if (ins->objectid) {
5780 		ret = 0;
5781 	}
5782 out:
5783 
5784 	return ret;
5785 }
5786 
5787 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5788 			    int dump_block_groups)
5789 {
5790 	struct btrfs_block_group_cache *cache;
5791 	int index = 0;
5792 
5793 	spin_lock(&info->lock);
5794 	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5795 	       (unsigned long long)info->flags,
5796 	       (unsigned long long)(info->total_bytes - info->bytes_used -
5797 				    info->bytes_pinned - info->bytes_reserved -
5798 				    info->bytes_readonly),
5799 	       (info->full) ? "" : "not ");
5800 	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5801 	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
5802 	       (unsigned long long)info->total_bytes,
5803 	       (unsigned long long)info->bytes_used,
5804 	       (unsigned long long)info->bytes_pinned,
5805 	       (unsigned long long)info->bytes_reserved,
5806 	       (unsigned long long)info->bytes_may_use,
5807 	       (unsigned long long)info->bytes_readonly);
5808 	spin_unlock(&info->lock);
5809 
5810 	if (!dump_block_groups)
5811 		return;
5812 
5813 	down_read(&info->groups_sem);
5814 again:
5815 	list_for_each_entry(cache, &info->block_groups[index], list) {
5816 		spin_lock(&cache->lock);
5817 		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5818 		       "%llu pinned %llu reserved\n",
5819 		       (unsigned long long)cache->key.objectid,
5820 		       (unsigned long long)cache->key.offset,
5821 		       (unsigned long long)btrfs_block_group_used(&cache->item),
5822 		       (unsigned long long)cache->pinned,
5823 		       (unsigned long long)cache->reserved);
5824 		btrfs_dump_free_space(cache, bytes);
5825 		spin_unlock(&cache->lock);
5826 	}
5827 	if (++index < BTRFS_NR_RAID_TYPES)
5828 		goto again;
5829 	up_read(&info->groups_sem);
5830 }
5831 
5832 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5833 			 struct btrfs_root *root,
5834 			 u64 num_bytes, u64 min_alloc_size,
5835 			 u64 empty_size, u64 hint_byte,
5836 			 struct btrfs_key *ins, u64 data)
5837 {
5838 	bool final_tried = false;
5839 	int ret;
5840 
5841 	data = btrfs_get_alloc_profile(root, data);
5842 again:
5843 	/*
5844 	 * the only place that sets empty_size is btrfs_realloc_node, which
5845 	 * is not called recursively on allocations
5846 	 */
5847 	if (empty_size || root->ref_cows) {
5848 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5849 				     num_bytes + 2 * 1024 * 1024, data,
5850 				     CHUNK_ALLOC_NO_FORCE);
5851 		if (ret < 0 && ret != -ENOSPC) {
5852 			btrfs_abort_transaction(trans, root, ret);
5853 			return ret;
5854 		}
5855 	}
5856 
5857 	WARN_ON(num_bytes < root->sectorsize);
5858 	ret = find_free_extent(trans, root, num_bytes, empty_size,
5859 			       hint_byte, ins, data);
5860 
5861 	if (ret == -ENOSPC) {
5862 		if (!final_tried) {
5863 			num_bytes = num_bytes >> 1;
5864 			num_bytes = num_bytes & ~(root->sectorsize - 1);
5865 			num_bytes = max(num_bytes, min_alloc_size);
5866 			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5867 				       num_bytes, data, CHUNK_ALLOC_FORCE);
5868 			if (ret < 0 && ret != -ENOSPC) {
5869 				btrfs_abort_transaction(trans, root, ret);
5870 				return ret;
5871 			}
5872 			if (num_bytes == min_alloc_size)
5873 				final_tried = true;
5874 			goto again;
5875 		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
5876 			struct btrfs_space_info *sinfo;
5877 
5878 			sinfo = __find_space_info(root->fs_info, data);
5879 			printk(KERN_ERR "btrfs allocation failed flags %llu, "
5880 			       "wanted %llu\n", (unsigned long long)data,
5881 			       (unsigned long long)num_bytes);
5882 			if (sinfo)
5883 				dump_space_info(sinfo, num_bytes, 1);
5884 		}
5885 	}
5886 
5887 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5888 
5889 	return ret;
5890 }
5891 
5892 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
5893 					u64 start, u64 len, int pin)
5894 {
5895 	struct btrfs_block_group_cache *cache;
5896 	int ret = 0;
5897 
5898 	cache = btrfs_lookup_block_group(root->fs_info, start);
5899 	if (!cache) {
5900 		printk(KERN_ERR "Unable to find block group for %llu\n",
5901 		       (unsigned long long)start);
5902 		return -ENOSPC;
5903 	}
5904 
5905 	if (btrfs_test_opt(root, DISCARD))
5906 		ret = btrfs_discard_extent(root, start, len, NULL);
5907 
5908 	if (pin)
5909 		pin_down_extent(root, cache, start, len, 1);
5910 	else {
5911 		btrfs_add_free_space(cache, start, len);
5912 		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
5913 	}
5914 	btrfs_put_block_group(cache);
5915 
5916 	trace_btrfs_reserved_extent_free(root, start, len);
5917 
5918 	return ret;
5919 }
5920 
5921 int btrfs_free_reserved_extent(struct btrfs_root *root,
5922 					u64 start, u64 len)
5923 {
5924 	return __btrfs_free_reserved_extent(root, start, len, 0);
5925 }
5926 
5927 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
5928 				       u64 start, u64 len)
5929 {
5930 	return __btrfs_free_reserved_extent(root, start, len, 1);
5931 }
5932 
5933 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5934 				      struct btrfs_root *root,
5935 				      u64 parent, u64 root_objectid,
5936 				      u64 flags, u64 owner, u64 offset,
5937 				      struct btrfs_key *ins, int ref_mod)
5938 {
5939 	int ret;
5940 	struct btrfs_fs_info *fs_info = root->fs_info;
5941 	struct btrfs_extent_item *extent_item;
5942 	struct btrfs_extent_inline_ref *iref;
5943 	struct btrfs_path *path;
5944 	struct extent_buffer *leaf;
5945 	int type;
5946 	u32 size;
5947 
5948 	if (parent > 0)
5949 		type = BTRFS_SHARED_DATA_REF_KEY;
5950 	else
5951 		type = BTRFS_EXTENT_DATA_REF_KEY;
5952 
5953 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5954 
5955 	path = btrfs_alloc_path();
5956 	if (!path)
5957 		return -ENOMEM;
5958 
5959 	path->leave_spinning = 1;
5960 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5961 				      ins, size);
5962 	if (ret) {
5963 		btrfs_free_path(path);
5964 		return ret;
5965 	}
5966 
5967 	leaf = path->nodes[0];
5968 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
5969 				     struct btrfs_extent_item);
5970 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5971 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5972 	btrfs_set_extent_flags(leaf, extent_item,
5973 			       flags | BTRFS_EXTENT_FLAG_DATA);
5974 
5975 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5976 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
5977 	if (parent > 0) {
5978 		struct btrfs_shared_data_ref *ref;
5979 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
5980 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5981 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5982 	} else {
5983 		struct btrfs_extent_data_ref *ref;
5984 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5985 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5986 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5987 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5988 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5989 	}
5990 
5991 	btrfs_mark_buffer_dirty(path->nodes[0]);
5992 	btrfs_free_path(path);
5993 
5994 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5995 	if (ret) { /* -ENOENT, logic error */
5996 		printk(KERN_ERR "btrfs update block group failed for %llu "
5997 		       "%llu\n", (unsigned long long)ins->objectid,
5998 		       (unsigned long long)ins->offset);
5999 		BUG();
6000 	}
6001 	return ret;
6002 }
6003 
6004 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6005 				     struct btrfs_root *root,
6006 				     u64 parent, u64 root_objectid,
6007 				     u64 flags, struct btrfs_disk_key *key,
6008 				     int level, struct btrfs_key *ins)
6009 {
6010 	int ret;
6011 	struct btrfs_fs_info *fs_info = root->fs_info;
6012 	struct btrfs_extent_item *extent_item;
6013 	struct btrfs_tree_block_info *block_info;
6014 	struct btrfs_extent_inline_ref *iref;
6015 	struct btrfs_path *path;
6016 	struct extent_buffer *leaf;
6017 	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6018 
6019 	path = btrfs_alloc_path();
6020 	if (!path)
6021 		return -ENOMEM;
6022 
6023 	path->leave_spinning = 1;
6024 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6025 				      ins, size);
6026 	if (ret) {
6027 		btrfs_free_path(path);
6028 		return ret;
6029 	}
6030 
6031 	leaf = path->nodes[0];
6032 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
6033 				     struct btrfs_extent_item);
6034 	btrfs_set_extent_refs(leaf, extent_item, 1);
6035 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6036 	btrfs_set_extent_flags(leaf, extent_item,
6037 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6038 	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6039 
6040 	btrfs_set_tree_block_key(leaf, block_info, key);
6041 	btrfs_set_tree_block_level(leaf, block_info, level);
6042 
6043 	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6044 	if (parent > 0) {
6045 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6046 		btrfs_set_extent_inline_ref_type(leaf, iref,
6047 						 BTRFS_SHARED_BLOCK_REF_KEY);
6048 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6049 	} else {
6050 		btrfs_set_extent_inline_ref_type(leaf, iref,
6051 						 BTRFS_TREE_BLOCK_REF_KEY);
6052 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6053 	}
6054 
6055 	btrfs_mark_buffer_dirty(leaf);
6056 	btrfs_free_path(path);
6057 
6058 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6059 	if (ret) { /* -ENOENT, logic error */
6060 		printk(KERN_ERR "btrfs update block group failed for %llu "
6061 		       "%llu\n", (unsigned long long)ins->objectid,
6062 		       (unsigned long long)ins->offset);
6063 		BUG();
6064 	}
6065 	return ret;
6066 }
6067 
6068 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6069 				     struct btrfs_root *root,
6070 				     u64 root_objectid, u64 owner,
6071 				     u64 offset, struct btrfs_key *ins)
6072 {
6073 	int ret;
6074 
6075 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6076 
6077 	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6078 					 ins->offset, 0,
6079 					 root_objectid, owner, offset,
6080 					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6081 	return ret;
6082 }
6083 
6084 /*
6085  * this is used by the tree logging recovery code.  It records that
6086  * an extent has been allocated and makes sure to clear the free
6087  * space cache bits as well
6088  */
6089 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6090 				   struct btrfs_root *root,
6091 				   u64 root_objectid, u64 owner, u64 offset,
6092 				   struct btrfs_key *ins)
6093 {
6094 	int ret;
6095 	struct btrfs_block_group_cache *block_group;
6096 	struct btrfs_caching_control *caching_ctl;
6097 	u64 start = ins->objectid;
6098 	u64 num_bytes = ins->offset;
6099 
6100 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6101 	cache_block_group(block_group, trans, NULL, 0);
6102 	caching_ctl = get_caching_control(block_group);
6103 
6104 	if (!caching_ctl) {
6105 		BUG_ON(!block_group_cache_done(block_group));
6106 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
6107 		BUG_ON(ret); /* -ENOMEM */
6108 	} else {
6109 		mutex_lock(&caching_ctl->mutex);
6110 
6111 		if (start >= caching_ctl->progress) {
6112 			ret = add_excluded_extent(root, start, num_bytes);
6113 			BUG_ON(ret); /* -ENOMEM */
6114 		} else if (start + num_bytes <= caching_ctl->progress) {
6115 			ret = btrfs_remove_free_space(block_group,
6116 						      start, num_bytes);
6117 			BUG_ON(ret); /* -ENOMEM */
6118 		} else {
6119 			num_bytes = caching_ctl->progress - start;
6120 			ret = btrfs_remove_free_space(block_group,
6121 						      start, num_bytes);
6122 			BUG_ON(ret); /* -ENOMEM */
6123 
6124 			start = caching_ctl->progress;
6125 			num_bytes = ins->objectid + ins->offset -
6126 				    caching_ctl->progress;
6127 			ret = add_excluded_extent(root, start, num_bytes);
6128 			BUG_ON(ret); /* -ENOMEM */
6129 		}
6130 
6131 		mutex_unlock(&caching_ctl->mutex);
6132 		put_caching_control(caching_ctl);
6133 	}
6134 
6135 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6136 					  RESERVE_ALLOC_NO_ACCOUNT);
6137 	BUG_ON(ret); /* logic error */
6138 	btrfs_put_block_group(block_group);
6139 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6140 					 0, owner, offset, ins, 1);
6141 	return ret;
6142 }
6143 
6144 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6145 					    struct btrfs_root *root,
6146 					    u64 bytenr, u32 blocksize,
6147 					    int level)
6148 {
6149 	struct extent_buffer *buf;
6150 
6151 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6152 	if (!buf)
6153 		return ERR_PTR(-ENOMEM);
6154 	btrfs_set_header_generation(buf, trans->transid);
6155 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6156 	btrfs_tree_lock(buf);
6157 	clean_tree_block(trans, root, buf);
6158 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6159 
6160 	btrfs_set_lock_blocking(buf);
6161 	btrfs_set_buffer_uptodate(buf);
6162 
6163 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6164 		/*
6165 		 * we allow two log transactions at a time, use different
6166 		 * EXENT bit to differentiate dirty pages.
6167 		 */
6168 		if (root->log_transid % 2 == 0)
6169 			set_extent_dirty(&root->dirty_log_pages, buf->start,
6170 					buf->start + buf->len - 1, GFP_NOFS);
6171 		else
6172 			set_extent_new(&root->dirty_log_pages, buf->start,
6173 					buf->start + buf->len - 1, GFP_NOFS);
6174 	} else {
6175 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6176 			 buf->start + buf->len - 1, GFP_NOFS);
6177 	}
6178 	trans->blocks_used++;
6179 	/* this returns a buffer locked for blocking */
6180 	return buf;
6181 }
6182 
6183 static struct btrfs_block_rsv *
6184 use_block_rsv(struct btrfs_trans_handle *trans,
6185 	      struct btrfs_root *root, u32 blocksize)
6186 {
6187 	struct btrfs_block_rsv *block_rsv;
6188 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6189 	int ret;
6190 
6191 	block_rsv = get_block_rsv(trans, root);
6192 
6193 	if (block_rsv->size == 0) {
6194 		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6195 		/*
6196 		 * If we couldn't reserve metadata bytes try and use some from
6197 		 * the global reserve.
6198 		 */
6199 		if (ret && block_rsv != global_rsv) {
6200 			ret = block_rsv_use_bytes(global_rsv, blocksize);
6201 			if (!ret)
6202 				return global_rsv;
6203 			return ERR_PTR(ret);
6204 		} else if (ret) {
6205 			return ERR_PTR(ret);
6206 		}
6207 		return block_rsv;
6208 	}
6209 
6210 	ret = block_rsv_use_bytes(block_rsv, blocksize);
6211 	if (!ret)
6212 		return block_rsv;
6213 	if (ret) {
6214 		static DEFINE_RATELIMIT_STATE(_rs,
6215 				DEFAULT_RATELIMIT_INTERVAL,
6216 				/*DEFAULT_RATELIMIT_BURST*/ 2);
6217 		if (__ratelimit(&_rs)) {
6218 			printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
6219 			WARN_ON(1);
6220 		}
6221 		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6222 		if (!ret) {
6223 			return block_rsv;
6224 		} else if (ret && block_rsv != global_rsv) {
6225 			ret = block_rsv_use_bytes(global_rsv, blocksize);
6226 			if (!ret)
6227 				return global_rsv;
6228 		}
6229 	}
6230 
6231 	return ERR_PTR(-ENOSPC);
6232 }
6233 
6234 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6235 			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
6236 {
6237 	block_rsv_add_bytes(block_rsv, blocksize, 0);
6238 	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6239 }
6240 
6241 /*
6242  * finds a free extent and does all the dirty work required for allocation
6243  * returns the key for the extent through ins, and a tree buffer for
6244  * the first block of the extent through buf.
6245  *
6246  * returns the tree buffer or NULL.
6247  */
6248 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6249 					struct btrfs_root *root, u32 blocksize,
6250 					u64 parent, u64 root_objectid,
6251 					struct btrfs_disk_key *key, int level,
6252 					u64 hint, u64 empty_size, int for_cow)
6253 {
6254 	struct btrfs_key ins;
6255 	struct btrfs_block_rsv *block_rsv;
6256 	struct extent_buffer *buf;
6257 	u64 flags = 0;
6258 	int ret;
6259 
6260 
6261 	block_rsv = use_block_rsv(trans, root, blocksize);
6262 	if (IS_ERR(block_rsv))
6263 		return ERR_CAST(block_rsv);
6264 
6265 	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6266 				   empty_size, hint, &ins, 0);
6267 	if (ret) {
6268 		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6269 		return ERR_PTR(ret);
6270 	}
6271 
6272 	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6273 				    blocksize, level);
6274 	BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6275 
6276 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6277 		if (parent == 0)
6278 			parent = ins.objectid;
6279 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6280 	} else
6281 		BUG_ON(parent > 0);
6282 
6283 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6284 		struct btrfs_delayed_extent_op *extent_op;
6285 		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6286 		BUG_ON(!extent_op); /* -ENOMEM */
6287 		if (key)
6288 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
6289 		else
6290 			memset(&extent_op->key, 0, sizeof(extent_op->key));
6291 		extent_op->flags_to_set = flags;
6292 		extent_op->update_key = 1;
6293 		extent_op->update_flags = 1;
6294 		extent_op->is_data = 0;
6295 
6296 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6297 					ins.objectid,
6298 					ins.offset, parent, root_objectid,
6299 					level, BTRFS_ADD_DELAYED_EXTENT,
6300 					extent_op, for_cow);
6301 		BUG_ON(ret); /* -ENOMEM */
6302 	}
6303 	return buf;
6304 }
6305 
6306 struct walk_control {
6307 	u64 refs[BTRFS_MAX_LEVEL];
6308 	u64 flags[BTRFS_MAX_LEVEL];
6309 	struct btrfs_key update_progress;
6310 	int stage;
6311 	int level;
6312 	int shared_level;
6313 	int update_ref;
6314 	int keep_locks;
6315 	int reada_slot;
6316 	int reada_count;
6317 	int for_reloc;
6318 };
6319 
6320 #define DROP_REFERENCE	1
6321 #define UPDATE_BACKREF	2
6322 
6323 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6324 				     struct btrfs_root *root,
6325 				     struct walk_control *wc,
6326 				     struct btrfs_path *path)
6327 {
6328 	u64 bytenr;
6329 	u64 generation;
6330 	u64 refs;
6331 	u64 flags;
6332 	u32 nritems;
6333 	u32 blocksize;
6334 	struct btrfs_key key;
6335 	struct extent_buffer *eb;
6336 	int ret;
6337 	int slot;
6338 	int nread = 0;
6339 
6340 	if (path->slots[wc->level] < wc->reada_slot) {
6341 		wc->reada_count = wc->reada_count * 2 / 3;
6342 		wc->reada_count = max(wc->reada_count, 2);
6343 	} else {
6344 		wc->reada_count = wc->reada_count * 3 / 2;
6345 		wc->reada_count = min_t(int, wc->reada_count,
6346 					BTRFS_NODEPTRS_PER_BLOCK(root));
6347 	}
6348 
6349 	eb = path->nodes[wc->level];
6350 	nritems = btrfs_header_nritems(eb);
6351 	blocksize = btrfs_level_size(root, wc->level - 1);
6352 
6353 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6354 		if (nread >= wc->reada_count)
6355 			break;
6356 
6357 		cond_resched();
6358 		bytenr = btrfs_node_blockptr(eb, slot);
6359 		generation = btrfs_node_ptr_generation(eb, slot);
6360 
6361 		if (slot == path->slots[wc->level])
6362 			goto reada;
6363 
6364 		if (wc->stage == UPDATE_BACKREF &&
6365 		    generation <= root->root_key.offset)
6366 			continue;
6367 
6368 		/* We don't lock the tree block, it's OK to be racy here */
6369 		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6370 					       &refs, &flags);
6371 		/* We don't care about errors in readahead. */
6372 		if (ret < 0)
6373 			continue;
6374 		BUG_ON(refs == 0);
6375 
6376 		if (wc->stage == DROP_REFERENCE) {
6377 			if (refs == 1)
6378 				goto reada;
6379 
6380 			if (wc->level == 1 &&
6381 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6382 				continue;
6383 			if (!wc->update_ref ||
6384 			    generation <= root->root_key.offset)
6385 				continue;
6386 			btrfs_node_key_to_cpu(eb, &key, slot);
6387 			ret = btrfs_comp_cpu_keys(&key,
6388 						  &wc->update_progress);
6389 			if (ret < 0)
6390 				continue;
6391 		} else {
6392 			if (wc->level == 1 &&
6393 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6394 				continue;
6395 		}
6396 reada:
6397 		ret = readahead_tree_block(root, bytenr, blocksize,
6398 					   generation);
6399 		if (ret)
6400 			break;
6401 		nread++;
6402 	}
6403 	wc->reada_slot = slot;
6404 }
6405 
6406 /*
6407  * hepler to process tree block while walking down the tree.
6408  *
6409  * when wc->stage == UPDATE_BACKREF, this function updates
6410  * back refs for pointers in the block.
6411  *
6412  * NOTE: return value 1 means we should stop walking down.
6413  */
6414 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6415 				   struct btrfs_root *root,
6416 				   struct btrfs_path *path,
6417 				   struct walk_control *wc, int lookup_info)
6418 {
6419 	int level = wc->level;
6420 	struct extent_buffer *eb = path->nodes[level];
6421 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6422 	int ret;
6423 
6424 	if (wc->stage == UPDATE_BACKREF &&
6425 	    btrfs_header_owner(eb) != root->root_key.objectid)
6426 		return 1;
6427 
6428 	/*
6429 	 * when reference count of tree block is 1, it won't increase
6430 	 * again. once full backref flag is set, we never clear it.
6431 	 */
6432 	if (lookup_info &&
6433 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6434 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6435 		BUG_ON(!path->locks[level]);
6436 		ret = btrfs_lookup_extent_info(trans, root,
6437 					       eb->start, eb->len,
6438 					       &wc->refs[level],
6439 					       &wc->flags[level]);
6440 		BUG_ON(ret == -ENOMEM);
6441 		if (ret)
6442 			return ret;
6443 		BUG_ON(wc->refs[level] == 0);
6444 	}
6445 
6446 	if (wc->stage == DROP_REFERENCE) {
6447 		if (wc->refs[level] > 1)
6448 			return 1;
6449 
6450 		if (path->locks[level] && !wc->keep_locks) {
6451 			btrfs_tree_unlock_rw(eb, path->locks[level]);
6452 			path->locks[level] = 0;
6453 		}
6454 		return 0;
6455 	}
6456 
6457 	/* wc->stage == UPDATE_BACKREF */
6458 	if (!(wc->flags[level] & flag)) {
6459 		BUG_ON(!path->locks[level]);
6460 		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6461 		BUG_ON(ret); /* -ENOMEM */
6462 		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6463 		BUG_ON(ret); /* -ENOMEM */
6464 		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6465 						  eb->len, flag, 0);
6466 		BUG_ON(ret); /* -ENOMEM */
6467 		wc->flags[level] |= flag;
6468 	}
6469 
6470 	/*
6471 	 * the block is shared by multiple trees, so it's not good to
6472 	 * keep the tree lock
6473 	 */
6474 	if (path->locks[level] && level > 0) {
6475 		btrfs_tree_unlock_rw(eb, path->locks[level]);
6476 		path->locks[level] = 0;
6477 	}
6478 	return 0;
6479 }
6480 
6481 /*
6482  * hepler to process tree block pointer.
6483  *
6484  * when wc->stage == DROP_REFERENCE, this function checks
6485  * reference count of the block pointed to. if the block
6486  * is shared and we need update back refs for the subtree
6487  * rooted at the block, this function changes wc->stage to
6488  * UPDATE_BACKREF. if the block is shared and there is no
6489  * need to update back, this function drops the reference
6490  * to the block.
6491  *
6492  * NOTE: return value 1 means we should stop walking down.
6493  */
6494 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6495 				 struct btrfs_root *root,
6496 				 struct btrfs_path *path,
6497 				 struct walk_control *wc, int *lookup_info)
6498 {
6499 	u64 bytenr;
6500 	u64 generation;
6501 	u64 parent;
6502 	u32 blocksize;
6503 	struct btrfs_key key;
6504 	struct extent_buffer *next;
6505 	int level = wc->level;
6506 	int reada = 0;
6507 	int ret = 0;
6508 
6509 	generation = btrfs_node_ptr_generation(path->nodes[level],
6510 					       path->slots[level]);
6511 	/*
6512 	 * if the lower level block was created before the snapshot
6513 	 * was created, we know there is no need to update back refs
6514 	 * for the subtree
6515 	 */
6516 	if (wc->stage == UPDATE_BACKREF &&
6517 	    generation <= root->root_key.offset) {
6518 		*lookup_info = 1;
6519 		return 1;
6520 	}
6521 
6522 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6523 	blocksize = btrfs_level_size(root, level - 1);
6524 
6525 	next = btrfs_find_tree_block(root, bytenr, blocksize);
6526 	if (!next) {
6527 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6528 		if (!next)
6529 			return -ENOMEM;
6530 		reada = 1;
6531 	}
6532 	btrfs_tree_lock(next);
6533 	btrfs_set_lock_blocking(next);
6534 
6535 	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6536 				       &wc->refs[level - 1],
6537 				       &wc->flags[level - 1]);
6538 	if (ret < 0) {
6539 		btrfs_tree_unlock(next);
6540 		return ret;
6541 	}
6542 
6543 	BUG_ON(wc->refs[level - 1] == 0);
6544 	*lookup_info = 0;
6545 
6546 	if (wc->stage == DROP_REFERENCE) {
6547 		if (wc->refs[level - 1] > 1) {
6548 			if (level == 1 &&
6549 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6550 				goto skip;
6551 
6552 			if (!wc->update_ref ||
6553 			    generation <= root->root_key.offset)
6554 				goto skip;
6555 
6556 			btrfs_node_key_to_cpu(path->nodes[level], &key,
6557 					      path->slots[level]);
6558 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6559 			if (ret < 0)
6560 				goto skip;
6561 
6562 			wc->stage = UPDATE_BACKREF;
6563 			wc->shared_level = level - 1;
6564 		}
6565 	} else {
6566 		if (level == 1 &&
6567 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6568 			goto skip;
6569 	}
6570 
6571 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
6572 		btrfs_tree_unlock(next);
6573 		free_extent_buffer(next);
6574 		next = NULL;
6575 		*lookup_info = 1;
6576 	}
6577 
6578 	if (!next) {
6579 		if (reada && level == 1)
6580 			reada_walk_down(trans, root, wc, path);
6581 		next = read_tree_block(root, bytenr, blocksize, generation);
6582 		if (!next)
6583 			return -EIO;
6584 		btrfs_tree_lock(next);
6585 		btrfs_set_lock_blocking(next);
6586 	}
6587 
6588 	level--;
6589 	BUG_ON(level != btrfs_header_level(next));
6590 	path->nodes[level] = next;
6591 	path->slots[level] = 0;
6592 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6593 	wc->level = level;
6594 	if (wc->level == 1)
6595 		wc->reada_slot = 0;
6596 	return 0;
6597 skip:
6598 	wc->refs[level - 1] = 0;
6599 	wc->flags[level - 1] = 0;
6600 	if (wc->stage == DROP_REFERENCE) {
6601 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6602 			parent = path->nodes[level]->start;
6603 		} else {
6604 			BUG_ON(root->root_key.objectid !=
6605 			       btrfs_header_owner(path->nodes[level]));
6606 			parent = 0;
6607 		}
6608 
6609 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6610 				root->root_key.objectid, level - 1, 0, 0);
6611 		BUG_ON(ret); /* -ENOMEM */
6612 	}
6613 	btrfs_tree_unlock(next);
6614 	free_extent_buffer(next);
6615 	*lookup_info = 1;
6616 	return 1;
6617 }
6618 
6619 /*
6620  * hepler to process tree block while walking up the tree.
6621  *
6622  * when wc->stage == DROP_REFERENCE, this function drops
6623  * reference count on the block.
6624  *
6625  * when wc->stage == UPDATE_BACKREF, this function changes
6626  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6627  * to UPDATE_BACKREF previously while processing the block.
6628  *
6629  * NOTE: return value 1 means we should stop walking up.
6630  */
6631 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6632 				 struct btrfs_root *root,
6633 				 struct btrfs_path *path,
6634 				 struct walk_control *wc)
6635 {
6636 	int ret;
6637 	int level = wc->level;
6638 	struct extent_buffer *eb = path->nodes[level];
6639 	u64 parent = 0;
6640 
6641 	if (wc->stage == UPDATE_BACKREF) {
6642 		BUG_ON(wc->shared_level < level);
6643 		if (level < wc->shared_level)
6644 			goto out;
6645 
6646 		ret = find_next_key(path, level + 1, &wc->update_progress);
6647 		if (ret > 0)
6648 			wc->update_ref = 0;
6649 
6650 		wc->stage = DROP_REFERENCE;
6651 		wc->shared_level = -1;
6652 		path->slots[level] = 0;
6653 
6654 		/*
6655 		 * check reference count again if the block isn't locked.
6656 		 * we should start walking down the tree again if reference
6657 		 * count is one.
6658 		 */
6659 		if (!path->locks[level]) {
6660 			BUG_ON(level == 0);
6661 			btrfs_tree_lock(eb);
6662 			btrfs_set_lock_blocking(eb);
6663 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6664 
6665 			ret = btrfs_lookup_extent_info(trans, root,
6666 						       eb->start, eb->len,
6667 						       &wc->refs[level],
6668 						       &wc->flags[level]);
6669 			if (ret < 0) {
6670 				btrfs_tree_unlock_rw(eb, path->locks[level]);
6671 				return ret;
6672 			}
6673 			BUG_ON(wc->refs[level] == 0);
6674 			if (wc->refs[level] == 1) {
6675 				btrfs_tree_unlock_rw(eb, path->locks[level]);
6676 				return 1;
6677 			}
6678 		}
6679 	}
6680 
6681 	/* wc->stage == DROP_REFERENCE */
6682 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6683 
6684 	if (wc->refs[level] == 1) {
6685 		if (level == 0) {
6686 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6687 				ret = btrfs_dec_ref(trans, root, eb, 1,
6688 						    wc->for_reloc);
6689 			else
6690 				ret = btrfs_dec_ref(trans, root, eb, 0,
6691 						    wc->for_reloc);
6692 			BUG_ON(ret); /* -ENOMEM */
6693 		}
6694 		/* make block locked assertion in clean_tree_block happy */
6695 		if (!path->locks[level] &&
6696 		    btrfs_header_generation(eb) == trans->transid) {
6697 			btrfs_tree_lock(eb);
6698 			btrfs_set_lock_blocking(eb);
6699 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6700 		}
6701 		clean_tree_block(trans, root, eb);
6702 	}
6703 
6704 	if (eb == root->node) {
6705 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6706 			parent = eb->start;
6707 		else
6708 			BUG_ON(root->root_key.objectid !=
6709 			       btrfs_header_owner(eb));
6710 	} else {
6711 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6712 			parent = path->nodes[level + 1]->start;
6713 		else
6714 			BUG_ON(root->root_key.objectid !=
6715 			       btrfs_header_owner(path->nodes[level + 1]));
6716 	}
6717 
6718 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
6719 out:
6720 	wc->refs[level] = 0;
6721 	wc->flags[level] = 0;
6722 	return 0;
6723 }
6724 
6725 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6726 				   struct btrfs_root *root,
6727 				   struct btrfs_path *path,
6728 				   struct walk_control *wc)
6729 {
6730 	int level = wc->level;
6731 	int lookup_info = 1;
6732 	int ret;
6733 
6734 	while (level >= 0) {
6735 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
6736 		if (ret > 0)
6737 			break;
6738 
6739 		if (level == 0)
6740 			break;
6741 
6742 		if (path->slots[level] >=
6743 		    btrfs_header_nritems(path->nodes[level]))
6744 			break;
6745 
6746 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
6747 		if (ret > 0) {
6748 			path->slots[level]++;
6749 			continue;
6750 		} else if (ret < 0)
6751 			return ret;
6752 		level = wc->level;
6753 	}
6754 	return 0;
6755 }
6756 
6757 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6758 				 struct btrfs_root *root,
6759 				 struct btrfs_path *path,
6760 				 struct walk_control *wc, int max_level)
6761 {
6762 	int level = wc->level;
6763 	int ret;
6764 
6765 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6766 	while (level < max_level && path->nodes[level]) {
6767 		wc->level = level;
6768 		if (path->slots[level] + 1 <
6769 		    btrfs_header_nritems(path->nodes[level])) {
6770 			path->slots[level]++;
6771 			return 0;
6772 		} else {
6773 			ret = walk_up_proc(trans, root, path, wc);
6774 			if (ret > 0)
6775 				return 0;
6776 
6777 			if (path->locks[level]) {
6778 				btrfs_tree_unlock_rw(path->nodes[level],
6779 						     path->locks[level]);
6780 				path->locks[level] = 0;
6781 			}
6782 			free_extent_buffer(path->nodes[level]);
6783 			path->nodes[level] = NULL;
6784 			level++;
6785 		}
6786 	}
6787 	return 1;
6788 }
6789 
6790 /*
6791  * drop a subvolume tree.
6792  *
6793  * this function traverses the tree freeing any blocks that only
6794  * referenced by the tree.
6795  *
6796  * when a shared tree block is found. this function decreases its
6797  * reference count by one. if update_ref is true, this function
6798  * also make sure backrefs for the shared block and all lower level
6799  * blocks are properly updated.
6800  */
6801 int btrfs_drop_snapshot(struct btrfs_root *root,
6802 			 struct btrfs_block_rsv *block_rsv, int update_ref,
6803 			 int for_reloc)
6804 {
6805 	struct btrfs_path *path;
6806 	struct btrfs_trans_handle *trans;
6807 	struct btrfs_root *tree_root = root->fs_info->tree_root;
6808 	struct btrfs_root_item *root_item = &root->root_item;
6809 	struct walk_control *wc;
6810 	struct btrfs_key key;
6811 	int err = 0;
6812 	int ret;
6813 	int level;
6814 
6815 	path = btrfs_alloc_path();
6816 	if (!path) {
6817 		err = -ENOMEM;
6818 		goto out;
6819 	}
6820 
6821 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
6822 	if (!wc) {
6823 		btrfs_free_path(path);
6824 		err = -ENOMEM;
6825 		goto out;
6826 	}
6827 
6828 	trans = btrfs_start_transaction(tree_root, 0);
6829 	if (IS_ERR(trans)) {
6830 		err = PTR_ERR(trans);
6831 		goto out_free;
6832 	}
6833 
6834 	if (block_rsv)
6835 		trans->block_rsv = block_rsv;
6836 
6837 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6838 		level = btrfs_header_level(root->node);
6839 		path->nodes[level] = btrfs_lock_root_node(root);
6840 		btrfs_set_lock_blocking(path->nodes[level]);
6841 		path->slots[level] = 0;
6842 		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6843 		memset(&wc->update_progress, 0,
6844 		       sizeof(wc->update_progress));
6845 	} else {
6846 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6847 		memcpy(&wc->update_progress, &key,
6848 		       sizeof(wc->update_progress));
6849 
6850 		level = root_item->drop_level;
6851 		BUG_ON(level == 0);
6852 		path->lowest_level = level;
6853 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6854 		path->lowest_level = 0;
6855 		if (ret < 0) {
6856 			err = ret;
6857 			goto out_end_trans;
6858 		}
6859 		WARN_ON(ret > 0);
6860 
6861 		/*
6862 		 * unlock our path, this is safe because only this
6863 		 * function is allowed to delete this snapshot
6864 		 */
6865 		btrfs_unlock_up_safe(path, 0);
6866 
6867 		level = btrfs_header_level(root->node);
6868 		while (1) {
6869 			btrfs_tree_lock(path->nodes[level]);
6870 			btrfs_set_lock_blocking(path->nodes[level]);
6871 
6872 			ret = btrfs_lookup_extent_info(trans, root,
6873 						path->nodes[level]->start,
6874 						path->nodes[level]->len,
6875 						&wc->refs[level],
6876 						&wc->flags[level]);
6877 			if (ret < 0) {
6878 				err = ret;
6879 				goto out_end_trans;
6880 			}
6881 			BUG_ON(wc->refs[level] == 0);
6882 
6883 			if (level == root_item->drop_level)
6884 				break;
6885 
6886 			btrfs_tree_unlock(path->nodes[level]);
6887 			WARN_ON(wc->refs[level] != 1);
6888 			level--;
6889 		}
6890 	}
6891 
6892 	wc->level = level;
6893 	wc->shared_level = -1;
6894 	wc->stage = DROP_REFERENCE;
6895 	wc->update_ref = update_ref;
6896 	wc->keep_locks = 0;
6897 	wc->for_reloc = for_reloc;
6898 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6899 
6900 	while (1) {
6901 		ret = walk_down_tree(trans, root, path, wc);
6902 		if (ret < 0) {
6903 			err = ret;
6904 			break;
6905 		}
6906 
6907 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6908 		if (ret < 0) {
6909 			err = ret;
6910 			break;
6911 		}
6912 
6913 		if (ret > 0) {
6914 			BUG_ON(wc->stage != DROP_REFERENCE);
6915 			break;
6916 		}
6917 
6918 		if (wc->stage == DROP_REFERENCE) {
6919 			level = wc->level;
6920 			btrfs_node_key(path->nodes[level],
6921 				       &root_item->drop_progress,
6922 				       path->slots[level]);
6923 			root_item->drop_level = level;
6924 		}
6925 
6926 		BUG_ON(wc->level == 0);
6927 		if (btrfs_should_end_transaction(trans, tree_root)) {
6928 			ret = btrfs_update_root(trans, tree_root,
6929 						&root->root_key,
6930 						root_item);
6931 			if (ret) {
6932 				btrfs_abort_transaction(trans, tree_root, ret);
6933 				err = ret;
6934 				goto out_end_trans;
6935 			}
6936 
6937 			btrfs_end_transaction_throttle(trans, tree_root);
6938 			trans = btrfs_start_transaction(tree_root, 0);
6939 			if (IS_ERR(trans)) {
6940 				err = PTR_ERR(trans);
6941 				goto out_free;
6942 			}
6943 			if (block_rsv)
6944 				trans->block_rsv = block_rsv;
6945 		}
6946 	}
6947 	btrfs_release_path(path);
6948 	if (err)
6949 		goto out_end_trans;
6950 
6951 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
6952 	if (ret) {
6953 		btrfs_abort_transaction(trans, tree_root, ret);
6954 		goto out_end_trans;
6955 	}
6956 
6957 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6958 		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6959 					   NULL, NULL);
6960 		if (ret < 0) {
6961 			btrfs_abort_transaction(trans, tree_root, ret);
6962 			err = ret;
6963 			goto out_end_trans;
6964 		} else if (ret > 0) {
6965 			/* if we fail to delete the orphan item this time
6966 			 * around, it'll get picked up the next time.
6967 			 *
6968 			 * The most common failure here is just -ENOENT.
6969 			 */
6970 			btrfs_del_orphan_item(trans, tree_root,
6971 					      root->root_key.objectid);
6972 		}
6973 	}
6974 
6975 	if (root->in_radix) {
6976 		btrfs_free_fs_root(tree_root->fs_info, root);
6977 	} else {
6978 		free_extent_buffer(root->node);
6979 		free_extent_buffer(root->commit_root);
6980 		kfree(root);
6981 	}
6982 out_end_trans:
6983 	btrfs_end_transaction_throttle(trans, tree_root);
6984 out_free:
6985 	kfree(wc);
6986 	btrfs_free_path(path);
6987 out:
6988 	if (err)
6989 		btrfs_std_error(root->fs_info, err);
6990 	return err;
6991 }
6992 
6993 /*
6994  * drop subtree rooted at tree block 'node'.
6995  *
6996  * NOTE: this function will unlock and release tree block 'node'
6997  * only used by relocation code
6998  */
6999 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7000 			struct btrfs_root *root,
7001 			struct extent_buffer *node,
7002 			struct extent_buffer *parent)
7003 {
7004 	struct btrfs_path *path;
7005 	struct walk_control *wc;
7006 	int level;
7007 	int parent_level;
7008 	int ret = 0;
7009 	int wret;
7010 
7011 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7012 
7013 	path = btrfs_alloc_path();
7014 	if (!path)
7015 		return -ENOMEM;
7016 
7017 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
7018 	if (!wc) {
7019 		btrfs_free_path(path);
7020 		return -ENOMEM;
7021 	}
7022 
7023 	btrfs_assert_tree_locked(parent);
7024 	parent_level = btrfs_header_level(parent);
7025 	extent_buffer_get(parent);
7026 	path->nodes[parent_level] = parent;
7027 	path->slots[parent_level] = btrfs_header_nritems(parent);
7028 
7029 	btrfs_assert_tree_locked(node);
7030 	level = btrfs_header_level(node);
7031 	path->nodes[level] = node;
7032 	path->slots[level] = 0;
7033 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7034 
7035 	wc->refs[parent_level] = 1;
7036 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7037 	wc->level = level;
7038 	wc->shared_level = -1;
7039 	wc->stage = DROP_REFERENCE;
7040 	wc->update_ref = 0;
7041 	wc->keep_locks = 1;
7042 	wc->for_reloc = 1;
7043 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7044 
7045 	while (1) {
7046 		wret = walk_down_tree(trans, root, path, wc);
7047 		if (wret < 0) {
7048 			ret = wret;
7049 			break;
7050 		}
7051 
7052 		wret = walk_up_tree(trans, root, path, wc, parent_level);
7053 		if (wret < 0)
7054 			ret = wret;
7055 		if (wret != 0)
7056 			break;
7057 	}
7058 
7059 	kfree(wc);
7060 	btrfs_free_path(path);
7061 	return ret;
7062 }
7063 
7064 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7065 {
7066 	u64 num_devices;
7067 	u64 stripped;
7068 
7069 	/*
7070 	 * if restripe for this chunk_type is on pick target profile and
7071 	 * return, otherwise do the usual balance
7072 	 */
7073 	stripped = get_restripe_target(root->fs_info, flags);
7074 	if (stripped)
7075 		return extended_to_chunk(stripped);
7076 
7077 	/*
7078 	 * we add in the count of missing devices because we want
7079 	 * to make sure that any RAID levels on a degraded FS
7080 	 * continue to be honored.
7081 	 */
7082 	num_devices = root->fs_info->fs_devices->rw_devices +
7083 		root->fs_info->fs_devices->missing_devices;
7084 
7085 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
7086 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7087 
7088 	if (num_devices == 1) {
7089 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7090 		stripped = flags & ~stripped;
7091 
7092 		/* turn raid0 into single device chunks */
7093 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
7094 			return stripped;
7095 
7096 		/* turn mirroring into duplication */
7097 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7098 			     BTRFS_BLOCK_GROUP_RAID10))
7099 			return stripped | BTRFS_BLOCK_GROUP_DUP;
7100 	} else {
7101 		/* they already had raid on here, just return */
7102 		if (flags & stripped)
7103 			return flags;
7104 
7105 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7106 		stripped = flags & ~stripped;
7107 
7108 		/* switch duplicated blocks with raid1 */
7109 		if (flags & BTRFS_BLOCK_GROUP_DUP)
7110 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
7111 
7112 		/* this is drive concat, leave it alone */
7113 	}
7114 
7115 	return flags;
7116 }
7117 
7118 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7119 {
7120 	struct btrfs_space_info *sinfo = cache->space_info;
7121 	u64 num_bytes;
7122 	u64 min_allocable_bytes;
7123 	int ret = -ENOSPC;
7124 
7125 
7126 	/*
7127 	 * We need some metadata space and system metadata space for
7128 	 * allocating chunks in some corner cases until we force to set
7129 	 * it to be readonly.
7130 	 */
7131 	if ((sinfo->flags &
7132 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7133 	    !force)
7134 		min_allocable_bytes = 1 * 1024 * 1024;
7135 	else
7136 		min_allocable_bytes = 0;
7137 
7138 	spin_lock(&sinfo->lock);
7139 	spin_lock(&cache->lock);
7140 
7141 	if (cache->ro) {
7142 		ret = 0;
7143 		goto out;
7144 	}
7145 
7146 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7147 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7148 
7149 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7150 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7151 	    min_allocable_bytes <= sinfo->total_bytes) {
7152 		sinfo->bytes_readonly += num_bytes;
7153 		cache->ro = 1;
7154 		ret = 0;
7155 	}
7156 out:
7157 	spin_unlock(&cache->lock);
7158 	spin_unlock(&sinfo->lock);
7159 	return ret;
7160 }
7161 
7162 int btrfs_set_block_group_ro(struct btrfs_root *root,
7163 			     struct btrfs_block_group_cache *cache)
7164 
7165 {
7166 	struct btrfs_trans_handle *trans;
7167 	u64 alloc_flags;
7168 	int ret;
7169 
7170 	BUG_ON(cache->ro);
7171 
7172 	trans = btrfs_join_transaction(root);
7173 	if (IS_ERR(trans))
7174 		return PTR_ERR(trans);
7175 
7176 	alloc_flags = update_block_group_flags(root, cache->flags);
7177 	if (alloc_flags != cache->flags) {
7178 		ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7179 				     CHUNK_ALLOC_FORCE);
7180 		if (ret < 0)
7181 			goto out;
7182 	}
7183 
7184 	ret = set_block_group_ro(cache, 0);
7185 	if (!ret)
7186 		goto out;
7187 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7188 	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7189 			     CHUNK_ALLOC_FORCE);
7190 	if (ret < 0)
7191 		goto out;
7192 	ret = set_block_group_ro(cache, 0);
7193 out:
7194 	btrfs_end_transaction(trans, root);
7195 	return ret;
7196 }
7197 
7198 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7199 			    struct btrfs_root *root, u64 type)
7200 {
7201 	u64 alloc_flags = get_alloc_profile(root, type);
7202 	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7203 			      CHUNK_ALLOC_FORCE);
7204 }
7205 
7206 /*
7207  * helper to account the unused space of all the readonly block group in the
7208  * list. takes mirrors into account.
7209  */
7210 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7211 {
7212 	struct btrfs_block_group_cache *block_group;
7213 	u64 free_bytes = 0;
7214 	int factor;
7215 
7216 	list_for_each_entry(block_group, groups_list, list) {
7217 		spin_lock(&block_group->lock);
7218 
7219 		if (!block_group->ro) {
7220 			spin_unlock(&block_group->lock);
7221 			continue;
7222 		}
7223 
7224 		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7225 					  BTRFS_BLOCK_GROUP_RAID10 |
7226 					  BTRFS_BLOCK_GROUP_DUP))
7227 			factor = 2;
7228 		else
7229 			factor = 1;
7230 
7231 		free_bytes += (block_group->key.offset -
7232 			       btrfs_block_group_used(&block_group->item)) *
7233 			       factor;
7234 
7235 		spin_unlock(&block_group->lock);
7236 	}
7237 
7238 	return free_bytes;
7239 }
7240 
7241 /*
7242  * helper to account the unused space of all the readonly block group in the
7243  * space_info. takes mirrors into account.
7244  */
7245 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7246 {
7247 	int i;
7248 	u64 free_bytes = 0;
7249 
7250 	spin_lock(&sinfo->lock);
7251 
7252 	for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7253 		if (!list_empty(&sinfo->block_groups[i]))
7254 			free_bytes += __btrfs_get_ro_block_group_free_space(
7255 						&sinfo->block_groups[i]);
7256 
7257 	spin_unlock(&sinfo->lock);
7258 
7259 	return free_bytes;
7260 }
7261 
7262 void btrfs_set_block_group_rw(struct btrfs_root *root,
7263 			      struct btrfs_block_group_cache *cache)
7264 {
7265 	struct btrfs_space_info *sinfo = cache->space_info;
7266 	u64 num_bytes;
7267 
7268 	BUG_ON(!cache->ro);
7269 
7270 	spin_lock(&sinfo->lock);
7271 	spin_lock(&cache->lock);
7272 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7273 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7274 	sinfo->bytes_readonly -= num_bytes;
7275 	cache->ro = 0;
7276 	spin_unlock(&cache->lock);
7277 	spin_unlock(&sinfo->lock);
7278 }
7279 
7280 /*
7281  * checks to see if its even possible to relocate this block group.
7282  *
7283  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
7284  * ok to go ahead and try.
7285  */
7286 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7287 {
7288 	struct btrfs_block_group_cache *block_group;
7289 	struct btrfs_space_info *space_info;
7290 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7291 	struct btrfs_device *device;
7292 	u64 min_free;
7293 	u64 dev_min = 1;
7294 	u64 dev_nr = 0;
7295 	u64 target;
7296 	int index;
7297 	int full = 0;
7298 	int ret = 0;
7299 
7300 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7301 
7302 	/* odd, couldn't find the block group, leave it alone */
7303 	if (!block_group)
7304 		return -1;
7305 
7306 	min_free = btrfs_block_group_used(&block_group->item);
7307 
7308 	/* no bytes used, we're good */
7309 	if (!min_free)
7310 		goto out;
7311 
7312 	space_info = block_group->space_info;
7313 	spin_lock(&space_info->lock);
7314 
7315 	full = space_info->full;
7316 
7317 	/*
7318 	 * if this is the last block group we have in this space, we can't
7319 	 * relocate it unless we're able to allocate a new chunk below.
7320 	 *
7321 	 * Otherwise, we need to make sure we have room in the space to handle
7322 	 * all of the extents from this block group.  If we can, we're good
7323 	 */
7324 	if ((space_info->total_bytes != block_group->key.offset) &&
7325 	    (space_info->bytes_used + space_info->bytes_reserved +
7326 	     space_info->bytes_pinned + space_info->bytes_readonly +
7327 	     min_free < space_info->total_bytes)) {
7328 		spin_unlock(&space_info->lock);
7329 		goto out;
7330 	}
7331 	spin_unlock(&space_info->lock);
7332 
7333 	/*
7334 	 * ok we don't have enough space, but maybe we have free space on our
7335 	 * devices to allocate new chunks for relocation, so loop through our
7336 	 * alloc devices and guess if we have enough space.  if this block
7337 	 * group is going to be restriped, run checks against the target
7338 	 * profile instead of the current one.
7339 	 */
7340 	ret = -1;
7341 
7342 	/*
7343 	 * index:
7344 	 *      0: raid10
7345 	 *      1: raid1
7346 	 *      2: dup
7347 	 *      3: raid0
7348 	 *      4: single
7349 	 */
7350 	target = get_restripe_target(root->fs_info, block_group->flags);
7351 	if (target) {
7352 		index = __get_block_group_index(extended_to_chunk(target));
7353 	} else {
7354 		/*
7355 		 * this is just a balance, so if we were marked as full
7356 		 * we know there is no space for a new chunk
7357 		 */
7358 		if (full)
7359 			goto out;
7360 
7361 		index = get_block_group_index(block_group);
7362 	}
7363 
7364 	if (index == 0) {
7365 		dev_min = 4;
7366 		/* Divide by 2 */
7367 		min_free >>= 1;
7368 	} else if (index == 1) {
7369 		dev_min = 2;
7370 	} else if (index == 2) {
7371 		/* Multiply by 2 */
7372 		min_free <<= 1;
7373 	} else if (index == 3) {
7374 		dev_min = fs_devices->rw_devices;
7375 		do_div(min_free, dev_min);
7376 	}
7377 
7378 	mutex_lock(&root->fs_info->chunk_mutex);
7379 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7380 		u64 dev_offset;
7381 
7382 		/*
7383 		 * check to make sure we can actually find a chunk with enough
7384 		 * space to fit our block group in.
7385 		 */
7386 		if (device->total_bytes > device->bytes_used + min_free) {
7387 			ret = find_free_dev_extent(device, min_free,
7388 						   &dev_offset, NULL);
7389 			if (!ret)
7390 				dev_nr++;
7391 
7392 			if (dev_nr >= dev_min)
7393 				break;
7394 
7395 			ret = -1;
7396 		}
7397 	}
7398 	mutex_unlock(&root->fs_info->chunk_mutex);
7399 out:
7400 	btrfs_put_block_group(block_group);
7401 	return ret;
7402 }
7403 
7404 static int find_first_block_group(struct btrfs_root *root,
7405 		struct btrfs_path *path, struct btrfs_key *key)
7406 {
7407 	int ret = 0;
7408 	struct btrfs_key found_key;
7409 	struct extent_buffer *leaf;
7410 	int slot;
7411 
7412 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7413 	if (ret < 0)
7414 		goto out;
7415 
7416 	while (1) {
7417 		slot = path->slots[0];
7418 		leaf = path->nodes[0];
7419 		if (slot >= btrfs_header_nritems(leaf)) {
7420 			ret = btrfs_next_leaf(root, path);
7421 			if (ret == 0)
7422 				continue;
7423 			if (ret < 0)
7424 				goto out;
7425 			break;
7426 		}
7427 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7428 
7429 		if (found_key.objectid >= key->objectid &&
7430 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7431 			ret = 0;
7432 			goto out;
7433 		}
7434 		path->slots[0]++;
7435 	}
7436 out:
7437 	return ret;
7438 }
7439 
7440 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7441 {
7442 	struct btrfs_block_group_cache *block_group;
7443 	u64 last = 0;
7444 
7445 	while (1) {
7446 		struct inode *inode;
7447 
7448 		block_group = btrfs_lookup_first_block_group(info, last);
7449 		while (block_group) {
7450 			spin_lock(&block_group->lock);
7451 			if (block_group->iref)
7452 				break;
7453 			spin_unlock(&block_group->lock);
7454 			block_group = next_block_group(info->tree_root,
7455 						       block_group);
7456 		}
7457 		if (!block_group) {
7458 			if (last == 0)
7459 				break;
7460 			last = 0;
7461 			continue;
7462 		}
7463 
7464 		inode = block_group->inode;
7465 		block_group->iref = 0;
7466 		block_group->inode = NULL;
7467 		spin_unlock(&block_group->lock);
7468 		iput(inode);
7469 		last = block_group->key.objectid + block_group->key.offset;
7470 		btrfs_put_block_group(block_group);
7471 	}
7472 }
7473 
7474 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7475 {
7476 	struct btrfs_block_group_cache *block_group;
7477 	struct btrfs_space_info *space_info;
7478 	struct btrfs_caching_control *caching_ctl;
7479 	struct rb_node *n;
7480 
7481 	down_write(&info->extent_commit_sem);
7482 	while (!list_empty(&info->caching_block_groups)) {
7483 		caching_ctl = list_entry(info->caching_block_groups.next,
7484 					 struct btrfs_caching_control, list);
7485 		list_del(&caching_ctl->list);
7486 		put_caching_control(caching_ctl);
7487 	}
7488 	up_write(&info->extent_commit_sem);
7489 
7490 	spin_lock(&info->block_group_cache_lock);
7491 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7492 		block_group = rb_entry(n, struct btrfs_block_group_cache,
7493 				       cache_node);
7494 		rb_erase(&block_group->cache_node,
7495 			 &info->block_group_cache_tree);
7496 		spin_unlock(&info->block_group_cache_lock);
7497 
7498 		down_write(&block_group->space_info->groups_sem);
7499 		list_del(&block_group->list);
7500 		up_write(&block_group->space_info->groups_sem);
7501 
7502 		if (block_group->cached == BTRFS_CACHE_STARTED)
7503 			wait_block_group_cache_done(block_group);
7504 
7505 		/*
7506 		 * We haven't cached this block group, which means we could
7507 		 * possibly have excluded extents on this block group.
7508 		 */
7509 		if (block_group->cached == BTRFS_CACHE_NO)
7510 			free_excluded_extents(info->extent_root, block_group);
7511 
7512 		btrfs_remove_free_space_cache(block_group);
7513 		btrfs_put_block_group(block_group);
7514 
7515 		spin_lock(&info->block_group_cache_lock);
7516 	}
7517 	spin_unlock(&info->block_group_cache_lock);
7518 
7519 	/* now that all the block groups are freed, go through and
7520 	 * free all the space_info structs.  This is only called during
7521 	 * the final stages of unmount, and so we know nobody is
7522 	 * using them.  We call synchronize_rcu() once before we start,
7523 	 * just to be on the safe side.
7524 	 */
7525 	synchronize_rcu();
7526 
7527 	release_global_block_rsv(info);
7528 
7529 	while(!list_empty(&info->space_info)) {
7530 		space_info = list_entry(info->space_info.next,
7531 					struct btrfs_space_info,
7532 					list);
7533 		if (space_info->bytes_pinned > 0 ||
7534 		    space_info->bytes_reserved > 0 ||
7535 		    space_info->bytes_may_use > 0) {
7536 			WARN_ON(1);
7537 			dump_space_info(space_info, 0, 0);
7538 		}
7539 		list_del(&space_info->list);
7540 		kfree(space_info);
7541 	}
7542 	return 0;
7543 }
7544 
7545 static void __link_block_group(struct btrfs_space_info *space_info,
7546 			       struct btrfs_block_group_cache *cache)
7547 {
7548 	int index = get_block_group_index(cache);
7549 
7550 	down_write(&space_info->groups_sem);
7551 	list_add_tail(&cache->list, &space_info->block_groups[index]);
7552 	up_write(&space_info->groups_sem);
7553 }
7554 
7555 int btrfs_read_block_groups(struct btrfs_root *root)
7556 {
7557 	struct btrfs_path *path;
7558 	int ret;
7559 	struct btrfs_block_group_cache *cache;
7560 	struct btrfs_fs_info *info = root->fs_info;
7561 	struct btrfs_space_info *space_info;
7562 	struct btrfs_key key;
7563 	struct btrfs_key found_key;
7564 	struct extent_buffer *leaf;
7565 	int need_clear = 0;
7566 	u64 cache_gen;
7567 
7568 	root = info->extent_root;
7569 	key.objectid = 0;
7570 	key.offset = 0;
7571 	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7572 	path = btrfs_alloc_path();
7573 	if (!path)
7574 		return -ENOMEM;
7575 	path->reada = 1;
7576 
7577 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7578 	if (btrfs_test_opt(root, SPACE_CACHE) &&
7579 	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7580 		need_clear = 1;
7581 	if (btrfs_test_opt(root, CLEAR_CACHE))
7582 		need_clear = 1;
7583 
7584 	while (1) {
7585 		ret = find_first_block_group(root, path, &key);
7586 		if (ret > 0)
7587 			break;
7588 		if (ret != 0)
7589 			goto error;
7590 		leaf = path->nodes[0];
7591 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7592 		cache = kzalloc(sizeof(*cache), GFP_NOFS);
7593 		if (!cache) {
7594 			ret = -ENOMEM;
7595 			goto error;
7596 		}
7597 		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7598 						GFP_NOFS);
7599 		if (!cache->free_space_ctl) {
7600 			kfree(cache);
7601 			ret = -ENOMEM;
7602 			goto error;
7603 		}
7604 
7605 		atomic_set(&cache->count, 1);
7606 		spin_lock_init(&cache->lock);
7607 		cache->fs_info = info;
7608 		INIT_LIST_HEAD(&cache->list);
7609 		INIT_LIST_HEAD(&cache->cluster_list);
7610 
7611 		if (need_clear)
7612 			cache->disk_cache_state = BTRFS_DC_CLEAR;
7613 
7614 		read_extent_buffer(leaf, &cache->item,
7615 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
7616 				   sizeof(cache->item));
7617 		memcpy(&cache->key, &found_key, sizeof(found_key));
7618 
7619 		key.objectid = found_key.objectid + found_key.offset;
7620 		btrfs_release_path(path);
7621 		cache->flags = btrfs_block_group_flags(&cache->item);
7622 		cache->sectorsize = root->sectorsize;
7623 
7624 		btrfs_init_free_space_ctl(cache);
7625 
7626 		/*
7627 		 * We need to exclude the super stripes now so that the space
7628 		 * info has super bytes accounted for, otherwise we'll think
7629 		 * we have more space than we actually do.
7630 		 */
7631 		exclude_super_stripes(root, cache);
7632 
7633 		/*
7634 		 * check for two cases, either we are full, and therefore
7635 		 * don't need to bother with the caching work since we won't
7636 		 * find any space, or we are empty, and we can just add all
7637 		 * the space in and be done with it.  This saves us _alot_ of
7638 		 * time, particularly in the full case.
7639 		 */
7640 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7641 			cache->last_byte_to_unpin = (u64)-1;
7642 			cache->cached = BTRFS_CACHE_FINISHED;
7643 			free_excluded_extents(root, cache);
7644 		} else if (btrfs_block_group_used(&cache->item) == 0) {
7645 			cache->last_byte_to_unpin = (u64)-1;
7646 			cache->cached = BTRFS_CACHE_FINISHED;
7647 			add_new_free_space(cache, root->fs_info,
7648 					   found_key.objectid,
7649 					   found_key.objectid +
7650 					   found_key.offset);
7651 			free_excluded_extents(root, cache);
7652 		}
7653 
7654 		ret = update_space_info(info, cache->flags, found_key.offset,
7655 					btrfs_block_group_used(&cache->item),
7656 					&space_info);
7657 		BUG_ON(ret); /* -ENOMEM */
7658 		cache->space_info = space_info;
7659 		spin_lock(&cache->space_info->lock);
7660 		cache->space_info->bytes_readonly += cache->bytes_super;
7661 		spin_unlock(&cache->space_info->lock);
7662 
7663 		__link_block_group(space_info, cache);
7664 
7665 		ret = btrfs_add_block_group_cache(root->fs_info, cache);
7666 		BUG_ON(ret); /* Logic error */
7667 
7668 		set_avail_alloc_bits(root->fs_info, cache->flags);
7669 		if (btrfs_chunk_readonly(root, cache->key.objectid))
7670 			set_block_group_ro(cache, 1);
7671 	}
7672 
7673 	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7674 		if (!(get_alloc_profile(root, space_info->flags) &
7675 		      (BTRFS_BLOCK_GROUP_RAID10 |
7676 		       BTRFS_BLOCK_GROUP_RAID1 |
7677 		       BTRFS_BLOCK_GROUP_DUP)))
7678 			continue;
7679 		/*
7680 		 * avoid allocating from un-mirrored block group if there are
7681 		 * mirrored block groups.
7682 		 */
7683 		list_for_each_entry(cache, &space_info->block_groups[3], list)
7684 			set_block_group_ro(cache, 1);
7685 		list_for_each_entry(cache, &space_info->block_groups[4], list)
7686 			set_block_group_ro(cache, 1);
7687 	}
7688 
7689 	init_global_block_rsv(info);
7690 	ret = 0;
7691 error:
7692 	btrfs_free_path(path);
7693 	return ret;
7694 }
7695 
7696 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7697 			   struct btrfs_root *root, u64 bytes_used,
7698 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
7699 			   u64 size)
7700 {
7701 	int ret;
7702 	struct btrfs_root *extent_root;
7703 	struct btrfs_block_group_cache *cache;
7704 
7705 	extent_root = root->fs_info->extent_root;
7706 
7707 	root->fs_info->last_trans_log_full_commit = trans->transid;
7708 
7709 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
7710 	if (!cache)
7711 		return -ENOMEM;
7712 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7713 					GFP_NOFS);
7714 	if (!cache->free_space_ctl) {
7715 		kfree(cache);
7716 		return -ENOMEM;
7717 	}
7718 
7719 	cache->key.objectid = chunk_offset;
7720 	cache->key.offset = size;
7721 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7722 	cache->sectorsize = root->sectorsize;
7723 	cache->fs_info = root->fs_info;
7724 
7725 	atomic_set(&cache->count, 1);
7726 	spin_lock_init(&cache->lock);
7727 	INIT_LIST_HEAD(&cache->list);
7728 	INIT_LIST_HEAD(&cache->cluster_list);
7729 
7730 	btrfs_init_free_space_ctl(cache);
7731 
7732 	btrfs_set_block_group_used(&cache->item, bytes_used);
7733 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7734 	cache->flags = type;
7735 	btrfs_set_block_group_flags(&cache->item, type);
7736 
7737 	cache->last_byte_to_unpin = (u64)-1;
7738 	cache->cached = BTRFS_CACHE_FINISHED;
7739 	exclude_super_stripes(root, cache);
7740 
7741 	add_new_free_space(cache, root->fs_info, chunk_offset,
7742 			   chunk_offset + size);
7743 
7744 	free_excluded_extents(root, cache);
7745 
7746 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7747 				&cache->space_info);
7748 	BUG_ON(ret); /* -ENOMEM */
7749 	update_global_block_rsv(root->fs_info);
7750 
7751 	spin_lock(&cache->space_info->lock);
7752 	cache->space_info->bytes_readonly += cache->bytes_super;
7753 	spin_unlock(&cache->space_info->lock);
7754 
7755 	__link_block_group(cache->space_info, cache);
7756 
7757 	ret = btrfs_add_block_group_cache(root->fs_info, cache);
7758 	BUG_ON(ret); /* Logic error */
7759 
7760 	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7761 				sizeof(cache->item));
7762 	if (ret) {
7763 		btrfs_abort_transaction(trans, extent_root, ret);
7764 		return ret;
7765 	}
7766 
7767 	set_avail_alloc_bits(extent_root->fs_info, type);
7768 
7769 	return 0;
7770 }
7771 
7772 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7773 {
7774 	u64 extra_flags = chunk_to_extended(flags) &
7775 				BTRFS_EXTENDED_PROFILE_MASK;
7776 
7777 	if (flags & BTRFS_BLOCK_GROUP_DATA)
7778 		fs_info->avail_data_alloc_bits &= ~extra_flags;
7779 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
7780 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7781 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7782 		fs_info->avail_system_alloc_bits &= ~extra_flags;
7783 }
7784 
7785 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7786 			     struct btrfs_root *root, u64 group_start)
7787 {
7788 	struct btrfs_path *path;
7789 	struct btrfs_block_group_cache *block_group;
7790 	struct btrfs_free_cluster *cluster;
7791 	struct btrfs_root *tree_root = root->fs_info->tree_root;
7792 	struct btrfs_key key;
7793 	struct inode *inode;
7794 	int ret;
7795 	int index;
7796 	int factor;
7797 
7798 	root = root->fs_info->extent_root;
7799 
7800 	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7801 	BUG_ON(!block_group);
7802 	BUG_ON(!block_group->ro);
7803 
7804 	/*
7805 	 * Free the reserved super bytes from this block group before
7806 	 * remove it.
7807 	 */
7808 	free_excluded_extents(root, block_group);
7809 
7810 	memcpy(&key, &block_group->key, sizeof(key));
7811 	index = get_block_group_index(block_group);
7812 	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7813 				  BTRFS_BLOCK_GROUP_RAID1 |
7814 				  BTRFS_BLOCK_GROUP_RAID10))
7815 		factor = 2;
7816 	else
7817 		factor = 1;
7818 
7819 	/* make sure this block group isn't part of an allocation cluster */
7820 	cluster = &root->fs_info->data_alloc_cluster;
7821 	spin_lock(&cluster->refill_lock);
7822 	btrfs_return_cluster_to_free_space(block_group, cluster);
7823 	spin_unlock(&cluster->refill_lock);
7824 
7825 	/*
7826 	 * make sure this block group isn't part of a metadata
7827 	 * allocation cluster
7828 	 */
7829 	cluster = &root->fs_info->meta_alloc_cluster;
7830 	spin_lock(&cluster->refill_lock);
7831 	btrfs_return_cluster_to_free_space(block_group, cluster);
7832 	spin_unlock(&cluster->refill_lock);
7833 
7834 	path = btrfs_alloc_path();
7835 	if (!path) {
7836 		ret = -ENOMEM;
7837 		goto out;
7838 	}
7839 
7840 	inode = lookup_free_space_inode(tree_root, block_group, path);
7841 	if (!IS_ERR(inode)) {
7842 		ret = btrfs_orphan_add(trans, inode);
7843 		if (ret) {
7844 			btrfs_add_delayed_iput(inode);
7845 			goto out;
7846 		}
7847 		clear_nlink(inode);
7848 		/* One for the block groups ref */
7849 		spin_lock(&block_group->lock);
7850 		if (block_group->iref) {
7851 			block_group->iref = 0;
7852 			block_group->inode = NULL;
7853 			spin_unlock(&block_group->lock);
7854 			iput(inode);
7855 		} else {
7856 			spin_unlock(&block_group->lock);
7857 		}
7858 		/* One for our lookup ref */
7859 		btrfs_add_delayed_iput(inode);
7860 	}
7861 
7862 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7863 	key.offset = block_group->key.objectid;
7864 	key.type = 0;
7865 
7866 	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7867 	if (ret < 0)
7868 		goto out;
7869 	if (ret > 0)
7870 		btrfs_release_path(path);
7871 	if (ret == 0) {
7872 		ret = btrfs_del_item(trans, tree_root, path);
7873 		if (ret)
7874 			goto out;
7875 		btrfs_release_path(path);
7876 	}
7877 
7878 	spin_lock(&root->fs_info->block_group_cache_lock);
7879 	rb_erase(&block_group->cache_node,
7880 		 &root->fs_info->block_group_cache_tree);
7881 	spin_unlock(&root->fs_info->block_group_cache_lock);
7882 
7883 	down_write(&block_group->space_info->groups_sem);
7884 	/*
7885 	 * we must use list_del_init so people can check to see if they
7886 	 * are still on the list after taking the semaphore
7887 	 */
7888 	list_del_init(&block_group->list);
7889 	if (list_empty(&block_group->space_info->block_groups[index]))
7890 		clear_avail_alloc_bits(root->fs_info, block_group->flags);
7891 	up_write(&block_group->space_info->groups_sem);
7892 
7893 	if (block_group->cached == BTRFS_CACHE_STARTED)
7894 		wait_block_group_cache_done(block_group);
7895 
7896 	btrfs_remove_free_space_cache(block_group);
7897 
7898 	spin_lock(&block_group->space_info->lock);
7899 	block_group->space_info->total_bytes -= block_group->key.offset;
7900 	block_group->space_info->bytes_readonly -= block_group->key.offset;
7901 	block_group->space_info->disk_total -= block_group->key.offset * factor;
7902 	spin_unlock(&block_group->space_info->lock);
7903 
7904 	memcpy(&key, &block_group->key, sizeof(key));
7905 
7906 	btrfs_clear_space_info_full(root->fs_info);
7907 
7908 	btrfs_put_block_group(block_group);
7909 	btrfs_put_block_group(block_group);
7910 
7911 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7912 	if (ret > 0)
7913 		ret = -EIO;
7914 	if (ret < 0)
7915 		goto out;
7916 
7917 	ret = btrfs_del_item(trans, root, path);
7918 out:
7919 	btrfs_free_path(path);
7920 	return ret;
7921 }
7922 
7923 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7924 {
7925 	struct btrfs_space_info *space_info;
7926 	struct btrfs_super_block *disk_super;
7927 	u64 features;
7928 	u64 flags;
7929 	int mixed = 0;
7930 	int ret;
7931 
7932 	disk_super = fs_info->super_copy;
7933 	if (!btrfs_super_root(disk_super))
7934 		return 1;
7935 
7936 	features = btrfs_super_incompat_flags(disk_super);
7937 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7938 		mixed = 1;
7939 
7940 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
7941 	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7942 	if (ret)
7943 		goto out;
7944 
7945 	if (mixed) {
7946 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7947 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7948 	} else {
7949 		flags = BTRFS_BLOCK_GROUP_METADATA;
7950 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7951 		if (ret)
7952 			goto out;
7953 
7954 		flags = BTRFS_BLOCK_GROUP_DATA;
7955 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7956 	}
7957 out:
7958 	return ret;
7959 }
7960 
7961 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7962 {
7963 	return unpin_extent_range(root, start, end);
7964 }
7965 
7966 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7967 			       u64 num_bytes, u64 *actual_bytes)
7968 {
7969 	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7970 }
7971 
7972 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7973 {
7974 	struct btrfs_fs_info *fs_info = root->fs_info;
7975 	struct btrfs_block_group_cache *cache = NULL;
7976 	u64 group_trimmed;
7977 	u64 start;
7978 	u64 end;
7979 	u64 trimmed = 0;
7980 	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
7981 	int ret = 0;
7982 
7983 	/*
7984 	 * try to trim all FS space, our block group may start from non-zero.
7985 	 */
7986 	if (range->len == total_bytes)
7987 		cache = btrfs_lookup_first_block_group(fs_info, range->start);
7988 	else
7989 		cache = btrfs_lookup_block_group(fs_info, range->start);
7990 
7991 	while (cache) {
7992 		if (cache->key.objectid >= (range->start + range->len)) {
7993 			btrfs_put_block_group(cache);
7994 			break;
7995 		}
7996 
7997 		start = max(range->start, cache->key.objectid);
7998 		end = min(range->start + range->len,
7999 				cache->key.objectid + cache->key.offset);
8000 
8001 		if (end - start >= range->minlen) {
8002 			if (!block_group_cache_done(cache)) {
8003 				ret = cache_block_group(cache, NULL, root, 0);
8004 				if (!ret)
8005 					wait_block_group_cache_done(cache);
8006 			}
8007 			ret = btrfs_trim_block_group(cache,
8008 						     &group_trimmed,
8009 						     start,
8010 						     end,
8011 						     range->minlen);
8012 
8013 			trimmed += group_trimmed;
8014 			if (ret) {
8015 				btrfs_put_block_group(cache);
8016 				break;
8017 			}
8018 		}
8019 
8020 		cache = next_block_group(fs_info->tree_root, cache);
8021 	}
8022 
8023 	range->len = trimmed;
8024 	return ret;
8025 }
8026