xref: /openbmc/linux/fs/btrfs/extent-tree.c (revision 9d749629)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include "compat.h"
28 #include "hash.h"
29 #include "ctree.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "transaction.h"
33 #include "volumes.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 
38 #undef SCRAMBLE_DELAYED_REFS
39 
40 /*
41  * control flags for do_chunk_alloc's force field
42  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
43  * if we really need one.
44  *
45  * CHUNK_ALLOC_LIMITED means to only try to allocate one
46  * if we have very few chunks already allocated.  This is
47  * used as part of the clustering code to help make sure
48  * we have a good pool of storage to cluster in, without
49  * filling the FS with empty chunks.
50  *
51  * CHUNK_ALLOC_FORCE means it must try to allocate one.
52  *
53  */
54 enum {
55 	CHUNK_ALLOC_NO_FORCE = 0,
56 	CHUNK_ALLOC_LIMITED = 1,
57 	CHUNK_ALLOC_FORCE = 2,
58 };
59 
60 /*
61  * Control how reservations are dealt with.
62  *
63  * RESERVE_FREE - freeing a reservation.
64  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
65  *   ENOSPC accounting
66  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
67  *   bytes_may_use as the ENOSPC accounting is done elsewhere
68  */
69 enum {
70 	RESERVE_FREE = 0,
71 	RESERVE_ALLOC = 1,
72 	RESERVE_ALLOC_NO_ACCOUNT = 2,
73 };
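
/*
 * These values are passed as the 'reserve' argument to
 * btrfs_update_reserved_bytes() below.
 */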
74 
75 static int update_block_group(struct btrfs_trans_handle *trans,
76 			      struct btrfs_root *root,
77 			      u64 bytenr, u64 num_bytes, int alloc);
78 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
79 				struct btrfs_root *root,
80 				u64 bytenr, u64 num_bytes, u64 parent,
81 				u64 root_objectid, u64 owner_objectid,
82 				u64 owner_offset, int refs_to_drop,
83 				struct btrfs_delayed_extent_op *extra_op);
84 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
85 				    struct extent_buffer *leaf,
86 				    struct btrfs_extent_item *ei);
87 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
88 				      struct btrfs_root *root,
89 				      u64 parent, u64 root_objectid,
90 				      u64 flags, u64 owner, u64 offset,
91 				      struct btrfs_key *ins, int ref_mod);
92 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
93 				     struct btrfs_root *root,
94 				     u64 parent, u64 root_objectid,
95 				     u64 flags, struct btrfs_disk_key *key,
96 				     int level, struct btrfs_key *ins);
97 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
98 			  struct btrfs_root *extent_root, u64 flags,
99 			  int force);
100 static int find_next_key(struct btrfs_path *path, int level,
101 			 struct btrfs_key *key);
102 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
103 			    int dump_block_groups);
104 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
105 				       u64 num_bytes, int reserve);
106 
107 static noinline int
108 block_group_cache_done(struct btrfs_block_group_cache *cache)
109 {
110 	smp_mb();
111 	return cache->cached == BTRFS_CACHE_FINISHED;
112 }
113 
114 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
115 {
116 	return (cache->flags & bits) == bits;
117 }
118 
119 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
120 {
121 	atomic_inc(&cache->count);
122 }
123 
124 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
125 {
126 	if (atomic_dec_and_test(&cache->count)) {
127 		WARN_ON(cache->pinned > 0);
128 		WARN_ON(cache->reserved > 0);
129 		kfree(cache->free_space_ctl);
130 		kfree(cache);
131 	}
132 }
133 
134 /*
135  * this adds the block group to the fs_info rb tree for the block group
136  * cache
137  */
138 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
139 				struct btrfs_block_group_cache *block_group)
140 {
141 	struct rb_node **p;
142 	struct rb_node *parent = NULL;
143 	struct btrfs_block_group_cache *cache;
144 
145 	spin_lock(&info->block_group_cache_lock);
146 	p = &info->block_group_cache_tree.rb_node;
147 
148 	while (*p) {
149 		parent = *p;
150 		cache = rb_entry(parent, struct btrfs_block_group_cache,
151 				 cache_node);
152 		if (block_group->key.objectid < cache->key.objectid) {
153 			p = &(*p)->rb_left;
154 		} else if (block_group->key.objectid > cache->key.objectid) {
155 			p = &(*p)->rb_right;
156 		} else {
157 			spin_unlock(&info->block_group_cache_lock);
158 			return -EEXIST;
159 		}
160 	}
161 
162 	rb_link_node(&block_group->cache_node, parent, p);
163 	rb_insert_color(&block_group->cache_node,
164 			&info->block_group_cache_tree);
165 	spin_unlock(&info->block_group_cache_lock);
166 
167 	return 0;
168 }
169 
170 /*
171  * This will return the block group at or after bytenr if contains is 0, else
172  * it will return the block group that contains the bytenr.
173  */
174 static struct btrfs_block_group_cache *
175 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
176 			      int contains)
177 {
178 	struct btrfs_block_group_cache *cache, *ret = NULL;
179 	struct rb_node *n;
180 	u64 end, start;
181 
182 	spin_lock(&info->block_group_cache_lock);
183 	n = info->block_group_cache_tree.rb_node;
184 
185 	while (n) {
186 		cache = rb_entry(n, struct btrfs_block_group_cache,
187 				 cache_node);
188 		end = cache->key.objectid + cache->key.offset - 1;
189 		start = cache->key.objectid;
190 
191 		if (bytenr < start) {
192 			if (!contains && (!ret || start < ret->key.objectid))
193 				ret = cache;
194 			n = n->rb_left;
195 		} else if (bytenr > start) {
196 			if (contains && bytenr <= end) {
197 				ret = cache;
198 				break;
199 			}
200 			n = n->rb_right;
201 		} else {
202 			ret = cache;
203 			break;
204 		}
205 	}
206 	if (ret)
207 		btrfs_get_block_group(ret);
208 	spin_unlock(&info->block_group_cache_lock);
209 
210 	return ret;
211 }
212 
213 static int add_excluded_extent(struct btrfs_root *root,
214 			       u64 start, u64 num_bytes)
215 {
216 	u64 end = start + num_bytes - 1;
217 	set_extent_bits(&root->fs_info->freed_extents[0],
218 			start, end, EXTENT_UPTODATE, GFP_NOFS);
219 	set_extent_bits(&root->fs_info->freed_extents[1],
220 			start, end, EXTENT_UPTODATE, GFP_NOFS);
221 	return 0;
222 }
223 
224 static void free_excluded_extents(struct btrfs_root *root,
225 				  struct btrfs_block_group_cache *cache)
226 {
227 	u64 start, end;
228 
229 	start = cache->key.objectid;
230 	end = start + cache->key.offset - 1;
231 
232 	clear_extent_bits(&root->fs_info->freed_extents[0],
233 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
234 	clear_extent_bits(&root->fs_info->freed_extents[1],
235 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
236 }
237 
238 static int exclude_super_stripes(struct btrfs_root *root,
239 				 struct btrfs_block_group_cache *cache)
240 {
241 	u64 bytenr;
242 	u64 *logical;
243 	int stripe_len;
244 	int i, nr, ret;
245 
246 	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
247 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
248 		cache->bytes_super += stripe_len;
249 		ret = add_excluded_extent(root, cache->key.objectid,
250 					  stripe_len);
251 		BUG_ON(ret); /* -ENOMEM */
252 	}
253 
254 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
255 		bytenr = btrfs_sb_offset(i);
256 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
257 				       cache->key.objectid, bytenr,
258 				       0, &logical, &nr, &stripe_len);
259 		BUG_ON(ret); /* -ENOMEM */
260 
261 		while (nr--) {
262 			cache->bytes_super += stripe_len;
263 			ret = add_excluded_extent(root, logical[nr],
264 						  stripe_len);
265 			BUG_ON(ret); /* -ENOMEM */
266 		}
267 
268 		kfree(logical);
269 	}
270 	return 0;
271 }
272 
273 static struct btrfs_caching_control *
274 get_caching_control(struct btrfs_block_group_cache *cache)
275 {
276 	struct btrfs_caching_control *ctl;
277 
278 	spin_lock(&cache->lock);
279 	if (cache->cached != BTRFS_CACHE_STARTED) {
280 		spin_unlock(&cache->lock);
281 		return NULL;
282 	}
283 
284 	/* We're loading it the fast way, so we don't have a caching_ctl. */
285 	if (!cache->caching_ctl) {
286 		spin_unlock(&cache->lock);
287 		return NULL;
288 	}
289 
290 	ctl = cache->caching_ctl;
291 	atomic_inc(&ctl->count);
292 	spin_unlock(&cache->lock);
293 	return ctl;
294 }
295 
296 static void put_caching_control(struct btrfs_caching_control *ctl)
297 {
298 	if (atomic_dec_and_test(&ctl->count))
299 		kfree(ctl);
300 }
301 
302 /*
303  * This is only called by cache_block_group.  Since we could have freed
304  * extents, we need to check the pinned_extents for any extents that can't
305  * be used yet, since their free space will be released as soon as the transaction commits.
306  */
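/*
 * Illustrative example: for a block group spanning [0, 100) with pinned
 * extents [20, 39] and [60, 79], the loop below adds [0, 20), [40, 60)
 * and [80, 100) as free space and returns total_added = 60.
 */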
307 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
308 			      struct btrfs_fs_info *info, u64 start, u64 end)
309 {
310 	u64 extent_start, extent_end, size, total_added = 0;
311 	int ret;
312 
313 	while (start < end) {
314 		ret = find_first_extent_bit(info->pinned_extents, start,
315 					    &extent_start, &extent_end,
316 					    EXTENT_DIRTY | EXTENT_UPTODATE,
317 					    NULL);
318 		if (ret)
319 			break;
320 
321 		if (extent_start <= start) {
322 			start = extent_end + 1;
323 		} else if (extent_start > start && extent_start < end) {
324 			size = extent_start - start;
325 			total_added += size;
326 			ret = btrfs_add_free_space(block_group, start,
327 						   size);
328 			BUG_ON(ret); /* -ENOMEM or logic error */
329 			start = extent_end + 1;
330 		} else {
331 			break;
332 		}
333 	}
334 
335 	if (start < end) {
336 		size = end - start;
337 		total_added += size;
338 		ret = btrfs_add_free_space(block_group, start, size);
339 		BUG_ON(ret); /* -ENOMEM or logic error */
340 	}
341 
342 	return total_added;
343 }
344 
345 static noinline void caching_thread(struct btrfs_work *work)
346 {
347 	struct btrfs_block_group_cache *block_group;
348 	struct btrfs_fs_info *fs_info;
349 	struct btrfs_caching_control *caching_ctl;
350 	struct btrfs_root *extent_root;
351 	struct btrfs_path *path;
352 	struct extent_buffer *leaf;
353 	struct btrfs_key key;
354 	u64 total_found = 0;
355 	u64 last = 0;
356 	u32 nritems;
357 	int ret = 0;
358 
359 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
360 	block_group = caching_ctl->block_group;
361 	fs_info = block_group->fs_info;
362 	extent_root = fs_info->extent_root;
363 
364 	path = btrfs_alloc_path();
365 	if (!path)
366 		goto out;
367 
368 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
369 
370 	/*
371 	 * We don't want to deadlock with somebody trying to allocate a new
372 	 * extent for the extent root while also trying to search the extent
373 	 * root to add free space.  So we skip locking and search the commit
374 	 * root, since it's read-only.
375 	 */
376 	path->skip_locking = 1;
377 	path->search_commit_root = 1;
378 	path->reada = 1;
379 
380 	key.objectid = last;
381 	key.offset = 0;
382 	key.type = BTRFS_EXTENT_ITEM_KEY;
383 again:
384 	mutex_lock(&caching_ctl->mutex);
385 	/* need to make sure the commit_root doesn't disappear */
386 	down_read(&fs_info->extent_commit_sem);
387 
388 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
389 	if (ret < 0)
390 		goto err;
391 
392 	leaf = path->nodes[0];
393 	nritems = btrfs_header_nritems(leaf);
394 
395 	while (1) {
396 		if (btrfs_fs_closing(fs_info) > 1) {
397 			last = (u64)-1;
398 			break;
399 		}
400 
401 		if (path->slots[0] < nritems) {
402 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
403 		} else {
404 			ret = find_next_key(path, 0, &key);
405 			if (ret)
406 				break;
407 
408 			if (need_resched() ||
409 			    btrfs_next_leaf(extent_root, path)) {
410 				caching_ctl->progress = last;
411 				btrfs_release_path(path);
412 				up_read(&fs_info->extent_commit_sem);
413 				mutex_unlock(&caching_ctl->mutex);
414 				cond_resched();
415 				goto again;
416 			}
417 			leaf = path->nodes[0];
418 			nritems = btrfs_header_nritems(leaf);
419 			continue;
420 		}
421 
422 		if (key.objectid < block_group->key.objectid) {
423 			path->slots[0]++;
424 			continue;
425 		}
426 
427 		if (key.objectid >= block_group->key.objectid +
428 		    block_group->key.offset)
429 			break;
430 
431 		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
432 			total_found += add_new_free_space(block_group,
433 							  fs_info, last,
434 							  key.objectid);
435 			last = key.objectid + key.offset;
436 
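			/* Wake up anyone waiting on caching progress every ~2MB found. */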
437 			if (total_found > (1024 * 1024 * 2)) {
438 				total_found = 0;
439 				wake_up(&caching_ctl->wait);
440 			}
441 		}
442 		path->slots[0]++;
443 	}
444 	ret = 0;
445 
446 	total_found += add_new_free_space(block_group, fs_info, last,
447 					  block_group->key.objectid +
448 					  block_group->key.offset);
449 	caching_ctl->progress = (u64)-1;
450 
451 	spin_lock(&block_group->lock);
452 	block_group->caching_ctl = NULL;
453 	block_group->cached = BTRFS_CACHE_FINISHED;
454 	spin_unlock(&block_group->lock);
455 
456 err:
457 	btrfs_free_path(path);
458 	up_read(&fs_info->extent_commit_sem);
459 
460 	free_excluded_extents(extent_root, block_group);
461 
462 	mutex_unlock(&caching_ctl->mutex);
463 out:
464 	wake_up(&caching_ctl->wait);
465 
466 	put_caching_control(caching_ctl);
467 	btrfs_put_block_group(block_group);
468 }
469 
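/*
 * Summary of the caching states driven by the code below: a block group
 * starts out BTRFS_CACHE_NO.  cache_block_group() moves it to
 * BTRFS_CACHE_FAST while it tries to load the on-disk free space cache;
 * on success the group goes straight to BTRFS_CACHE_FINISHED, otherwise
 * it falls back to BTRFS_CACHE_NO (load_cache_only) or to
 * BTRFS_CACHE_STARTED, in which case caching_thread() scans the extent
 * tree and finally sets BTRFS_CACHE_FINISHED.
 */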
470 static int cache_block_group(struct btrfs_block_group_cache *cache,
471 			     struct btrfs_trans_handle *trans,
472 			     struct btrfs_root *root,
473 			     int load_cache_only)
474 {
475 	DEFINE_WAIT(wait);
476 	struct btrfs_fs_info *fs_info = cache->fs_info;
477 	struct btrfs_caching_control *caching_ctl;
478 	int ret = 0;
479 
480 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
481 	if (!caching_ctl)
482 		return -ENOMEM;
483 
484 	INIT_LIST_HEAD(&caching_ctl->list);
485 	mutex_init(&caching_ctl->mutex);
486 	init_waitqueue_head(&caching_ctl->wait);
487 	caching_ctl->block_group = cache;
488 	caching_ctl->progress = cache->key.objectid;
489 	atomic_set(&caching_ctl->count, 1);
490 	caching_ctl->work.func = caching_thread;
491 
492 	spin_lock(&cache->lock);
493 	/*
494 	 * This should be a rare occasion, but it can happen in the
495 	 * case where one thread starts to load the space cache info, and then
496 	 * some other thread starts a transaction commit which tries to do an
497 	 * allocation while the other thread is still loading the space cache
498 	 * info.  The previous loop should have kept us from choosing this block
499 	 * group, but if we've moved to the state where we will wait on caching
500 	 * block groups we need to first check if we're doing a fast load here,
501 	 * so we can wait for it to finish, otherwise we could end up allocating
502 	 * from a block group whose cache gets evicted for one reason or
503 	 * another.
504 	 */
505 	while (cache->cached == BTRFS_CACHE_FAST) {
506 		struct btrfs_caching_control *ctl;
507 
508 		ctl = cache->caching_ctl;
509 		atomic_inc(&ctl->count);
510 		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
511 		spin_unlock(&cache->lock);
512 
513 		schedule();
514 
515 		finish_wait(&ctl->wait, &wait);
516 		put_caching_control(ctl);
517 		spin_lock(&cache->lock);
518 	}
519 
520 	if (cache->cached != BTRFS_CACHE_NO) {
521 		spin_unlock(&cache->lock);
522 		kfree(caching_ctl);
523 		return 0;
524 	}
525 	WARN_ON(cache->caching_ctl);
526 	cache->caching_ctl = caching_ctl;
527 	cache->cached = BTRFS_CACHE_FAST;
528 	spin_unlock(&cache->lock);
529 
530 	/*
531 	 * We can't do the read from on-disk cache during a commit since we need
532 	 * to have the normal tree locking.  Also if we are currently trying to
533 	 * allocate blocks for the tree root we can't do the fast caching since
534 	 * we likely hold important locks.
535 	 */
536 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
537 		ret = load_free_space_cache(fs_info, cache);
538 
539 		spin_lock(&cache->lock);
540 		if (ret == 1) {
541 			cache->caching_ctl = NULL;
542 			cache->cached = BTRFS_CACHE_FINISHED;
543 			cache->last_byte_to_unpin = (u64)-1;
544 		} else {
545 			if (load_cache_only) {
546 				cache->caching_ctl = NULL;
547 				cache->cached = BTRFS_CACHE_NO;
548 			} else {
549 				cache->cached = BTRFS_CACHE_STARTED;
550 			}
551 		}
552 		spin_unlock(&cache->lock);
553 		wake_up(&caching_ctl->wait);
554 		if (ret == 1) {
555 			put_caching_control(caching_ctl);
556 			free_excluded_extents(fs_info->extent_root, cache);
557 			return 0;
558 		}
559 	} else {
560 		/*
561 		 * We are not going to do the fast caching, set cached to the
562 		 * appropriate value and wakeup any waiters.
563 		 */
564 		spin_lock(&cache->lock);
565 		if (load_cache_only) {
566 			cache->caching_ctl = NULL;
567 			cache->cached = BTRFS_CACHE_NO;
568 		} else {
569 			cache->cached = BTRFS_CACHE_STARTED;
570 		}
571 		spin_unlock(&cache->lock);
572 		wake_up(&caching_ctl->wait);
573 	}
574 
575 	if (load_cache_only) {
576 		put_caching_control(caching_ctl);
577 		return 0;
578 	}
579 
580 	down_write(&fs_info->extent_commit_sem);
581 	atomic_inc(&caching_ctl->count);
582 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
583 	up_write(&fs_info->extent_commit_sem);
584 
585 	btrfs_get_block_group(cache);
586 
587 	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
588 
589 	return ret;
590 }
591 
592 /*
593  * return the block group that starts at or after bytenr
594  */
595 static struct btrfs_block_group_cache *
596 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
597 {
598 	struct btrfs_block_group_cache *cache;
599 
600 	cache = block_group_cache_tree_search(info, bytenr, 0);
601 
602 	return cache;
603 }
604 
605 /*
606  * return the block group that contains the given bytenr
607  */
608 struct btrfs_block_group_cache *btrfs_lookup_block_group(
609 						 struct btrfs_fs_info *info,
610 						 u64 bytenr)
611 {
612 	struct btrfs_block_group_cache *cache;
613 
614 	cache = block_group_cache_tree_search(info, bytenr, 1);
615 
616 	return cache;
617 }
618 
619 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
620 						  u64 flags)
621 {
622 	struct list_head *head = &info->space_info;
623 	struct btrfs_space_info *found;
624 
625 	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
626 
627 	rcu_read_lock();
628 	list_for_each_entry_rcu(found, head, list) {
629 		if (found->flags & flags) {
630 			rcu_read_unlock();
631 			return found;
632 		}
633 	}
634 	rcu_read_unlock();
635 	return NULL;
636 }
637 
638 /*
639  * after adding space to the filesystem, we need to clear the full flags
640  * on all the space infos.
641  */
642 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
643 {
644 	struct list_head *head = &info->space_info;
645 	struct btrfs_space_info *found;
646 
647 	rcu_read_lock();
648 	list_for_each_entry_rcu(found, head, list)
649 		found->full = 0;
650 	rcu_read_unlock();
651 }
652 
653 u64 btrfs_find_block_group(struct btrfs_root *root,
654 			   u64 search_start, u64 search_hint, int owner)
655 {
656 	struct btrfs_block_group_cache *cache;
657 	u64 used;
658 	u64 last = max(search_hint, search_start);
659 	u64 group_start = 0;
660 	int full_search = 0;
661 	int factor = 9;
662 	int wrapped = 0;
663 again:
664 	while (1) {
665 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
666 		if (!cache)
667 			break;
668 
669 		spin_lock(&cache->lock);
670 		last = cache->key.objectid + cache->key.offset;
671 		used = btrfs_block_group_used(&cache->item);
672 
673 		if ((full_search || !cache->ro) &&
674 		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
675 			if (used + cache->pinned + cache->reserved <
676 			    div_factor(cache->key.offset, factor)) {
677 				group_start = cache->key.objectid;
678 				spin_unlock(&cache->lock);
679 				btrfs_put_block_group(cache);
680 				goto found;
681 			}
682 		}
683 		spin_unlock(&cache->lock);
684 		btrfs_put_block_group(cache);
685 		cond_resched();
686 	}
687 	if (!wrapped) {
688 		last = search_start;
689 		wrapped = 1;
690 		goto again;
691 	}
692 	if (!full_search && factor < 10) {
693 		last = search_start;
694 		full_search = 1;
695 		factor = 10;
696 		goto again;
697 	}
698 found:
699 	return group_start;
700 }
701 
702 /* simple helper to search for an existing extent at a given offset */
703 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
704 {
705 	int ret;
706 	struct btrfs_key key;
707 	struct btrfs_path *path;
708 
709 	path = btrfs_alloc_path();
710 	if (!path)
711 		return -ENOMEM;
712 
713 	key.objectid = start;
714 	key.offset = len;
715 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
716 	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
717 				0, 0);
718 	btrfs_free_path(path);
719 	return ret;
720 }
721 
722 /*
723  * helper function to look up the reference count and flags of an extent.
724  *
725  * The head node for a delayed ref is used to store the sum of all the
726  * reference count modifications queued up in the rbtree.  The head
727  * node may also store the extent flags to set.  This way you can check
728  * to see what the reference count and extent flags would be once all of
729  * the delayed refs have been processed.
730  */
731 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
732 			     struct btrfs_root *root, u64 bytenr,
733 			     u64 num_bytes, u64 *refs, u64 *flags)
734 {
735 	struct btrfs_delayed_ref_head *head;
736 	struct btrfs_delayed_ref_root *delayed_refs;
737 	struct btrfs_path *path;
738 	struct btrfs_extent_item *ei;
739 	struct extent_buffer *leaf;
740 	struct btrfs_key key;
741 	u32 item_size;
742 	u64 num_refs;
743 	u64 extent_flags;
744 	int ret;
745 
746 	path = btrfs_alloc_path();
747 	if (!path)
748 		return -ENOMEM;
749 
750 	key.objectid = bytenr;
751 	key.type = BTRFS_EXTENT_ITEM_KEY;
752 	key.offset = num_bytes;
753 	if (!trans) {
754 		path->skip_locking = 1;
755 		path->search_commit_root = 1;
756 	}
757 again:
758 	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
759 				&key, path, 0, 0);
760 	if (ret < 0)
761 		goto out_free;
762 
763 	if (ret == 0) {
764 		leaf = path->nodes[0];
765 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
766 		if (item_size >= sizeof(*ei)) {
767 			ei = btrfs_item_ptr(leaf, path->slots[0],
768 					    struct btrfs_extent_item);
769 			num_refs = btrfs_extent_refs(leaf, ei);
770 			extent_flags = btrfs_extent_flags(leaf, ei);
771 		} else {
772 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
773 			struct btrfs_extent_item_v0 *ei0;
774 			BUG_ON(item_size != sizeof(*ei0));
775 			ei0 = btrfs_item_ptr(leaf, path->slots[0],
776 					     struct btrfs_extent_item_v0);
777 			num_refs = btrfs_extent_refs_v0(leaf, ei0);
778 			/* FIXME: this isn't correct for data */
779 			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
780 #else
781 			BUG();
782 #endif
783 		}
784 		BUG_ON(num_refs == 0);
785 	} else {
786 		num_refs = 0;
787 		extent_flags = 0;
788 		ret = 0;
789 	}
790 
791 	if (!trans)
792 		goto out;
793 
794 	delayed_refs = &trans->transaction->delayed_refs;
795 	spin_lock(&delayed_refs->lock);
796 	head = btrfs_find_delayed_ref_head(trans, bytenr);
797 	if (head) {
798 		if (!mutex_trylock(&head->mutex)) {
799 			atomic_inc(&head->node.refs);
800 			spin_unlock(&delayed_refs->lock);
801 
802 			btrfs_release_path(path);
803 
804 			/*
805 			 * Mutex was contended, block until it's released and try
806 			 * again
807 			 */
808 			mutex_lock(&head->mutex);
809 			mutex_unlock(&head->mutex);
810 			btrfs_put_delayed_ref(&head->node);
811 			goto again;
812 		}
813 		if (head->extent_op && head->extent_op->update_flags)
814 			extent_flags |= head->extent_op->flags_to_set;
815 		else
816 			BUG_ON(num_refs == 0);
817 
818 		num_refs += head->node.ref_mod;
819 		mutex_unlock(&head->mutex);
820 	}
821 	spin_unlock(&delayed_refs->lock);
822 out:
823 	WARN_ON(num_refs == 0);
824 	if (refs)
825 		*refs = num_refs;
826 	if (flags)
827 		*flags = extent_flags;
828 out_free:
829 	btrfs_free_path(path);
830 	return ret;
831 }
832 
833 /*
834  * Back reference rules.  Back refs have three main goals:
835  *
836  * 1) differentiate between all holders of references to an extent so that
837  *    when a reference is dropped we can make sure it was a valid reference
838  *    before freeing the extent.
839  *
840  * 2) Provide enough information to quickly find the holders of an extent
841  *    if we notice a given block is corrupted or bad.
842  *
843  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
844  *    maintenance.  This is actually the same as #2, but with a slightly
845  *    different use case.
846  *
847  * There are two kinds of back refs. The implicit back refs is optimized
848  * for pointers in non-shared tree blocks. For a given pointer in a block,
849  * back refs of this kind provide information about the block's owner tree
850  * and the pointer's key. These information allow us to find the block by
851  * b-tree searching. The full back refs is for pointers in tree blocks not
852  * referenced by their owner trees. The location of tree block is recorded
853  * in the back refs. Actually the full back refs is generic, and can be
854  * used in all cases the implicit back refs is used. The major shortcoming
855  * of the full back refs is its overhead. Every time a tree block gets
856  * COWed, we have to update back refs entry for all pointers in it.
857  *
858  * For a newly allocated tree block, we use implicit back refs for
859  * pointers in it. This means most tree-related operations only involve
860  * implicit back refs. For a tree block created in an old transaction, the
861  * only way to drop a reference to it is to COW it. So we can detect the
862  * event that a tree block loses its owner tree's reference and do the
863  * back refs conversion.
864  *
865  * When a tree block is COW'd through a tree, there are four cases:
866  *
867  * The reference count of the block is one and the tree is the block's
868  * owner tree. Nothing to do in this case.
869  *
870  * The reference count of the block is one and the tree is not the
871  * block's owner tree. In this case, full back refs are used for pointers
872  * in the block. Remove these full back refs and add implicit back refs
873  * for every pointer in the new block.
874  *
875  * The reference count of the block is greater than one and the tree is
876  * the block's owner tree. In this case, implicit back refs are used for
877  * pointers in the block. Add full back refs for every pointer in the
878  * block and increase the lower level extents' reference counts. The
879  * original implicit back refs are inherited by the new block.
880  *
881  * The reference count of the block is greater than one and the tree is
882  * not the block's owner tree. Add implicit back refs for every pointer in
883  * the new block and increase the lower level extents' reference counts.
884  *
885  * Back Reference Key composing:
886  *
887  * The key objectid corresponds to the first byte in the extent.
888  * The key type is used to differentiate between types of back refs.
889  * There are different meanings of the key offset for different types
890  * of back refs.
891  *
892  * File extents can be referenced by:
893  *
894  * - multiple snapshots, subvolumes, or different generations in one subvol
895  * - different files inside a single subvolume
896  * - different offsets inside a file (bookend extents in file.c)
897  *
898  * The extent ref structure for the implicit back refs has fields for:
899  *
900  * - Objectid of the subvolume root
901  * - objectid of the file holding the reference
902  * - original offset in the file
903  * - how many bookend extents
904  *
905  * The key offset for the implicit back refs is the hash of the first
906  * three fields.
907  *
908  * The extent ref structure for the full back refs has a field for:
909  *
910  * - number of pointers in the tree leaf
911  *
912  * The key offset for the full back refs is the first byte of
913  * the tree leaf.
914  *
915  * When a file extent is allocated, the implicit back refs are used
916  * and the fields are filled in:
917  *
918  *     (root_key.objectid, inode objectid, offset in file, 1)
919  *
920  * When a file extent is removed by file truncation, we find the
921  * corresponding implicit back refs and check the following fields:
922  *
923  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
924  *
925  * Btree extents can be referenced by:
926  *
927  * - Different subvolumes
928  *
929  * Both the implicit back refs and the full back refs for tree blocks
930  * consist only of a key. The key offset for the implicit back refs is
931  * the objectid of the block's owner tree. The key offset for the full
932  * back refs is the first byte of the parent block.
933  *
934  * When implicit back refs are used, information about the lowest key and
935  * the level of the tree block is required. This information is stored in
936  * the tree block info structure.
937  */
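/*
 * Illustrative example: a file extent at bytenr B referenced by inode 257
 * at file offset 0 in the tree of root 5 gets the implicit back ref key
 *
 *     (B, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent shared via a leaf at bytenr P would instead use
 * the full back ref key (B, BTRFS_SHARED_DATA_REF_KEY, P).
 */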
938 
939 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
940 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
941 				  struct btrfs_root *root,
942 				  struct btrfs_path *path,
943 				  u64 owner, u32 extra_size)
944 {
945 	struct btrfs_extent_item *item;
946 	struct btrfs_extent_item_v0 *ei0;
947 	struct btrfs_extent_ref_v0 *ref0;
948 	struct btrfs_tree_block_info *bi;
949 	struct extent_buffer *leaf;
950 	struct btrfs_key key;
951 	struct btrfs_key found_key;
952 	u32 new_size = sizeof(*item);
953 	u64 refs;
954 	int ret;
955 
956 	leaf = path->nodes[0];
957 	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
958 
959 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
960 	ei0 = btrfs_item_ptr(leaf, path->slots[0],
961 			     struct btrfs_extent_item_v0);
962 	refs = btrfs_extent_refs_v0(leaf, ei0);
963 
964 	if (owner == (u64)-1) {
965 		while (1) {
966 			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
967 				ret = btrfs_next_leaf(root, path);
968 				if (ret < 0)
969 					return ret;
970 				BUG_ON(ret > 0); /* Corruption */
971 				leaf = path->nodes[0];
972 			}
973 			btrfs_item_key_to_cpu(leaf, &found_key,
974 					      path->slots[0]);
975 			BUG_ON(key.objectid != found_key.objectid);
976 			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
977 				path->slots[0]++;
978 				continue;
979 			}
980 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
981 					      struct btrfs_extent_ref_v0);
982 			owner = btrfs_ref_objectid_v0(leaf, ref0);
983 			break;
984 		}
985 	}
986 	btrfs_release_path(path);
987 
988 	if (owner < BTRFS_FIRST_FREE_OBJECTID)
989 		new_size += sizeof(*bi);
990 
991 	new_size -= sizeof(*ei0);
992 	ret = btrfs_search_slot(trans, root, &key, path,
993 				new_size + extra_size, 1);
994 	if (ret < 0)
995 		return ret;
996 	BUG_ON(ret); /* Corruption */
997 
998 	btrfs_extend_item(trans, root, path, new_size);
999 
1000 	leaf = path->nodes[0];
1001 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1002 	btrfs_set_extent_refs(leaf, item, refs);
1003 	/* FIXME: get real generation */
1004 	btrfs_set_extent_generation(leaf, item, 0);
1005 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1006 		btrfs_set_extent_flags(leaf, item,
1007 				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
1008 				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
1009 		bi = (struct btrfs_tree_block_info *)(item + 1);
1010 		/* FIXME: get first key of the block */
1011 		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1012 		btrfs_set_tree_block_level(leaf, bi, (int)owner);
1013 	} else {
1014 		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1015 	}
1016 	btrfs_mark_buffer_dirty(leaf);
1017 	return 0;
1018 }
1019 #endif
1020 
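/*
 * Hash used as the key offset of implicit data back refs.  Note the 31-bit
 * shift: the two crc32c halves overlap by one bit.  The result is stored on
 * disk as the key offset, so the exact formula is part of the disk format
 * and must not be changed.
 */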
1021 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1022 {
1023 	u32 high_crc = ~(u32)0;
1024 	u32 low_crc = ~(u32)0;
1025 	__le64 lenum;
1026 
1027 	lenum = cpu_to_le64(root_objectid);
1028 	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1029 	lenum = cpu_to_le64(owner);
1030 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1031 	lenum = cpu_to_le64(offset);
1032 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1033 
1034 	return ((u64)high_crc << 31) ^ (u64)low_crc;
1035 }
1036 
1037 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1038 				     struct btrfs_extent_data_ref *ref)
1039 {
1040 	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1041 				    btrfs_extent_data_ref_objectid(leaf, ref),
1042 				    btrfs_extent_data_ref_offset(leaf, ref));
1043 }
1044 
1045 static int match_extent_data_ref(struct extent_buffer *leaf,
1046 				 struct btrfs_extent_data_ref *ref,
1047 				 u64 root_objectid, u64 owner, u64 offset)
1048 {
1049 	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1050 	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1051 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
1052 		return 0;
1053 	return 1;
1054 }
1055 
1056 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1057 					   struct btrfs_root *root,
1058 					   struct btrfs_path *path,
1059 					   u64 bytenr, u64 parent,
1060 					   u64 root_objectid,
1061 					   u64 owner, u64 offset)
1062 {
1063 	struct btrfs_key key;
1064 	struct btrfs_extent_data_ref *ref;
1065 	struct extent_buffer *leaf;
1066 	u32 nritems;
1067 	int ret;
1068 	int recow;
1069 	int err = -ENOENT;
1070 
1071 	key.objectid = bytenr;
1072 	if (parent) {
1073 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1074 		key.offset = parent;
1075 	} else {
1076 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1077 		key.offset = hash_extent_data_ref(root_objectid,
1078 						  owner, offset);
1079 	}
1080 again:
1081 	recow = 0;
1082 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1083 	if (ret < 0) {
1084 		err = ret;
1085 		goto fail;
1086 	}
1087 
1088 	if (parent) {
1089 		if (!ret)
1090 			return 0;
1091 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1092 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1093 		btrfs_release_path(path);
1094 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1095 		if (ret < 0) {
1096 			err = ret;
1097 			goto fail;
1098 		}
1099 		if (!ret)
1100 			return 0;
1101 #endif
1102 		goto fail;
1103 	}
1104 
1105 	leaf = path->nodes[0];
1106 	nritems = btrfs_header_nritems(leaf);
1107 	while (1) {
1108 		if (path->slots[0] >= nritems) {
1109 			ret = btrfs_next_leaf(root, path);
1110 			if (ret < 0)
1111 				err = ret;
1112 			if (ret)
1113 				goto fail;
1114 
1115 			leaf = path->nodes[0];
1116 			nritems = btrfs_header_nritems(leaf);
1117 			recow = 1;
1118 		}
1119 
1120 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1121 		if (key.objectid != bytenr ||
1122 		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
1123 			goto fail;
1124 
1125 		ref = btrfs_item_ptr(leaf, path->slots[0],
1126 				     struct btrfs_extent_data_ref);
1127 
1128 		if (match_extent_data_ref(leaf, ref, root_objectid,
1129 					  owner, offset)) {
1130 			if (recow) {
1131 				btrfs_release_path(path);
1132 				goto again;
1133 			}
1134 			err = 0;
1135 			break;
1136 		}
1137 		path->slots[0]++;
1138 	}
1139 fail:
1140 	return err;
1141 }
1142 
1143 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1144 					   struct btrfs_root *root,
1145 					   struct btrfs_path *path,
1146 					   u64 bytenr, u64 parent,
1147 					   u64 root_objectid, u64 owner,
1148 					   u64 offset, int refs_to_add)
1149 {
1150 	struct btrfs_key key;
1151 	struct extent_buffer *leaf;
1152 	u32 size;
1153 	u32 num_refs;
1154 	int ret;
1155 
1156 	key.objectid = bytenr;
1157 	if (parent) {
1158 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1159 		key.offset = parent;
1160 		size = sizeof(struct btrfs_shared_data_ref);
1161 	} else {
1162 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1163 		key.offset = hash_extent_data_ref(root_objectid,
1164 						  owner, offset);
1165 		size = sizeof(struct btrfs_extent_data_ref);
1166 	}
1167 
1168 	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1169 	if (ret && ret != -EEXIST)
1170 		goto fail;
1171 
1172 	leaf = path->nodes[0];
1173 	if (parent) {
1174 		struct btrfs_shared_data_ref *ref;
1175 		ref = btrfs_item_ptr(leaf, path->slots[0],
1176 				     struct btrfs_shared_data_ref);
1177 		if (ret == 0) {
1178 			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1179 		} else {
1180 			num_refs = btrfs_shared_data_ref_count(leaf, ref);
1181 			num_refs += refs_to_add;
1182 			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1183 		}
1184 	} else {
1185 		struct btrfs_extent_data_ref *ref;
1186 		while (ret == -EEXIST) {
1187 			ref = btrfs_item_ptr(leaf, path->slots[0],
1188 					     struct btrfs_extent_data_ref);
1189 			if (match_extent_data_ref(leaf, ref, root_objectid,
1190 						  owner, offset))
1191 				break;
1192 			btrfs_release_path(path);
1193 			key.offset++;
1194 			ret = btrfs_insert_empty_item(trans, root, path, &key,
1195 						      size);
1196 			if (ret && ret != -EEXIST)
1197 				goto fail;
1198 
1199 			leaf = path->nodes[0];
1200 		}
1201 		ref = btrfs_item_ptr(leaf, path->slots[0],
1202 				     struct btrfs_extent_data_ref);
1203 		if (ret == 0) {
1204 			btrfs_set_extent_data_ref_root(leaf, ref,
1205 						       root_objectid);
1206 			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1207 			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1208 			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1209 		} else {
1210 			num_refs = btrfs_extent_data_ref_count(leaf, ref);
1211 			num_refs += refs_to_add;
1212 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1213 		}
1214 	}
1215 	btrfs_mark_buffer_dirty(leaf);
1216 	ret = 0;
1217 fail:
1218 	btrfs_release_path(path);
1219 	return ret;
1220 }
1221 
1222 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1223 					   struct btrfs_root *root,
1224 					   struct btrfs_path *path,
1225 					   int refs_to_drop)
1226 {
1227 	struct btrfs_key key;
1228 	struct btrfs_extent_data_ref *ref1 = NULL;
1229 	struct btrfs_shared_data_ref *ref2 = NULL;
1230 	struct extent_buffer *leaf;
1231 	u32 num_refs = 0;
1232 	int ret = 0;
1233 
1234 	leaf = path->nodes[0];
1235 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1236 
1237 	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1238 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1239 				      struct btrfs_extent_data_ref);
1240 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1241 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1242 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1243 				      struct btrfs_shared_data_ref);
1244 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1245 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1246 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1247 		struct btrfs_extent_ref_v0 *ref0;
1248 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1249 				      struct btrfs_extent_ref_v0);
1250 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1251 #endif
1252 	} else {
1253 		BUG();
1254 	}
1255 
1256 	BUG_ON(num_refs < refs_to_drop);
1257 	num_refs -= refs_to_drop;
1258 
1259 	if (num_refs == 0) {
1260 		ret = btrfs_del_item(trans, root, path);
1261 	} else {
1262 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1263 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1264 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1265 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1266 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1267 		else {
1268 			struct btrfs_extent_ref_v0 *ref0;
1269 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1270 					struct btrfs_extent_ref_v0);
1271 			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1272 		}
1273 #endif
1274 		btrfs_mark_buffer_dirty(leaf);
1275 	}
1276 	return ret;
1277 }
1278 
1279 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1280 					  struct btrfs_path *path,
1281 					  struct btrfs_extent_inline_ref *iref)
1282 {
1283 	struct btrfs_key key;
1284 	struct extent_buffer *leaf;
1285 	struct btrfs_extent_data_ref *ref1;
1286 	struct btrfs_shared_data_ref *ref2;
1287 	u32 num_refs = 0;
1288 
1289 	leaf = path->nodes[0];
1290 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1291 	if (iref) {
1292 		if (btrfs_extent_inline_ref_type(leaf, iref) ==
1293 		    BTRFS_EXTENT_DATA_REF_KEY) {
1294 			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1295 			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1296 		} else {
1297 			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1298 			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1299 		}
1300 	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1301 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1302 				      struct btrfs_extent_data_ref);
1303 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1304 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1305 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1306 				      struct btrfs_shared_data_ref);
1307 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1308 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1309 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1310 		struct btrfs_extent_ref_v0 *ref0;
1311 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1312 				      struct btrfs_extent_ref_v0);
1313 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1314 #endif
1315 	} else {
1316 		WARN_ON(1);
1317 	}
1318 	return num_refs;
1319 }
1320 
1321 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1322 					  struct btrfs_root *root,
1323 					  struct btrfs_path *path,
1324 					  u64 bytenr, u64 parent,
1325 					  u64 root_objectid)
1326 {
1327 	struct btrfs_key key;
1328 	int ret;
1329 
1330 	key.objectid = bytenr;
1331 	if (parent) {
1332 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1333 		key.offset = parent;
1334 	} else {
1335 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1336 		key.offset = root_objectid;
1337 	}
1338 
1339 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1340 	if (ret > 0)
1341 		ret = -ENOENT;
1342 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1343 	if (ret == -ENOENT && parent) {
1344 		btrfs_release_path(path);
1345 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1346 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1347 		if (ret > 0)
1348 			ret = -ENOENT;
1349 	}
1350 #endif
1351 	return ret;
1352 }
1353 
1354 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1355 					  struct btrfs_root *root,
1356 					  struct btrfs_path *path,
1357 					  u64 bytenr, u64 parent,
1358 					  u64 root_objectid)
1359 {
1360 	struct btrfs_key key;
1361 	int ret;
1362 
1363 	key.objectid = bytenr;
1364 	if (parent) {
1365 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1366 		key.offset = parent;
1367 	} else {
1368 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1369 		key.offset = root_objectid;
1370 	}
1371 
1372 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1373 	btrfs_release_path(path);
1374 	return ret;
1375 }
1376 
1377 static inline int extent_ref_type(u64 parent, u64 owner)
1378 {
1379 	int type;
1380 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1381 		if (parent > 0)
1382 			type = BTRFS_SHARED_BLOCK_REF_KEY;
1383 		else
1384 			type = BTRFS_TREE_BLOCK_REF_KEY;
1385 	} else {
1386 		if (parent > 0)
1387 			type = BTRFS_SHARED_DATA_REF_KEY;
1388 		else
1389 			type = BTRFS_EXTENT_DATA_REF_KEY;
1390 	}
1391 	return type;
1392 }
1393 
1394 static int find_next_key(struct btrfs_path *path, int level,
1395 			 struct btrfs_key *key)
1396 
1397 {
1398 	for (; level < BTRFS_MAX_LEVEL; level++) {
1399 		if (!path->nodes[level])
1400 			break;
1401 		if (path->slots[level] + 1 >=
1402 		    btrfs_header_nritems(path->nodes[level]))
1403 			continue;
1404 		if (level == 0)
1405 			btrfs_item_key_to_cpu(path->nodes[level], key,
1406 					      path->slots[level] + 1);
1407 		else
1408 			btrfs_node_key_to_cpu(path->nodes[level], key,
1409 					      path->slots[level] + 1);
1410 		return 0;
1411 	}
1412 	return 1;
1413 }
1414 
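/*
 * Extent item layout assumed by the back ref code below: the extent item
 * is followed by a btrfs_tree_block_info (tree blocks only) and then by
 * the inline refs, sorted by type:
 *
 *     btrfs_extent_item [btrfs_tree_block_info] iref0 iref1 ...
 */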
1415 /*
1416  * look for inline back ref. if back ref is found, *ref_ret is set
1417  * to the address of inline back ref, and 0 is returned.
1418  *
1419  * if back ref isn't found, *ref_ret is set to the address where it
1420  * should be inserted, and -ENOENT is returned.
1421  *
1422  * if insert is true and there are too many inline back refs, the path
1423  * points to the extent item, and -EAGAIN is returned.
1424  *
1425  * NOTE: inline back refs are ordered in the same way that back ref
1426  *	 items in the tree are ordered.
1427  */
1428 static noinline_for_stack
1429 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1430 				 struct btrfs_root *root,
1431 				 struct btrfs_path *path,
1432 				 struct btrfs_extent_inline_ref **ref_ret,
1433 				 u64 bytenr, u64 num_bytes,
1434 				 u64 parent, u64 root_objectid,
1435 				 u64 owner, u64 offset, int insert)
1436 {
1437 	struct btrfs_key key;
1438 	struct extent_buffer *leaf;
1439 	struct btrfs_extent_item *ei;
1440 	struct btrfs_extent_inline_ref *iref;
1441 	u64 flags;
1442 	u64 item_size;
1443 	unsigned long ptr;
1444 	unsigned long end;
1445 	int extra_size;
1446 	int type;
1447 	int want;
1448 	int ret;
1449 	int err = 0;
1450 
1451 	key.objectid = bytenr;
1452 	key.type = BTRFS_EXTENT_ITEM_KEY;
1453 	key.offset = num_bytes;
1454 
1455 	want = extent_ref_type(parent, owner);
1456 	if (insert) {
1457 		extra_size = btrfs_extent_inline_ref_size(want);
1458 		path->keep_locks = 1;
1459 	} else
1460 		extra_size = -1;
1461 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1462 	if (ret < 0) {
1463 		err = ret;
1464 		goto out;
1465 	}
1466 	if (ret && !insert) {
1467 		err = -ENOENT;
1468 		goto out;
1469 	}
1470 	BUG_ON(ret); /* Corruption */
1471 
1472 	leaf = path->nodes[0];
1473 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1474 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1475 	if (item_size < sizeof(*ei)) {
1476 		if (!insert) {
1477 			err = -ENOENT;
1478 			goto out;
1479 		}
1480 		ret = convert_extent_item_v0(trans, root, path, owner,
1481 					     extra_size);
1482 		if (ret < 0) {
1483 			err = ret;
1484 			goto out;
1485 		}
1486 		leaf = path->nodes[0];
1487 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1488 	}
1489 #endif
1490 	BUG_ON(item_size < sizeof(*ei));
1491 
1492 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1493 	flags = btrfs_extent_flags(leaf, ei);
1494 
1495 	ptr = (unsigned long)(ei + 1);
1496 	end = (unsigned long)ei + item_size;
1497 
1498 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1499 		ptr += sizeof(struct btrfs_tree_block_info);
1500 		BUG_ON(ptr > end);
1501 	} else {
1502 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1503 	}
1504 
1505 	err = -ENOENT;
1506 	while (1) {
1507 		if (ptr >= end) {
1508 			WARN_ON(ptr > end);
1509 			break;
1510 		}
1511 		iref = (struct btrfs_extent_inline_ref *)ptr;
1512 		type = btrfs_extent_inline_ref_type(leaf, iref);
1513 		if (want < type)
1514 			break;
1515 		if (want > type) {
1516 			ptr += btrfs_extent_inline_ref_size(type);
1517 			continue;
1518 		}
1519 
1520 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1521 			struct btrfs_extent_data_ref *dref;
1522 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1523 			if (match_extent_data_ref(leaf, dref, root_objectid,
1524 						  owner, offset)) {
1525 				err = 0;
1526 				break;
1527 			}
1528 			if (hash_extent_data_ref_item(leaf, dref) <
1529 			    hash_extent_data_ref(root_objectid, owner, offset))
1530 				break;
1531 		} else {
1532 			u64 ref_offset;
1533 			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1534 			if (parent > 0) {
1535 				if (parent == ref_offset) {
1536 					err = 0;
1537 					break;
1538 				}
1539 				if (ref_offset < parent)
1540 					break;
1541 			} else {
1542 				if (root_objectid == ref_offset) {
1543 					err = 0;
1544 					break;
1545 				}
1546 				if (ref_offset < root_objectid)
1547 					break;
1548 			}
1549 		}
1550 		ptr += btrfs_extent_inline_ref_size(type);
1551 	}
1552 	if (err == -ENOENT && insert) {
1553 		if (item_size + extra_size >=
1554 		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1555 			err = -EAGAIN;
1556 			goto out;
1557 		}
1558 		/*
1559 		 * To add a new inline back ref, we have to make sure
1560 		 * there is no corresponding back ref item.
1561 		 * For simplicity, we just do not add a new inline back
1562 		 * ref if there is any kind of item for this block.
1563 		 */
1564 		if (find_next_key(path, 0, &key) == 0 &&
1565 		    key.objectid == bytenr &&
1566 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1567 			err = -EAGAIN;
1568 			goto out;
1569 		}
1570 	}
1571 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1572 out:
1573 	if (insert) {
1574 		path->keep_locks = 0;
1575 		btrfs_unlock_up_safe(path, 1);
1576 	}
1577 	return err;
1578 }
1579 
1580 /*
1581  * helper to add a new inline back ref
1582  */
1583 static noinline_for_stack
1584 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1585 				 struct btrfs_root *root,
1586 				 struct btrfs_path *path,
1587 				 struct btrfs_extent_inline_ref *iref,
1588 				 u64 parent, u64 root_objectid,
1589 				 u64 owner, u64 offset, int refs_to_add,
1590 				 struct btrfs_delayed_extent_op *extent_op)
1591 {
1592 	struct extent_buffer *leaf;
1593 	struct btrfs_extent_item *ei;
1594 	unsigned long ptr;
1595 	unsigned long end;
1596 	unsigned long item_offset;
1597 	u64 refs;
1598 	int size;
1599 	int type;
1600 
1601 	leaf = path->nodes[0];
1602 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1603 	item_offset = (unsigned long)iref - (unsigned long)ei;
1604 
1605 	type = extent_ref_type(parent, owner);
1606 	size = btrfs_extent_inline_ref_size(type);
1607 
1608 	btrfs_extend_item(trans, root, path, size);
1609 
1610 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1611 	refs = btrfs_extent_refs(leaf, ei);
1612 	refs += refs_to_add;
1613 	btrfs_set_extent_refs(leaf, ei, refs);
1614 	if (extent_op)
1615 		__run_delayed_extent_op(extent_op, leaf, ei);
1616 
1617 	ptr = (unsigned long)ei + item_offset;
1618 	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
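	/*
	 * Shift the refs at and after the insertion point towards the end of
	 * the (already extended) item to make room for the new inline ref.
	 */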
1619 	if (ptr < end - size)
1620 		memmove_extent_buffer(leaf, ptr + size, ptr,
1621 				      end - size - ptr);
1622 
1623 	iref = (struct btrfs_extent_inline_ref *)ptr;
1624 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1625 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1626 		struct btrfs_extent_data_ref *dref;
1627 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1628 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1629 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1630 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1631 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1632 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1633 		struct btrfs_shared_data_ref *sref;
1634 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1635 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1636 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1637 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1638 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1639 	} else {
1640 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1641 	}
1642 	btrfs_mark_buffer_dirty(leaf);
1643 }
1644 
1645 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1646 				 struct btrfs_root *root,
1647 				 struct btrfs_path *path,
1648 				 struct btrfs_extent_inline_ref **ref_ret,
1649 				 u64 bytenr, u64 num_bytes, u64 parent,
1650 				 u64 root_objectid, u64 owner, u64 offset)
1651 {
1652 	int ret;
1653 
1654 	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1655 					   bytenr, num_bytes, parent,
1656 					   root_objectid, owner, offset, 0);
1657 	if (ret != -ENOENT)
1658 		return ret;
1659 
1660 	btrfs_release_path(path);
1661 	*ref_ret = NULL;
1662 
1663 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1664 		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1665 					    root_objectid);
1666 	} else {
1667 		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1668 					     root_objectid, owner, offset);
1669 	}
1670 	return ret;
1671 }
1672 
1673 /*
1674  * helper to update/remove inline back ref
1675  */
1676 static noinline_for_stack
1677 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1678 				  struct btrfs_root *root,
1679 				  struct btrfs_path *path,
1680 				  struct btrfs_extent_inline_ref *iref,
1681 				  int refs_to_mod,
1682 				  struct btrfs_delayed_extent_op *extent_op)
1683 {
1684 	struct extent_buffer *leaf;
1685 	struct btrfs_extent_item *ei;
1686 	struct btrfs_extent_data_ref *dref = NULL;
1687 	struct btrfs_shared_data_ref *sref = NULL;
1688 	unsigned long ptr;
1689 	unsigned long end;
1690 	u32 item_size;
1691 	int size;
1692 	int type;
1693 	u64 refs;
1694 
1695 	leaf = path->nodes[0];
1696 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1697 	refs = btrfs_extent_refs(leaf, ei);
1698 	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1699 	refs += refs_to_mod;
1700 	btrfs_set_extent_refs(leaf, ei, refs);
1701 	if (extent_op)
1702 		__run_delayed_extent_op(extent_op, leaf, ei);
1703 
1704 	type = btrfs_extent_inline_ref_type(leaf, iref);
1705 
1706 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1707 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1708 		refs = btrfs_extent_data_ref_count(leaf, dref);
1709 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1710 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1711 		refs = btrfs_shared_data_ref_count(leaf, sref);
1712 	} else {
1713 		refs = 1;
1714 		BUG_ON(refs_to_mod != -1);
1715 	}
1716 
1717 	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1718 	refs += refs_to_mod;
1719 
1720 	if (refs > 0) {
1721 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1722 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1723 		else
1724 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1725 	} else {
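		/*
		 * The last ref of this type is gone: remove the inline ref by
		 * copying the tail of the item over it and shrinking the item.
		 */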
1726 		size = btrfs_extent_inline_ref_size(type);
1727 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1728 		ptr = (unsigned long)iref;
1729 		end = (unsigned long)ei + item_size;
1730 		if (ptr + size < end)
1731 			memmove_extent_buffer(leaf, ptr, ptr + size,
1732 					      end - ptr - size);
1733 		item_size -= size;
1734 		btrfs_truncate_item(trans, root, path, item_size, 1);
1735 	}
1736 	btrfs_mark_buffer_dirty(leaf);
1737 }
1738 
1739 static noinline_for_stack
1740 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1741 				 struct btrfs_root *root,
1742 				 struct btrfs_path *path,
1743 				 u64 bytenr, u64 num_bytes, u64 parent,
1744 				 u64 root_objectid, u64 owner,
1745 				 u64 offset, int refs_to_add,
1746 				 struct btrfs_delayed_extent_op *extent_op)
1747 {
1748 	struct btrfs_extent_inline_ref *iref;
1749 	int ret;
1750 
1751 	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1752 					   bytenr, num_bytes, parent,
1753 					   root_objectid, owner, offset, 1);
1754 	if (ret == 0) {
1755 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1756 		update_inline_extent_backref(trans, root, path, iref,
1757 					     refs_to_add, extent_op);
1758 	} else if (ret == -ENOENT) {
1759 		setup_inline_extent_backref(trans, root, path, iref, parent,
1760 					    root_objectid, owner, offset,
1761 					    refs_to_add, extent_op);
1762 		ret = 0;
1763 	}
1764 	return ret;
1765 }
1766 
1767 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1768 				 struct btrfs_root *root,
1769 				 struct btrfs_path *path,
1770 				 u64 bytenr, u64 parent, u64 root_objectid,
1771 				 u64 owner, u64 offset, int refs_to_add)
1772 {
1773 	int ret;
1774 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1775 		BUG_ON(refs_to_add != 1);
1776 		ret = insert_tree_block_ref(trans, root, path, bytenr,
1777 					    parent, root_objectid);
1778 	} else {
1779 		ret = insert_extent_data_ref(trans, root, path, bytenr,
1780 					     parent, root_objectid,
1781 					     owner, offset, refs_to_add);
1782 	}
1783 	return ret;
1784 }
1785 
1786 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1787 				 struct btrfs_root *root,
1788 				 struct btrfs_path *path,
1789 				 struct btrfs_extent_inline_ref *iref,
1790 				 int refs_to_drop, int is_data)
1791 {
1792 	int ret = 0;
1793 
1794 	BUG_ON(!is_data && refs_to_drop != 1);
1795 	if (iref) {
1796 		update_inline_extent_backref(trans, root, path, iref,
1797 					     -refs_to_drop, NULL);
1798 	} else if (is_data) {
1799 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1800 	} else {
1801 		ret = btrfs_del_item(trans, root, path);
1802 	}
1803 	return ret;
1804 }
1805 
1806 static int btrfs_issue_discard(struct block_device *bdev,
1807 				u64 start, u64 len)
1808 {
1809 	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1810 }
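
/*
 * blkdev_issue_discard() takes 512-byte sectors, hence the ">> 9" above
 * (2^9 == 512).  A hedged stand-alone version of the same conversion:
 * toy_bytes_to_sectors(4096) == 8.
 */
#include <stdint.h>

static inline uint64_t toy_bytes_to_sectors(uint64_t bytes)
{
	return bytes >> 9;	/* 512-byte sectors */
}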
1811 
1812 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1813 				u64 num_bytes, u64 *actual_bytes)
1814 {
1815 	int ret;
1816 	u64 discarded_bytes = 0;
1817 	struct btrfs_bio *bbio = NULL;
1818 
1819 
1820 	/* Tell the block device(s) that the sectors can be discarded */
1821 	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1822 			      bytenr, &num_bytes, &bbio, 0);
1823 	/* Error condition is -ENOMEM */
1824 	if (!ret) {
1825 		struct btrfs_bio_stripe *stripe = bbio->stripes;
1826 		int i;
1827 
1828 
1829 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1830 			if (!stripe->dev->can_discard)
1831 				continue;
1832 
1833 			ret = btrfs_issue_discard(stripe->dev->bdev,
1834 						  stripe->physical,
1835 						  stripe->length);
1836 			if (!ret)
1837 				discarded_bytes += stripe->length;
1838 			else if (ret != -EOPNOTSUPP)
1839 				break; /* Logic errors or -ENOMEM, or -EIO, though it's not clear how -EIO could happen (JDM) */
1840 
1841 			/*
1842 			 * In case we get back EOPNOTSUPP for some reason,
1843 			 * just ignore the return value so we don't screw up
1844 			 * people calling discard_extent.
1845 			 */
1846 			ret = 0;
1847 		}
1848 		kfree(bbio);
1849 	}
1850 
1851 	if (actual_bytes)
1852 		*actual_bytes = discarded_bytes;
1853 
1854 
1855 	return ret;
1856 }
1857 
1858 /* Can return -ENOMEM */
1859 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1860 			 struct btrfs_root *root,
1861 			 u64 bytenr, u64 num_bytes, u64 parent,
1862 			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
1863 {
1864 	int ret;
1865 	struct btrfs_fs_info *fs_info = root->fs_info;
1866 
1867 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1868 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
1869 
1870 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1871 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1872 					num_bytes,
1873 					parent, root_objectid, (int)owner,
1874 					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1875 	} else {
1876 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1877 					num_bytes,
1878 					parent, root_objectid, owner, offset,
1879 					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1880 	}
1881 	return ret;
1882 }
1883 
1884 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1885 				  struct btrfs_root *root,
1886 				  u64 bytenr, u64 num_bytes,
1887 				  u64 parent, u64 root_objectid,
1888 				  u64 owner, u64 offset, int refs_to_add,
1889 				  struct btrfs_delayed_extent_op *extent_op)
1890 {
1891 	struct btrfs_path *path;
1892 	struct extent_buffer *leaf;
1893 	struct btrfs_extent_item *item;
1894 	u64 refs;
1895 	int ret;
1896 	int err = 0;
1897 
1898 	path = btrfs_alloc_path();
1899 	if (!path)
1900 		return -ENOMEM;
1901 
1902 	path->reada = 1;
1903 	path->leave_spinning = 1;
1904 	/* this will set up the path even if it fails to insert the back ref */
1905 	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1906 					   path, bytenr, num_bytes, parent,
1907 					   root_objectid, owner, offset,
1908 					   refs_to_add, extent_op);
1909 	if (ret == 0)
1910 		goto out;
1911 
1912 	if (ret != -EAGAIN) {
1913 		err = ret;
1914 		goto out;
1915 	}
1916 
1917 	leaf = path->nodes[0];
1918 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1919 	refs = btrfs_extent_refs(leaf, item);
1920 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1921 	if (extent_op)
1922 		__run_delayed_extent_op(extent_op, leaf, item);
1923 
1924 	btrfs_mark_buffer_dirty(leaf);
1925 	btrfs_release_path(path);
1926 
1927 	path->reada = 1;
1928 	path->leave_spinning = 1;
1929 
1930 	/* now insert the actual backref */
1931 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
1932 				    path, bytenr, parent, root_objectid,
1933 				    owner, offset, refs_to_add);
1934 	if (ret)
1935 		btrfs_abort_transaction(trans, root, ret);
1936 out:
1937 	btrfs_free_path(path);
1938 	return err;
1939 }
1940 
1941 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1942 				struct btrfs_root *root,
1943 				struct btrfs_delayed_ref_node *node,
1944 				struct btrfs_delayed_extent_op *extent_op,
1945 				int insert_reserved)
1946 {
1947 	int ret = 0;
1948 	struct btrfs_delayed_data_ref *ref;
1949 	struct btrfs_key ins;
1950 	u64 parent = 0;
1951 	u64 ref_root = 0;
1952 	u64 flags = 0;
1953 
1954 	ins.objectid = node->bytenr;
1955 	ins.offset = node->num_bytes;
1956 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1957 
1958 	ref = btrfs_delayed_node_to_data_ref(node);
1959 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1960 		parent = ref->parent;
1961 	else
1962 		ref_root = ref->root;
1963 
1964 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1965 		if (extent_op) {
1966 			BUG_ON(extent_op->update_key);
1967 			flags |= extent_op->flags_to_set;
1968 		}
1969 		ret = alloc_reserved_file_extent(trans, root,
1970 						 parent, ref_root, flags,
1971 						 ref->objectid, ref->offset,
1972 						 &ins, node->ref_mod);
1973 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1974 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1975 					     node->num_bytes, parent,
1976 					     ref_root, ref->objectid,
1977 					     ref->offset, node->ref_mod,
1978 					     extent_op);
1979 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1980 		ret = __btrfs_free_extent(trans, root, node->bytenr,
1981 					  node->num_bytes, parent,
1982 					  ref_root, ref->objectid,
1983 					  ref->offset, node->ref_mod,
1984 					  extent_op);
1985 	} else {
1986 		BUG();
1987 	}
1988 	return ret;
1989 }
1990 
1991 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1992 				    struct extent_buffer *leaf,
1993 				    struct btrfs_extent_item *ei)
1994 {
1995 	u64 flags = btrfs_extent_flags(leaf, ei);
1996 	if (extent_op->update_flags) {
1997 		flags |= extent_op->flags_to_set;
1998 		btrfs_set_extent_flags(leaf, ei, flags);
1999 	}
2000 
2001 	if (extent_op->update_key) {
2002 		struct btrfs_tree_block_info *bi;
2003 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2004 		bi = (struct btrfs_tree_block_info *)(ei + 1);
2005 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2006 	}
2007 }
2008 
2009 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2010 				 struct btrfs_root *root,
2011 				 struct btrfs_delayed_ref_node *node,
2012 				 struct btrfs_delayed_extent_op *extent_op)
2013 {
2014 	struct btrfs_key key;
2015 	struct btrfs_path *path;
2016 	struct btrfs_extent_item *ei;
2017 	struct extent_buffer *leaf;
2018 	u32 item_size;
2019 	int ret;
2020 	int err = 0;
2021 
2022 	if (trans->aborted)
2023 		return 0;
2024 
2025 	path = btrfs_alloc_path();
2026 	if (!path)
2027 		return -ENOMEM;
2028 
2029 	key.objectid = node->bytenr;
2030 	key.type = BTRFS_EXTENT_ITEM_KEY;
2031 	key.offset = node->num_bytes;
2032 
2033 	path->reada = 1;
2034 	path->leave_spinning = 1;
2035 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2036 				path, 0, 1);
2037 	if (ret < 0) {
2038 		err = ret;
2039 		goto out;
2040 	}
2041 	if (ret > 0) {
2042 		err = -EIO;
2043 		goto out;
2044 	}
2045 
2046 	leaf = path->nodes[0];
2047 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2048 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2049 	if (item_size < sizeof(*ei)) {
2050 		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2051 					     path, (u64)-1, 0);
2052 		if (ret < 0) {
2053 			err = ret;
2054 			goto out;
2055 		}
2056 		leaf = path->nodes[0];
2057 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2058 	}
2059 #endif
2060 	BUG_ON(item_size < sizeof(*ei));
2061 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2062 	__run_delayed_extent_op(extent_op, leaf, ei);
2063 
2064 	btrfs_mark_buffer_dirty(leaf);
2065 out:
2066 	btrfs_free_path(path);
2067 	return err;
2068 }
2069 
2070 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2071 				struct btrfs_root *root,
2072 				struct btrfs_delayed_ref_node *node,
2073 				struct btrfs_delayed_extent_op *extent_op,
2074 				int insert_reserved)
2075 {
2076 	int ret = 0;
2077 	struct btrfs_delayed_tree_ref *ref;
2078 	struct btrfs_key ins;
2079 	u64 parent = 0;
2080 	u64 ref_root = 0;
2081 
2082 	ins.objectid = node->bytenr;
2083 	ins.offset = node->num_bytes;
2084 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2085 
2086 	ref = btrfs_delayed_node_to_tree_ref(node);
2087 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2088 		parent = ref->parent;
2089 	else
2090 		ref_root = ref->root;
2091 
2092 	BUG_ON(node->ref_mod != 1);
2093 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2094 		BUG_ON(!extent_op || !extent_op->update_flags ||
2095 		       !extent_op->update_key);
2096 		ret = alloc_reserved_tree_block(trans, root,
2097 						parent, ref_root,
2098 						extent_op->flags_to_set,
2099 						&extent_op->key,
2100 						ref->level, &ins);
2101 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2102 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2103 					     node->num_bytes, parent, ref_root,
2104 					     ref->level, 0, 1, extent_op);
2105 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2106 		ret = __btrfs_free_extent(trans, root, node->bytenr,
2107 					  node->num_bytes, parent, ref_root,
2108 					  ref->level, 0, 1, extent_op);
2109 	} else {
2110 		BUG();
2111 	}
2112 	return ret;
2113 }
2114 
2115 /* helper function to actually process a single delayed ref entry */
2116 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2117 			       struct btrfs_root *root,
2118 			       struct btrfs_delayed_ref_node *node,
2119 			       struct btrfs_delayed_extent_op *extent_op,
2120 			       int insert_reserved)
2121 {
2122 	int ret = 0;
2123 
2124 	if (trans->aborted)
2125 		return 0;
2126 
2127 	if (btrfs_delayed_ref_is_head(node)) {
2128 		struct btrfs_delayed_ref_head *head;
2129 		/*
2130 		 * we've hit the end of the chain and we were supposed
2131 		 * to insert this extent into the tree.  But, it got
2132 		 * deleted before we ever needed to insert it, so all
2133 		 * we have to do is clean up the accounting
2134 		 */
2135 		BUG_ON(extent_op);
2136 		head = btrfs_delayed_node_to_head(node);
2137 		if (insert_reserved) {
2138 			btrfs_pin_extent(root, node->bytenr,
2139 					 node->num_bytes, 1);
2140 			if (head->is_data) {
2141 				ret = btrfs_del_csums(trans, root,
2142 						      node->bytenr,
2143 						      node->num_bytes);
2144 			}
2145 		}
2146 		mutex_unlock(&head->mutex);
2147 		return ret;
2148 	}
2149 
2150 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2151 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2152 		ret = run_delayed_tree_ref(trans, root, node, extent_op,
2153 					   insert_reserved);
2154 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2155 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2156 		ret = run_delayed_data_ref(trans, root, node, extent_op,
2157 					   insert_reserved);
2158 	else
2159 		BUG();
2160 	return ret;
2161 }
2162 
2163 static noinline struct btrfs_delayed_ref_node *
2164 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2165 {
2166 	struct rb_node *node;
2167 	struct btrfs_delayed_ref_node *ref;
2168 	int action = BTRFS_ADD_DELAYED_REF;
2169 again:
2170 	/*
2171 	 * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2172 	 * This prevents the ref count from going down to zero while
2173 	 * there are still pending delayed refs.
2174 	 */
2175 	node = rb_prev(&head->node.rb_node);
2176 	while (1) {
2177 		if (!node)
2178 			break;
2179 		ref = rb_entry(node, struct btrfs_delayed_ref_node,
2180 				rb_node);
2181 		if (ref->bytenr != head->node.bytenr)
2182 			break;
2183 		if (ref->action == action)
2184 			return ref;
2185 		node = rb_prev(node);
2186 	}
2187 	if (action == BTRFS_ADD_DELAYED_REF) {
2188 		action = BTRFS_DROP_DELAYED_REF;
2189 		goto again;
2190 	}
2191 	return NULL;
2192 }
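
/*
 * A toy model of the two-pass selection above, assuming a flat array
 * instead of the rbtree: scan once for an ADD, and only if none exists
 * fall back to a DROP.  This is why a head's refcount never transiently
 * hits zero while adds are still queued.  The types are hypothetical.
 */
#include <stddef.h>

enum toy_action { TOY_ADD, TOY_DROP };
struct toy_ref { enum toy_action action; };

static struct toy_ref *toy_select_ref(struct toy_ref *refs, size_t n)
{
	enum toy_action want = TOY_ADD;
	size_t i;
again:
	for (i = 0; i < n; i++)
		if (refs[i].action == want)
			return &refs[i];
	if (want == TOY_ADD) {
		want = TOY_DROP;
		goto again;
	}
	return NULL;
}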
2193 
2194 /*
2195  * Returns 0 on success or if called with an already aborted transaction.
2196  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2197  */
2198 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2199 				       struct btrfs_root *root,
2200 				       struct list_head *cluster)
2201 {
2202 	struct btrfs_delayed_ref_root *delayed_refs;
2203 	struct btrfs_delayed_ref_node *ref;
2204 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2205 	struct btrfs_delayed_extent_op *extent_op;
2206 	struct btrfs_fs_info *fs_info = root->fs_info;
2207 	int ret;
2208 	int count = 0;
2209 	int must_insert_reserved = 0;
2210 
2211 	delayed_refs = &trans->transaction->delayed_refs;
2212 	while (1) {
2213 		if (!locked_ref) {
2214 			/* pick a new head ref from the cluster list */
2215 			if (list_empty(cluster))
2216 				break;
2217 
2218 			locked_ref = list_entry(cluster->next,
2219 				     struct btrfs_delayed_ref_head, cluster);
2220 
2221 			/* grab the lock that says we are going to process
2222 			 * all the refs for this head */
2223 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2224 
2225 			/*
2226 			 * we may have dropped the spin lock to get the head
2227 			 * mutex lock, and that might have given someone else
2228 			 * time to free the head.  If that's true, it has been
2229 			 * removed from our list and we can move on.
2230 			 */
2231 			if (ret == -EAGAIN) {
2232 				locked_ref = NULL;
2233 				count++;
2234 				continue;
2235 			}
2236 		}
2237 
2238 		/*
2239 		 * We need to try and merge add/drops of the same ref since we
2240 		 * can run into issues with relocate dropping the implicit ref
2241 		 * and then it being added back again before the drop can
2242 		 * finish.  If we merged anything we need to re-loop so we can
2243 		 * get a good ref.
2244 		 */
2245 		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2246 					 locked_ref);
2247 
2248 		/*
2249 		 * locked_ref is the head node, so we have to go one
2250 		 * node back for any delayed ref updates
2251 		 */
2252 		ref = select_delayed_ref(locked_ref);
2253 
2254 		if (ref && ref->seq &&
2255 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2256 			/*
2257 			 * there are still refs with lower seq numbers in the
2258 			 * process of being added. Don't run this ref yet.
2259 			 */
2260 			list_del_init(&locked_ref->cluster);
2261 			mutex_unlock(&locked_ref->mutex);
2262 			locked_ref = NULL;
2263 			delayed_refs->num_heads_ready++;
2264 			spin_unlock(&delayed_refs->lock);
2265 			cond_resched();
2266 			spin_lock(&delayed_refs->lock);
2267 			continue;
2268 		}
2269 
2270 		/*
2271 		 * record the must insert reserved flag before we
2272 		 * drop the spin lock.
2273 		 */
2274 		must_insert_reserved = locked_ref->must_insert_reserved;
2275 		locked_ref->must_insert_reserved = 0;
2276 
2277 		extent_op = locked_ref->extent_op;
2278 		locked_ref->extent_op = NULL;
2279 
2280 		if (!ref) {
2281 			/* All delayed refs have been processed, so go ahead
2282 			 * and send the head node to run_one_delayed_ref,
2283 			 * so that any accounting fixes can happen
2284 			 */
2285 			ref = &locked_ref->node;
2286 
2287 			if (extent_op && must_insert_reserved) {
2288 				kfree(extent_op);
2289 				extent_op = NULL;
2290 			}
2291 
2292 			if (extent_op) {
2293 				spin_unlock(&delayed_refs->lock);
2294 
2295 				ret = run_delayed_extent_op(trans, root,
2296 							    ref, extent_op);
2297 				kfree(extent_op);
2298 
2299 				if (ret) {
2300 					list_del_init(&locked_ref->cluster);
2301 					mutex_unlock(&locked_ref->mutex);
2302 
2303 					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2304 					spin_lock(&delayed_refs->lock);
2305 					return ret;
2306 				}
2307 
2308 				goto next;
2309 			}
2310 
2311 			list_del_init(&locked_ref->cluster);
2312 			locked_ref = NULL;
2313 		}
2314 
2315 		ref->in_tree = 0;
2316 		rb_erase(&ref->rb_node, &delayed_refs->root);
2317 		delayed_refs->num_entries--;
2318 		if (locked_ref) {
2319 			/*
2320 			 * when we play the delayed ref, also correct the
2321 			 * ref_mod on head
2322 			 */
2323 			switch (ref->action) {
2324 			case BTRFS_ADD_DELAYED_REF:
2325 			case BTRFS_ADD_DELAYED_EXTENT:
2326 				locked_ref->node.ref_mod -= ref->ref_mod;
2327 				break;
2328 			case BTRFS_DROP_DELAYED_REF:
2329 				locked_ref->node.ref_mod += ref->ref_mod;
2330 				break;
2331 			default:
2332 				WARN_ON(1);
2333 			}
2334 		}
2335 		spin_unlock(&delayed_refs->lock);
2336 
2337 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
2338 					  must_insert_reserved);
2339 
2340 		btrfs_put_delayed_ref(ref);
2341 		kfree(extent_op);
2342 		count++;
2343 
2344 		if (ret) {
2345 			if (locked_ref) {
2346 				list_del_init(&locked_ref->cluster);
2347 				mutex_unlock(&locked_ref->mutex);
2348 			}
2349 			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2350 			spin_lock(&delayed_refs->lock);
2351 			return ret;
2352 		}
2353 
2354 next:
2355 		cond_resched();
2356 		spin_lock(&delayed_refs->lock);
2357 	}
2358 	return count;
2359 }
2360 
2361 #ifdef SCRAMBLE_DELAYED_REFS
2362 /*
2363  * Normally delayed refs get processed in ascending bytenr order. This
2364  * correlates in most cases to the order added. To expose dependencies on this
2365  * order, we start to process the tree in the middle instead of the beginning
2366  */
2367 static u64 find_middle(struct rb_root *root)
2368 {
2369 	struct rb_node *n = root->rb_node;
2370 	struct btrfs_delayed_ref_node *entry;
2371 	int alt = 1;
2372 	u64 middle;
2373 	u64 first = 0, last = 0;
2374 
2375 	n = rb_first(root);
2376 	if (n) {
2377 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2378 		first = entry->bytenr;
2379 	}
2380 	n = rb_last(root);
2381 	if (n) {
2382 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2383 		last = entry->bytenr;
2384 	}
2385 	n = root->rb_node;
2386 
2387 	while (n) {
2388 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2389 		WARN_ON(!entry->in_tree);
2390 
2391 		middle = entry->bytenr;
2392 
2393 		if (alt)
2394 			n = n->rb_left;
2395 		else
2396 			n = n->rb_right;
2397 
2398 		alt = 1 - alt;
2399 	}
2400 	return middle;
2401 }
2402 #endif
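
/*
 * On a sorted array, the alternating left/right descent in find_middle()
 * above is just a comparison-free binary search that flips direction on
 * every step, ending near the median.  A user-space sketch:
 */
#include <stdint.h>
#include <stddef.h>

static uint64_t toy_find_middle(const uint64_t *sorted, size_t n)
{
	size_t lo = 0, hi = n;	/* half-open [lo, hi) */
	int go_left = 1;
	uint64_t middle = 0;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		middle = sorted[mid];
		if (go_left)
			hi = mid;	/* mirrors n = n->rb_left */
		else
			lo = mid + 1;	/* mirrors n = n->rb_right */
		go_left = !go_left;
	}
	return middle;
}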
2403 
2404 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2405 					 struct btrfs_fs_info *fs_info)
2406 {
2407 	struct qgroup_update *qgroup_update;
2408 	int ret = 0;
2409 
2410 	if (list_empty(&trans->qgroup_ref_list) !=
2411 	    !trans->delayed_ref_elem.seq) {
2412 		/* list without seq or seq without list */
2413 		printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2414 			list_empty(&trans->qgroup_ref_list) ? "" : " not",
2415 			trans->delayed_ref_elem.seq);
2416 		BUG();
2417 	}
2418 
2419 	if (!trans->delayed_ref_elem.seq)
2420 		return 0;
2421 
2422 	while (!list_empty(&trans->qgroup_ref_list)) {
2423 		qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2424 						 struct qgroup_update, list);
2425 		list_del(&qgroup_update->list);
2426 		if (!ret)
2427 			ret = btrfs_qgroup_account_ref(
2428 					trans, fs_info, qgroup_update->node,
2429 					qgroup_update->extent_op);
2430 		kfree(qgroup_update);
2431 	}
2432 
2433 	btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2434 
2435 	return ret;
2436 }
2437 
2438 /*
2439  * this starts processing the delayed reference count updates and
2440  * extent insertions we have queued up so far.  count can be
2441  * 0, which means to process everything in the tree at the start
2442  * of the run (but not newly added entries), or it can be some target
2443  * number you'd like to process.
2444  *
2445  * Returns 0 on success or if called with an aborted transaction
2446  * Returns <0 on error and aborts the transaction
2447  */
2448 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2449 			   struct btrfs_root *root, unsigned long count)
2450 {
2451 	struct rb_node *node;
2452 	struct btrfs_delayed_ref_root *delayed_refs;
2453 	struct btrfs_delayed_ref_node *ref;
2454 	struct list_head cluster;
2455 	int ret;
2456 	u64 delayed_start;
2457 	int run_all = count == (unsigned long)-1;
2458 	int run_most = 0;
2459 	int loops;
2460 
2461 	/* We'll clean this up in btrfs_cleanup_transaction */
2462 	if (trans->aborted)
2463 		return 0;
2464 
2465 	if (root == root->fs_info->extent_root)
2466 		root = root->fs_info->tree_root;
2467 
2468 	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2469 
2470 	delayed_refs = &trans->transaction->delayed_refs;
2471 	INIT_LIST_HEAD(&cluster);
2472 again:
2473 	loops = 0;
2474 	spin_lock(&delayed_refs->lock);
2475 
2476 #ifdef SCRAMBLE_DELAYED_REFS
2477 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2478 #endif
2479 
2480 	if (count == 0) {
2481 		count = delayed_refs->num_entries * 2;
2482 		run_most = 1;
2483 	}
2484 	while (1) {
2485 		if (!(run_all || run_most) &&
2486 		    delayed_refs->num_heads_ready < 64)
2487 			break;
2488 
2489 		/*
2490 		 * go find something we can process in the rbtree.  We start at
2491 		 * the beginning of the tree, and then build a cluster
2492 		 * of refs to process starting at the first one we are able to
2493 		 * lock
2494 		 */
2495 		delayed_start = delayed_refs->run_delayed_start;
2496 		ret = btrfs_find_ref_cluster(trans, &cluster,
2497 					     delayed_refs->run_delayed_start);
2498 		if (ret)
2499 			break;
2500 
2501 		ret = run_clustered_refs(trans, root, &cluster);
2502 		if (ret < 0) {
2503 			spin_unlock(&delayed_refs->lock);
2504 			btrfs_abort_transaction(trans, root, ret);
2505 			return ret;
2506 		}
2507 
2508 		count -= min_t(unsigned long, ret, count);
2509 
2510 		if (count == 0)
2511 			break;
2512 
2513 		if (delayed_start >= delayed_refs->run_delayed_start) {
2514 			if (loops == 0) {
2515 				/*
2516 				 * btrfs_find_ref_cluster looped.  Let's do one
2517 				 * more cycle.  If we don't run any delayed refs
2518 				 * during that cycle (because all of them are
2519 				 * blocked), bail out.
2520 				 */
2521 				loops = 1;
2522 			} else {
2523 				/*
2524 				 * no runnable refs left, stop trying
2525 				 */
2526 				BUG_ON(run_all);
2527 				break;
2528 			}
2529 		}
2530 		if (ret) {
2531 			/* refs were run, let's reset staleness detection */
2532 			loops = 0;
2533 		}
2534 	}
2535 
2536 	if (run_all) {
2537 		if (!list_empty(&trans->new_bgs)) {
2538 			spin_unlock(&delayed_refs->lock);
2539 			btrfs_create_pending_block_groups(trans, root);
2540 			spin_lock(&delayed_refs->lock);
2541 		}
2542 
2543 		node = rb_first(&delayed_refs->root);
2544 		if (!node)
2545 			goto out;
2546 		count = (unsigned long)-1;
2547 
2548 		while (node) {
2549 			ref = rb_entry(node, struct btrfs_delayed_ref_node,
2550 				       rb_node);
2551 			if (btrfs_delayed_ref_is_head(ref)) {
2552 				struct btrfs_delayed_ref_head *head;
2553 
2554 				head = btrfs_delayed_node_to_head(ref);
2555 				atomic_inc(&ref->refs);
2556 
2557 				spin_unlock(&delayed_refs->lock);
2558 				/*
2559 				 * Mutex was contended, block until it's
2560 				 * released and try again
2561 				 */
2562 				mutex_lock(&head->mutex);
2563 				mutex_unlock(&head->mutex);
2564 
2565 				btrfs_put_delayed_ref(ref);
2566 				cond_resched();
2567 				goto again;
2568 			}
2569 			node = rb_next(node);
2570 		}
2571 		spin_unlock(&delayed_refs->lock);
2572 		schedule_timeout(1);
2573 		goto again;
2574 	}
2575 out:
2576 	spin_unlock(&delayed_refs->lock);
2577 	assert_qgroups_uptodate(trans);
2578 	return 0;
2579 }
2580 
2581 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2582 				struct btrfs_root *root,
2583 				u64 bytenr, u64 num_bytes, u64 flags,
2584 				int is_data)
2585 {
2586 	struct btrfs_delayed_extent_op *extent_op;
2587 	int ret;
2588 
2589 	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2590 	if (!extent_op)
2591 		return -ENOMEM;
2592 
2593 	extent_op->flags_to_set = flags;
2594 	extent_op->update_flags = 1;
2595 	extent_op->update_key = 0;
2596 	extent_op->is_data = is_data ? 1 : 0;
2597 
2598 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2599 					  num_bytes, extent_op);
2600 	if (ret)
2601 		kfree(extent_op);
2602 	return ret;
2603 }
2604 
2605 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2606 				      struct btrfs_root *root,
2607 				      struct btrfs_path *path,
2608 				      u64 objectid, u64 offset, u64 bytenr)
2609 {
2610 	struct btrfs_delayed_ref_head *head;
2611 	struct btrfs_delayed_ref_node *ref;
2612 	struct btrfs_delayed_data_ref *data_ref;
2613 	struct btrfs_delayed_ref_root *delayed_refs;
2614 	struct rb_node *node;
2615 	int ret = 0;
2616 
2617 	ret = -ENOENT;
2618 	delayed_refs = &trans->transaction->delayed_refs;
2619 	spin_lock(&delayed_refs->lock);
2620 	head = btrfs_find_delayed_ref_head(trans, bytenr);
2621 	if (!head)
2622 		goto out;
2623 
2624 	if (!mutex_trylock(&head->mutex)) {
2625 		atomic_inc(&head->node.refs);
2626 		spin_unlock(&delayed_refs->lock);
2627 
2628 		btrfs_release_path(path);
2629 
2630 		/*
2631 		 * Mutex was contended, block until it's released and let
2632 		 * caller try again
2633 		 */
2634 		mutex_lock(&head->mutex);
2635 		mutex_unlock(&head->mutex);
2636 		btrfs_put_delayed_ref(&head->node);
2637 		return -EAGAIN;
2638 	}
2639 
2640 	node = rb_prev(&head->node.rb_node);
2641 	if (!node)
2642 		goto out_unlock;
2643 
2644 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2645 
2646 	if (ref->bytenr != bytenr)
2647 		goto out_unlock;
2648 
2649 	ret = 1;
2650 	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2651 		goto out_unlock;
2652 
2653 	data_ref = btrfs_delayed_node_to_data_ref(ref);
2654 
2655 	node = rb_prev(node);
2656 	if (node) {
2657 		int seq = ref->seq;
2658 
2659 		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2660 		if (ref->bytenr == bytenr && ref->seq == seq)
2661 			goto out_unlock;
2662 	}
2663 
2664 	if (data_ref->root != root->root_key.objectid ||
2665 	    data_ref->objectid != objectid || data_ref->offset != offset)
2666 		goto out_unlock;
2667 
2668 	ret = 0;
2669 out_unlock:
2670 	mutex_unlock(&head->mutex);
2671 out:
2672 	spin_unlock(&delayed_refs->lock);
2673 	return ret;
2674 }
2675 
2676 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2677 					struct btrfs_root *root,
2678 					struct btrfs_path *path,
2679 					u64 objectid, u64 offset, u64 bytenr)
2680 {
2681 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2682 	struct extent_buffer *leaf;
2683 	struct btrfs_extent_data_ref *ref;
2684 	struct btrfs_extent_inline_ref *iref;
2685 	struct btrfs_extent_item *ei;
2686 	struct btrfs_key key;
2687 	u32 item_size;
2688 	int ret;
2689 
2690 	key.objectid = bytenr;
2691 	key.offset = (u64)-1;
2692 	key.type = BTRFS_EXTENT_ITEM_KEY;
2693 
2694 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2695 	if (ret < 0)
2696 		goto out;
2697 	BUG_ON(ret == 0); /* Corruption */
2698 
2699 	ret = -ENOENT;
2700 	if (path->slots[0] == 0)
2701 		goto out;
2702 
2703 	path->slots[0]--;
2704 	leaf = path->nodes[0];
2705 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2706 
2707 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2708 		goto out;
2709 
2710 	ret = 1;
2711 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2712 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2713 	if (item_size < sizeof(*ei)) {
2714 		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2715 		goto out;
2716 	}
2717 #endif
2718 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2719 
2720 	if (item_size != sizeof(*ei) +
2721 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2722 		goto out;
2723 
2724 	if (btrfs_extent_generation(leaf, ei) <=
2725 	    btrfs_root_last_snapshot(&root->root_item))
2726 		goto out;
2727 
2728 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2729 	if (btrfs_extent_inline_ref_type(leaf, iref) !=
2730 	    BTRFS_EXTENT_DATA_REF_KEY)
2731 		goto out;
2732 
2733 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2734 	if (btrfs_extent_refs(leaf, ei) !=
2735 	    btrfs_extent_data_ref_count(leaf, ref) ||
2736 	    btrfs_extent_data_ref_root(leaf, ref) !=
2737 	    root->root_key.objectid ||
2738 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2739 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2740 		goto out;
2741 
2742 	ret = 0;
2743 out:
2744 	return ret;
2745 }
2746 
2747 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2748 			  struct btrfs_root *root,
2749 			  u64 objectid, u64 offset, u64 bytenr)
2750 {
2751 	struct btrfs_path *path;
2752 	int ret;
2753 	int ret2;
2754 
2755 	path = btrfs_alloc_path();
2756 	if (!path)
2757 		return -ENOMEM;
2758 
2759 	do {
2760 		ret = check_committed_ref(trans, root, path, objectid,
2761 					  offset, bytenr);
2762 		if (ret && ret != -ENOENT)
2763 			goto out;
2764 
2765 		ret2 = check_delayed_ref(trans, root, path, objectid,
2766 					 offset, bytenr);
2767 	} while (ret2 == -EAGAIN);
2768 
2769 	if (ret2 && ret2 != -ENOENT) {
2770 		ret = ret2;
2771 		goto out;
2772 	}
2773 
2774 	if (ret != -ENOENT || ret2 != -ENOENT)
2775 		ret = 0;
2776 out:
2777 	btrfs_free_path(path);
2778 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2779 		WARN_ON(ret > 0);
2780 	return ret;
2781 }
2782 
2783 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2784 			   struct btrfs_root *root,
2785 			   struct extent_buffer *buf,
2786 			   int full_backref, int inc, int for_cow)
2787 {
2788 	u64 bytenr;
2789 	u64 num_bytes;
2790 	u64 parent;
2791 	u64 ref_root;
2792 	u32 nritems;
2793 	struct btrfs_key key;
2794 	struct btrfs_file_extent_item *fi;
2795 	int i;
2796 	int level;
2797 	int ret = 0;
2798 	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2799 			    u64, u64, u64, u64, u64, u64, int);
2800 
2801 	ref_root = btrfs_header_owner(buf);
2802 	nritems = btrfs_header_nritems(buf);
2803 	level = btrfs_header_level(buf);
2804 
2805 	if (!root->ref_cows && level == 0)
2806 		return 0;
2807 
2808 	if (inc)
2809 		process_func = btrfs_inc_extent_ref;
2810 	else
2811 		process_func = btrfs_free_extent;
2812 
2813 	if (full_backref)
2814 		parent = buf->start;
2815 	else
2816 		parent = 0;
2817 
2818 	for (i = 0; i < nritems; i++) {
2819 		if (level == 0) {
2820 			btrfs_item_key_to_cpu(buf, &key, i);
2821 			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2822 				continue;
2823 			fi = btrfs_item_ptr(buf, i,
2824 					    struct btrfs_file_extent_item);
2825 			if (btrfs_file_extent_type(buf, fi) ==
2826 			    BTRFS_FILE_EXTENT_INLINE)
2827 				continue;
2828 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2829 			if (bytenr == 0)
2830 				continue;
2831 
2832 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2833 			key.offset -= btrfs_file_extent_offset(buf, fi);
2834 			ret = process_func(trans, root, bytenr, num_bytes,
2835 					   parent, ref_root, key.objectid,
2836 					   key.offset, for_cow);
2837 			if (ret)
2838 				goto fail;
2839 		} else {
2840 			bytenr = btrfs_node_blockptr(buf, i);
2841 			num_bytes = btrfs_level_size(root, level - 1);
2842 			ret = process_func(trans, root, bytenr, num_bytes,
2843 					   parent, ref_root, level - 1, 0,
2844 					   for_cow);
2845 			if (ret)
2846 				goto fail;
2847 		}
2848 	}
2849 	return 0;
2850 fail:
2851 	return ret;
2852 }
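
/*
 * __btrfs_mod_ref() above folds the inc and dec walks into one body by
 * picking a function pointer up front.  A stripped-down, user-space
 * sketch of the same pattern; toy_inc/toy_dec are hypothetical callbacks
 * standing in for btrfs_inc_extent_ref/btrfs_free_extent.
 */
typedef int (*toy_mod_func)(long item);

static int toy_inc(long item) { (void)item; return 0; }
static int toy_dec(long item) { (void)item; return 0; }

static int toy_mod_all(const long *items, int n, int inc)
{
	toy_mod_func fn = inc ? toy_inc : toy_dec;
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = fn(items[i]);
		if (ret)
			return ret;	/* bail on the first failure, as above */
	}
	return 0;
}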
2853 
2854 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2855 		  struct extent_buffer *buf, int full_backref, int for_cow)
2856 {
2857 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2858 }
2859 
2860 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2861 		  struct extent_buffer *buf, int full_backref, int for_cow)
2862 {
2863 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2864 }
2865 
2866 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2867 				 struct btrfs_root *root,
2868 				 struct btrfs_path *path,
2869 				 struct btrfs_block_group_cache *cache)
2870 {
2871 	int ret;
2872 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2873 	unsigned long bi;
2874 	struct extent_buffer *leaf;
2875 
2876 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2877 	if (ret < 0)
2878 		goto fail;
2879 	BUG_ON(ret); /* Corruption */
2880 
2881 	leaf = path->nodes[0];
2882 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2883 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2884 	btrfs_mark_buffer_dirty(leaf);
2885 	btrfs_release_path(path);
2886 fail:
2887 	if (ret) {
2888 		btrfs_abort_transaction(trans, root, ret);
2889 		return ret;
2890 	}
2891 	return 0;
2892 
2893 }
2894 
2895 static struct btrfs_block_group_cache *
2896 next_block_group(struct btrfs_root *root,
2897 		 struct btrfs_block_group_cache *cache)
2898 {
2899 	struct rb_node *node;
2900 	spin_lock(&root->fs_info->block_group_cache_lock);
2901 	node = rb_next(&cache->cache_node);
2902 	btrfs_put_block_group(cache);
2903 	if (node) {
2904 		cache = rb_entry(node, struct btrfs_block_group_cache,
2905 				 cache_node);
2906 		btrfs_get_block_group(cache);
2907 	} else
2908 		cache = NULL;
2909 	spin_unlock(&root->fs_info->block_group_cache_lock);
2910 	return cache;
2911 }
2912 
2913 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2914 			    struct btrfs_trans_handle *trans,
2915 			    struct btrfs_path *path)
2916 {
2917 	struct btrfs_root *root = block_group->fs_info->tree_root;
2918 	struct inode *inode = NULL;
2919 	u64 alloc_hint = 0;
2920 	int dcs = BTRFS_DC_ERROR;
2921 	int num_pages = 0;
2922 	int retries = 0;
2923 	int ret = 0;
2924 
2925 	/*
2926 	 * If this block group is smaller than 100 megs, don't bother
2927 	 * caching it.
2928 	 */
2929 	if (block_group->key.offset < (100 * 1024 * 1024)) {
2930 		spin_lock(&block_group->lock);
2931 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2932 		spin_unlock(&block_group->lock);
2933 		return 0;
2934 	}
2935 
2936 again:
2937 	inode = lookup_free_space_inode(root, block_group, path);
2938 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2939 		ret = PTR_ERR(inode);
2940 		btrfs_release_path(path);
2941 		goto out;
2942 	}
2943 
2944 	if (IS_ERR(inode)) {
2945 		BUG_ON(retries);
2946 		retries++;
2947 
2948 		if (block_group->ro)
2949 			goto out_free;
2950 
2951 		ret = create_free_space_inode(root, trans, block_group, path);
2952 		if (ret)
2953 			goto out_free;
2954 		goto again;
2955 	}
2956 
2957 	/* We've already setup this transaction, go ahead and exit */
2958 	if (block_group->cache_generation == trans->transid &&
2959 	    i_size_read(inode)) {
2960 		dcs = BTRFS_DC_SETUP;
2961 		goto out_put;
2962 	}
2963 
2964 	/*
2965 	 * We want to set the generation to 0, that way if anything goes wrong
2966 	 * from here on out we know not to trust this cache when we load up next
2967 	 * time.
2968 	 */
2969 	BTRFS_I(inode)->generation = 0;
2970 	ret = btrfs_update_inode(trans, root, inode);
2971 	WARN_ON(ret);
2972 
2973 	if (i_size_read(inode) > 0) {
2974 		ret = btrfs_truncate_free_space_cache(root, trans, path,
2975 						      inode);
2976 		if (ret)
2977 			goto out_put;
2978 	}
2979 
2980 	spin_lock(&block_group->lock);
2981 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
2982 	    !btrfs_test_opt(root, SPACE_CACHE)) {
2983 		/*
2984 		 * don't bother trying to write stuff out _if_
2985 		 * a) we're not cached,
2986 		 * b) we're mounted with the nospace_cache option.
2987 		 */
2988 		dcs = BTRFS_DC_WRITTEN;
2989 		spin_unlock(&block_group->lock);
2990 		goto out_put;
2991 	}
2992 	spin_unlock(&block_group->lock);
2993 
2994 	/*
2995 	 * Try to preallocate enough space based on how big the block group is.
2996 	 * Keep in mind this has to include any pinned space which could end up
2997 	 * taking up quite a bit since it's not folded into the other space
2998 	 * cache.
2999 	 */
3000 	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3001 	if (!num_pages)
3002 		num_pages = 1;
3003 
3004 	num_pages *= 16;
3005 	num_pages *= PAGE_CACHE_SIZE;
3006 
3007 	ret = btrfs_check_data_free_space(inode, num_pages);
3008 	if (ret)
3009 		goto out_put;
3010 
3011 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3012 					      num_pages, num_pages,
3013 					      &alloc_hint);
3014 	if (!ret)
3015 		dcs = BTRFS_DC_SETUP;
3016 	btrfs_free_reserved_data_space(inode, num_pages);
3017 
3018 out_put:
3019 	iput(inode);
3020 out_free:
3021 	btrfs_release_path(path);
3022 out:
3023 	spin_lock(&block_group->lock);
3024 	if (!ret && dcs == BTRFS_DC_SETUP)
3025 		block_group->cache_generation = trans->transid;
3026 	block_group->disk_cache_state = dcs;
3027 	spin_unlock(&block_group->lock);
3028 
3029 	return ret;
3030 }
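
/*
 * Worked example of the preallocation sizing above, assuming 4K pages:
 * a 1 GiB block group gets (1 GiB / 256 MiB) * 16 = 64 pages, i.e.
 * 256 KiB of free-space cache, and anything under 256 MiB still gets the
 * 16-page floor.  TOY_PAGE_SIZE is a stand-in for PAGE_CACHE_SIZE.
 */
#include <stdint.h>

#define TOY_PAGE_SIZE 4096ULL

static uint64_t toy_cache_prealloc_bytes(uint64_t bg_bytes)
{
	uint64_t num_pages = bg_bytes / (256ULL * 1024 * 1024);

	if (!num_pages)
		num_pages = 1;
	return num_pages * 16 * TOY_PAGE_SIZE;	/* 1 GiB -> 262144 bytes */
}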
3031 
3032 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3033 				   struct btrfs_root *root)
3034 {
3035 	struct btrfs_block_group_cache *cache;
3036 	int err = 0;
3037 	struct btrfs_path *path;
3038 	u64 last = 0;
3039 
3040 	path = btrfs_alloc_path();
3041 	if (!path)
3042 		return -ENOMEM;
3043 
3044 again:
3045 	while (1) {
3046 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3047 		while (cache) {
3048 			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3049 				break;
3050 			cache = next_block_group(root, cache);
3051 		}
3052 		if (!cache) {
3053 			if (last == 0)
3054 				break;
3055 			last = 0;
3056 			continue;
3057 		}
3058 		err = cache_save_setup(cache, trans, path);
3059 		last = cache->key.objectid + cache->key.offset;
3060 		btrfs_put_block_group(cache);
3061 	}
3062 
3063 	while (1) {
3064 		if (last == 0) {
3065 			err = btrfs_run_delayed_refs(trans, root,
3066 						     (unsigned long)-1);
3067 			if (err) /* File system offline */
3068 				goto out;
3069 		}
3070 
3071 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3072 		while (cache) {
3073 			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3074 				btrfs_put_block_group(cache);
3075 				goto again;
3076 			}
3077 
3078 			if (cache->dirty)
3079 				break;
3080 			cache = next_block_group(root, cache);
3081 		}
3082 		if (!cache) {
3083 			if (last == 0)
3084 				break;
3085 			last = 0;
3086 			continue;
3087 		}
3088 
3089 		if (cache->disk_cache_state == BTRFS_DC_SETUP)
3090 			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3091 		cache->dirty = 0;
3092 		last = cache->key.objectid + cache->key.offset;
3093 
3094 		err = write_one_cache_group(trans, root, path, cache);
3095 		if (err) /* File system offline */
3096 			goto out;
3097 
3098 		btrfs_put_block_group(cache);
3099 	}
3100 
3101 	while (1) {
3102 		/*
3103 		 * This probably isn't needed since we're just marking our
3104 		 * preallocated extent as written, but it can't hurt, just
3105 		 * in case.
3106 		 */
3107 		if (last == 0) {
3108 			err = btrfs_run_delayed_refs(trans, root,
3109 						     (unsigned long)-1);
3110 			if (err) /* File system offline */
3111 				goto out;
3112 		}
3113 
3114 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3115 		while (cache) {
3116 			/*
3117 			 * Really this shouldn't happen, but it could if we
3118 			 * couldn't write the entire preallocated extent and
3119 			 * splitting the extent resulted in a new block.
3120 			 */
3121 			if (cache->dirty) {
3122 				btrfs_put_block_group(cache);
3123 				goto again;
3124 			}
3125 			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3126 				break;
3127 			cache = next_block_group(root, cache);
3128 		}
3129 		if (!cache) {
3130 			if (last == 0)
3131 				break;
3132 			last = 0;
3133 			continue;
3134 		}
3135 
3136 		err = btrfs_write_out_cache(root, trans, cache, path);
3137 
3138 		/*
3139 		 * If we didn't have an error then the cache state is still
3140 		 * NEED_WRITE, so we can set it to WRITTEN.
3141 		 */
3142 		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3143 			cache->disk_cache_state = BTRFS_DC_WRITTEN;
3144 		last = cache->key.objectid + cache->key.offset;
3145 		btrfs_put_block_group(cache);
3146 	}
3147 out:
3148 
3149 	btrfs_free_path(path);
3150 	return err;
3151 }
3152 
3153 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3154 {
3155 	struct btrfs_block_group_cache *block_group;
3156 	int readonly = 0;
3157 
3158 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3159 	if (!block_group || block_group->ro)
3160 		readonly = 1;
3161 	if (block_group)
3162 		btrfs_put_block_group(block_group);
3163 	return readonly;
3164 }
3165 
3166 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3167 			     u64 total_bytes, u64 bytes_used,
3168 			     struct btrfs_space_info **space_info)
3169 {
3170 	struct btrfs_space_info *found;
3171 	int i;
3172 	int factor;
3173 
3174 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3175 		     BTRFS_BLOCK_GROUP_RAID10))
3176 		factor = 2;
3177 	else
3178 		factor = 1;
3179 
3180 	found = __find_space_info(info, flags);
3181 	if (found) {
3182 		spin_lock(&found->lock);
3183 		found->total_bytes += total_bytes;
3184 		found->disk_total += total_bytes * factor;
3185 		found->bytes_used += bytes_used;
3186 		found->disk_used += bytes_used * factor;
3187 		found->full = 0;
3188 		spin_unlock(&found->lock);
3189 		*space_info = found;
3190 		return 0;
3191 	}
3192 	found = kzalloc(sizeof(*found), GFP_NOFS);
3193 	if (!found)
3194 		return -ENOMEM;
3195 
3196 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3197 		INIT_LIST_HEAD(&found->block_groups[i]);
3198 	init_rwsem(&found->groups_sem);
3199 	spin_lock_init(&found->lock);
3200 	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3201 	found->total_bytes = total_bytes;
3202 	found->disk_total = total_bytes * factor;
3203 	found->bytes_used = bytes_used;
3204 	found->disk_used = bytes_used * factor;
3205 	found->bytes_pinned = 0;
3206 	found->bytes_reserved = 0;
3207 	found->bytes_readonly = 0;
3208 	found->bytes_may_use = 0;
3209 	found->full = 0;
3210 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3211 	found->chunk_alloc = 0;
3212 	found->flush = 0;
3213 	init_waitqueue_head(&found->wait);
3214 	*space_info = found;
3215 	list_add_rcu(&found->list, &info->space_info);
3216 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3217 		info->data_sinfo = found;
3218 	return 0;
3219 }
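
/*
 * The "factor" above tracks raw disk consumed per logical byte: DUP,
 * RAID1 and RAID10 keep two copies, so disk_total/disk_used run at twice
 * the logical numbers.  E.g. a new 1 GiB RAID1 chunk adds 1 GiB to
 * total_bytes but 2 GiB to disk_total.  A hedged one-liner:
 */
#include <stdint.h>

static uint64_t toy_disk_bytes(uint64_t logical, int two_copies)
{
	return logical * (two_copies ? 2 : 1);	/* DUP/RAID1/RAID10 vs single */
}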
3220 
3221 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3222 {
3223 	u64 extra_flags = chunk_to_extended(flags) &
3224 				BTRFS_EXTENDED_PROFILE_MASK;
3225 
3226 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3227 		fs_info->avail_data_alloc_bits |= extra_flags;
3228 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
3229 		fs_info->avail_metadata_alloc_bits |= extra_flags;
3230 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3231 		fs_info->avail_system_alloc_bits |= extra_flags;
3232 }
3233 
3234 /*
3235  * returns target flags in extended format or 0 if restripe for this
3236  * chunk_type is not in progress
3237  *
3238  * should be called with either volume_mutex or balance_lock held
3239  */
3240 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3241 {
3242 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3243 	u64 target = 0;
3244 
3245 	if (!bctl)
3246 		return 0;
3247 
3248 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
3249 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3250 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3251 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3252 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3253 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3254 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3255 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3256 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3257 	}
3258 
3259 	return target;
3260 }
3261 
3262 /*
3263  * @flags: available profiles in extended format (see ctree.h)
3264  *
3265  * Returns reduced profile in chunk format.  If profile changing is in
3266  * progress (either running or paused) picks the target profile (if it's
3267  * already available), otherwise falls back to plain reducing.
3268  */
3269 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3270 {
3271 	/*
3272 	 * we add in the count of missing devices because we want
3273 	 * to make sure that any RAID levels on a degraded FS
3274 	 * continue to be honored.
3275 	 */
3276 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
3277 		root->fs_info->fs_devices->missing_devices;
3278 	u64 target;
3279 
3280 	/*
3281 	 * see if restripe for this chunk_type is in progress, if so
3282 	 * try to reduce to the target profile
3283 	 */
3284 	spin_lock(&root->fs_info->balance_lock);
3285 	target = get_restripe_target(root->fs_info, flags);
3286 	if (target) {
3287 		/* pick target profile only if it's already available */
3288 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3289 			spin_unlock(&root->fs_info->balance_lock);
3290 			return extended_to_chunk(target);
3291 		}
3292 	}
3293 	spin_unlock(&root->fs_info->balance_lock);
3294 
3295 	if (num_devices == 1)
3296 		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3297 	if (num_devices < 4)
3298 		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3299 
3300 	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3301 	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3302 		      BTRFS_BLOCK_GROUP_RAID10))) {
3303 		flags &= ~BTRFS_BLOCK_GROUP_DUP;
3304 	}
3305 
3306 	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3307 	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3308 		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3309 	}
3310 
3311 	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3312 	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3313 	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
3314 	     (flags & BTRFS_BLOCK_GROUP_DUP))) {
3315 		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3316 	}
3317 
3318 	return extended_to_chunk(flags);
3319 }
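
/*
 * A condensed sketch of the reduction order implemented above, applied
 * after the device-count limits: RAID10 beats RAID1, which beats DUP,
 * which beats RAID0.  The TOY_* bits are hypothetical stand-ins for the
 * BTRFS_BLOCK_GROUP_* flags, and the if/else chain compresses the
 * pairwise masking done above into one precedence ladder.
 */
#include <stdint.h>

#define TOY_RAID0	(1ULL << 0)
#define TOY_RAID1	(1ULL << 1)
#define TOY_DUP		(1ULL << 2)
#define TOY_RAID10	(1ULL << 3)

static uint64_t toy_reduce_profile(uint64_t flags, uint64_t num_devices)
{
	if (num_devices == 1)
		flags &= ~(TOY_RAID1 | TOY_RAID0);
	if (num_devices < 4)
		flags &= ~TOY_RAID10;

	if (flags & TOY_RAID10)
		flags &= ~(TOY_RAID1 | TOY_DUP | TOY_RAID0);
	else if (flags & TOY_RAID1)
		flags &= ~(TOY_DUP | TOY_RAID0);
	else if (flags & TOY_DUP)
		flags &= ~TOY_RAID0;

	return flags;
}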
3320 
3321 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3322 {
3323 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3324 		flags |= root->fs_info->avail_data_alloc_bits;
3325 	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3326 		flags |= root->fs_info->avail_system_alloc_bits;
3327 	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3328 		flags |= root->fs_info->avail_metadata_alloc_bits;
3329 
3330 	return btrfs_reduce_alloc_profile(root, flags);
3331 }
3332 
3333 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3334 {
3335 	u64 flags;
3336 
3337 	if (data)
3338 		flags = BTRFS_BLOCK_GROUP_DATA;
3339 	else if (root == root->fs_info->chunk_root)
3340 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
3341 	else
3342 		flags = BTRFS_BLOCK_GROUP_METADATA;
3343 
3344 	return get_alloc_profile(root, flags);
3345 }
3346 
3347 /*
3348  * This will check the space that the inode allocates from to make sure we have
3349  * enough space for bytes.
3350  */
3351 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3352 {
3353 	struct btrfs_space_info *data_sinfo;
3354 	struct btrfs_root *root = BTRFS_I(inode)->root;
3355 	struct btrfs_fs_info *fs_info = root->fs_info;
3356 	u64 used;
3357 	int ret = 0, committed = 0, alloc_chunk = 1;
3358 
3359 	/* make sure bytes are sectorsize aligned */
3360 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3361 
3362 	if (root == root->fs_info->tree_root ||
3363 	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3364 		alloc_chunk = 0;
3365 		committed = 1;
3366 	}
3367 
3368 	data_sinfo = fs_info->data_sinfo;
3369 	if (!data_sinfo)
3370 		goto alloc;
3371 
3372 again:
3373 	/* make sure we have enough space to handle the data first */
3374 	spin_lock(&data_sinfo->lock);
3375 	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3376 		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3377 		data_sinfo->bytes_may_use;
3378 
3379 	if (used + bytes > data_sinfo->total_bytes) {
3380 		struct btrfs_trans_handle *trans;
3381 
3382 		/*
3383 		 * if we don't have enough free bytes in this space then we need
3384 		 * to alloc a new chunk.
3385 		 */
3386 		if (!data_sinfo->full && alloc_chunk) {
3387 			u64 alloc_target;
3388 
3389 			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3390 			spin_unlock(&data_sinfo->lock);
3391 alloc:
3392 			alloc_target = btrfs_get_alloc_profile(root, 1);
3393 			trans = btrfs_join_transaction(root);
3394 			if (IS_ERR(trans))
3395 				return PTR_ERR(trans);
3396 
3397 			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3398 					     alloc_target,
3399 					     CHUNK_ALLOC_NO_FORCE);
3400 			btrfs_end_transaction(trans, root);
3401 			if (ret < 0) {
3402 				if (ret != -ENOSPC)
3403 					return ret;
3404 				else
3405 					goto commit_trans;
3406 			}
3407 
3408 			if (!data_sinfo)
3409 				data_sinfo = fs_info->data_sinfo;
3410 
3411 			goto again;
3412 		}
3413 
3414 		/*
3415 		 * If we have less pinned bytes than we want to allocate then
3416 		 * don't bother committing the transaction, it won't help us.
3417 		 */
3418 		if (data_sinfo->bytes_pinned < bytes)
3419 			committed = 1;
3420 		spin_unlock(&data_sinfo->lock);
3421 
3422 		/* commit the current transaction and try again */
3423 commit_trans:
3424 		if (!committed &&
3425 		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
3426 			committed = 1;
3427 			trans = btrfs_join_transaction(root);
3428 			if (IS_ERR(trans))
3429 				return PTR_ERR(trans);
3430 			ret = btrfs_commit_transaction(trans, root);
3431 			if (ret)
3432 				return ret;
3433 			goto again;
3434 		}
3435 
3436 		return -ENOSPC;
3437 	}
3438 	data_sinfo->bytes_may_use += bytes;
3439 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3440 				      data_sinfo->flags, bytes, 1);
3441 	spin_unlock(&data_sinfo->lock);
3442 
3443 	return 0;
3444 }
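
/*
 * The alignment at the top of this function is the standard power-of-two
 * round-up: (bytes + sectorsize - 1) & ~(sectorsize - 1).  E.g. with a
 * 4096-byte sectorsize, reserving 5000 bytes actually accounts 8192.
 */
#include <stdint.h>

static uint64_t toy_align_up(uint64_t bytes, uint64_t sectorsize)
{
	/* assumes sectorsize is a power of two, as btrfs requires */
	return (bytes + sectorsize - 1) & ~(sectorsize - 1);
}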
3445 
3446 /*
3447  * Called if we need to clear a data reservation for this inode.
3448  */
3449 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3450 {
3451 	struct btrfs_root *root = BTRFS_I(inode)->root;
3452 	struct btrfs_space_info *data_sinfo;
3453 
3454 	/* make sure bytes are sectorsize aligned */
3455 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3456 
3457 	data_sinfo = root->fs_info->data_sinfo;
3458 	spin_lock(&data_sinfo->lock);
3459 	data_sinfo->bytes_may_use -= bytes;
3460 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3461 				      data_sinfo->flags, bytes, 0);
3462 	spin_unlock(&data_sinfo->lock);
3463 }
3464 
3465 static void force_metadata_allocation(struct btrfs_fs_info *info)
3466 {
3467 	struct list_head *head = &info->space_info;
3468 	struct btrfs_space_info *found;
3469 
3470 	rcu_read_lock();
3471 	list_for_each_entry_rcu(found, head, list) {
3472 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3473 			found->force_alloc = CHUNK_ALLOC_FORCE;
3474 	}
3475 	rcu_read_unlock();
3476 }
3477 
3478 static int should_alloc_chunk(struct btrfs_root *root,
3479 			      struct btrfs_space_info *sinfo, int force)
3480 {
3481 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3482 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3483 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3484 	u64 thresh;
3485 
3486 	if (force == CHUNK_ALLOC_FORCE)
3487 		return 1;
3488 
3489 	/*
3490 	 * We need to take into account the global rsv because for all intents
3491 	 * and purposes it's used space.  Don't worry about locking the
3492 	 * global_rsv, it doesn't change except when the transaction commits.
3493 	 */
3494 	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3495 		num_allocated += global_rsv->size;
3496 
3497 	/*
3498 	 * in limited mode, we want to have some free space up to
3499 	 * about 1% of the FS size.
3500 	 */
3501 	if (force == CHUNK_ALLOC_LIMITED) {
3502 		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3503 		thresh = max_t(u64, 64 * 1024 * 1024,
3504 			       div_factor_fine(thresh, 1));
3505 
3506 		if (num_bytes - num_allocated < thresh)
3507 			return 1;
3508 	}
3509 
3510 	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3511 		return 0;
3512 	return 1;
3513 }
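
/*
 * In the default (NO_FORCE) case above, div_factor(num_bytes, 8) is
 * num_bytes * 8 / 10, so a new chunk is cut once used + reserved space
 * passes roughly 80% of the space-info.  A hedged restatement: with
 * 10 GiB of writable space, allocation kicks in near 8 GiB consumed.
 */
#include <stdint.h>

static int toy_should_alloc(uint64_t num_bytes, uint64_t num_allocated)
{
	return num_allocated + 2 * 1024 * 1024 >= num_bytes * 8 / 10;
}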
3514 
3515 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3516 {
3517 	u64 num_dev;
3518 
3519 	if (type & BTRFS_BLOCK_GROUP_RAID10 ||
3520 	    type & BTRFS_BLOCK_GROUP_RAID0)
3521 		num_dev = root->fs_info->fs_devices->rw_devices;
3522 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
3523 		num_dev = 2;
3524 	else
3525 		num_dev = 1;	/* DUP or single */
3526 
3527 	/* metadata for updating devices and chunk tree */
3528 	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3529 }
3530 
3531 static void check_system_chunk(struct btrfs_trans_handle *trans,
3532 			       struct btrfs_root *root, u64 type)
3533 {
3534 	struct btrfs_space_info *info;
3535 	u64 left;
3536 	u64 thresh;
3537 
3538 	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3539 	spin_lock(&info->lock);
3540 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3541 		info->bytes_reserved - info->bytes_readonly;
3542 	spin_unlock(&info->lock);
3543 
3544 	thresh = get_system_chunk_thresh(root, type);
3545 	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3546 		printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3547 		       left, thresh, type);
3548 		dump_space_info(info, 0, 0);
3549 	}
3550 
3551 	if (left < thresh) {
3552 		u64 flags;
3553 
3554 		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3555 		btrfs_alloc_chunk(trans, root, flags);
3556 	}
3557 }
3558 
3559 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3560 			  struct btrfs_root *extent_root, u64 flags, int force)
3561 {
3562 	struct btrfs_space_info *space_info;
3563 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
3564 	int wait_for_alloc = 0;
3565 	int ret = 0;
3566 
3567 	space_info = __find_space_info(extent_root->fs_info, flags);
3568 	if (!space_info) {
3569 		ret = update_space_info(extent_root->fs_info, flags,
3570 					0, 0, &space_info);
3571 		BUG_ON(ret); /* -ENOMEM */
3572 	}
3573 	BUG_ON(!space_info); /* Logic error */
3574 
3575 again:
3576 	spin_lock(&space_info->lock);
3577 	if (force < space_info->force_alloc)
3578 		force = space_info->force_alloc;
3579 	if (space_info->full) {
3580 		spin_unlock(&space_info->lock);
3581 		return 0;
3582 	}
3583 
3584 	if (!should_alloc_chunk(extent_root, space_info, force)) {
3585 		spin_unlock(&space_info->lock);
3586 		return 0;
3587 	} else if (space_info->chunk_alloc) {
3588 		wait_for_alloc = 1;
3589 	} else {
3590 		space_info->chunk_alloc = 1;
3591 	}
3592 
3593 	spin_unlock(&space_info->lock);
3594 
3595 	mutex_lock(&fs_info->chunk_mutex);
3596 
3597 	/*
3598 	 * The chunk_mutex is held throughout the entirety of a chunk
3599 	 * allocation, so once we've acquired the chunk_mutex we know that the
3600 	 * other guy is done and we need to recheck and see if we should
3601 	 * allocate.
3602 	 */
3603 	if (wait_for_alloc) {
3604 		mutex_unlock(&fs_info->chunk_mutex);
3605 		wait_for_alloc = 0;
3606 		goto again;
3607 	}
3608 
3609 	/*
3610 	 * If we have mixed data/metadata chunks we want to make sure we keep
3611 	 * allocating mixed chunks instead of individual chunks.
3612 	 */
3613 	if (btrfs_mixed_space_info(space_info))
3614 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3615 
3616 	/*
3617 	 * if we're doing a data chunk, go ahead and make sure that
3618 	 * we keep a reasonable number of metadata chunks allocated in the
3619 	 * FS as well.
3620 	 */
3621 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3622 		fs_info->data_chunk_allocations++;
3623 		if (!(fs_info->data_chunk_allocations %
3624 		      fs_info->metadata_ratio))
3625 			force_metadata_allocation(fs_info);
3626 	}
3627 
3628 	/*
3629 	 * Check if we have enough space in SYSTEM chunk because we may need
3630 	 * to update devices.
3631 	 */
3632 	check_system_chunk(trans, extent_root, flags);
3633 
3634 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
3635 	if (ret < 0 && ret != -ENOSPC)
3636 		goto out;
3637 
3638 	spin_lock(&space_info->lock);
3639 	if (ret)
3640 		space_info->full = 1;
3641 	else
3642 		ret = 1;
3643 
3644 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3645 	space_info->chunk_alloc = 0;
3646 	spin_unlock(&space_info->lock);
3647 out:
3648 	mutex_unlock(&fs_info->chunk_mutex);
3649 	return ret;
3650 }
3651 
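/*
 * Decide whether reserving @bytes may overcommit the space info.  As a
 * rough worked example (illustrative numbers, not from the original
 * comments): with 8GiB of unallocated device space on a RAID1 profile and
 * BTRFS_RESERVE_FLUSH_ALL, avail = 8GiB / 2 (mirroring) / 8 (flush-all
 * cap) = 512MiB of allowed overcommit beyond total_bytes.
 */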
3652 static int can_overcommit(struct btrfs_root *root,
3653 			  struct btrfs_space_info *space_info, u64 bytes,
3654 			  enum btrfs_reserve_flush_enum flush)
3655 {
3656 	u64 profile = btrfs_get_alloc_profile(root, 0);
3657 	u64 avail;
3658 	u64 used;
3659 
3660 	used = space_info->bytes_used + space_info->bytes_reserved +
3661 		space_info->bytes_pinned + space_info->bytes_readonly +
3662 		space_info->bytes_may_use;
3663 
3664 	spin_lock(&root->fs_info->free_chunk_lock);
3665 	avail = root->fs_info->free_chunk_space;
3666 	spin_unlock(&root->fs_info->free_chunk_lock);
3667 
3668 	/*
3669 	 * If we have dup, raid1 or raid10 then only half of the free
3670 	 * space is actually usable.
3671 	 */
3672 	if (profile & (BTRFS_BLOCK_GROUP_DUP |
3673 		       BTRFS_BLOCK_GROUP_RAID1 |
3674 		       BTRFS_BLOCK_GROUP_RAID10))
3675 		avail >>= 1;
3676 
3677 	/*
3678 	 * If we aren't allowed to flush all things, let us overcommit up to
3679 	 * 1/2 of the space.  If we can flush, don't let us overcommit too
3680 	 * much; only let it overcommit up to 1/8 of the space.
3681 	 */
3682 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
3683 		avail >>= 3;
3684 	else
3685 		avail >>= 1;
3686 
3687 	if (used + bytes < space_info->total_bytes + avail)
3688 		return 1;
3689 	return 0;
3690 }
3691 
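/*
 * Kick off writeback only when nobody else is already writing back this
 * superblock and s_umount can be taken without blocking, presumably to
 * avoid racing with a concurrent unmount.
 */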
3692 static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
3693 					       unsigned long nr_pages,
3694 					       enum wb_reason reason)
3695 {
3696 	if (!writeback_in_progress(sb->s_bdi) &&
3697 	    down_read_trylock(&sb->s_umount)) {
3698 		writeback_inodes_sb_nr(sb, nr_pages, reason);
3699 		up_read(&sb->s_umount);
3700 		return 1;
3701 	}
3702 
3703 	return 0;
3704 }
3705 
3706 /*
3707  * shrink metadata reservation for delalloc
3708  */
3709 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3710 			    bool wait_ordered)
3711 {
3712 	struct btrfs_block_rsv *block_rsv;
3713 	struct btrfs_space_info *space_info;
3714 	struct btrfs_trans_handle *trans;
3715 	u64 delalloc_bytes;
3716 	u64 max_reclaim;
3717 	long time_left;
3718 	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3719 	int loops = 0;
3720 	enum btrfs_reserve_flush_enum flush;
3721 
3722 	trans = (struct btrfs_trans_handle *)current->journal_info;
3723 	block_rsv = &root->fs_info->delalloc_block_rsv;
3724 	space_info = block_rsv->space_info;
3725 
3726 	smp_mb();
3727 	delalloc_bytes = root->fs_info->delalloc_bytes;
3728 	if (delalloc_bytes == 0) {
3729 		if (trans)
3730 			return;
3731 		btrfs_wait_ordered_extents(root, 0);
3732 		return;
3733 	}
3734 
3735 	while (delalloc_bytes && loops < 3) {
3736 		max_reclaim = min(delalloc_bytes, to_reclaim);
3737 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3738 		writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
3739 						    nr_pages,
3740 						    WB_REASON_FS_FREE_SPACE);
3741 
3742 		/*
3743 		 * We need to wait for the async pages to actually start before
3744 		 * we do anything.
3745 		 */
3746 		wait_event(root->fs_info->async_submit_wait,
3747 			   !atomic_read(&root->fs_info->async_delalloc_pages));
3748 
3749 		if (!trans)
3750 			flush = BTRFS_RESERVE_FLUSH_ALL;
3751 		else
3752 			flush = BTRFS_RESERVE_NO_FLUSH;
3753 		spin_lock(&space_info->lock);
3754 		if (can_overcommit(root, space_info, orig, flush)) {
3755 			spin_unlock(&space_info->lock);
3756 			break;
3757 		}
3758 		spin_unlock(&space_info->lock);
3759 
3760 		loops++;
3761 		if (wait_ordered && !trans) {
3762 			btrfs_wait_ordered_extents(root, 0);
3763 		} else {
3764 			time_left = schedule_timeout_killable(1);
3765 			if (time_left)
3766 				break;
3767 		}
3768 		smp_mb();
3769 		delalloc_bytes = root->fs_info->delalloc_bytes;
3770 	}
3771 }
3772 
3773 /**
3774  * may_commit_transaction - possibly commit the transaction if it's worthwhile
3775  * @root - the root we're allocating for
3776  * @bytes - the number of bytes we want to reserve
3777  * @force - force the commit
3778  *
3779  * This will check to make sure that committing the transaction will actually
3780  * get us somewhere and then commit the transaction if it does.  Otherwise it
3781  * will return -ENOSPC.
3782  */
3783 static int may_commit_transaction(struct btrfs_root *root,
3784 				  struct btrfs_space_info *space_info,
3785 				  u64 bytes, int force)
3786 {
3787 	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3788 	struct btrfs_trans_handle *trans;
3789 
3790 	trans = (struct btrfs_trans_handle *)current->journal_info;
3791 	if (trans)
3792 		return -EAGAIN;
3793 
3794 	if (force)
3795 		goto commit;
3796 
3797 	/* See if there is enough pinned space to make this reservation */
3798 	spin_lock(&space_info->lock);
3799 	if (space_info->bytes_pinned >= bytes) {
3800 		spin_unlock(&space_info->lock);
3801 		goto commit;
3802 	}
3803 	spin_unlock(&space_info->lock);
3804 
3805 	/*
3806 	 * See if there is some space in the delayed insertion reservation for
3807 	 * this reservation.
3808 	 */
3809 	if (space_info != delayed_rsv->space_info)
3810 		return -ENOSPC;
3811 
3812 	spin_lock(&space_info->lock);
3813 	spin_lock(&delayed_rsv->lock);
3814 	if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3815 		spin_unlock(&delayed_rsv->lock);
3816 		spin_unlock(&space_info->lock);
3817 		return -ENOSPC;
3818 	}
3819 	spin_unlock(&delayed_rsv->lock);
3820 	spin_unlock(&space_info->lock);
3821 
3822 commit:
3823 	trans = btrfs_join_transaction(root);
3824 	if (IS_ERR(trans))
3825 		return -ENOSPC;
3826 
3827 	return btrfs_commit_transaction(trans, root);
3828 }
3829 
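/*
 * The states below are tried in ascending order by reserve_metadata_bytes(),
 * each step more expensive than the last: run a bounded number of delayed
 * items, then all of them, then flush delalloc (optionally waiting on
 * ordered extents), then allocate a fresh chunk, and as a last resort
 * commit the transaction.
 */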
3830 enum flush_state {
3831 	FLUSH_DELAYED_ITEMS_NR	=	1,
3832 	FLUSH_DELAYED_ITEMS	=	2,
3833 	FLUSH_DELALLOC		=	3,
3834 	FLUSH_DELALLOC_WAIT	=	4,
3835 	ALLOC_CHUNK		=	5,
3836 	COMMIT_TRANS		=	6,
3837 };
3838 
3839 static int flush_space(struct btrfs_root *root,
3840 		       struct btrfs_space_info *space_info, u64 num_bytes,
3841 		       u64 orig_bytes, int state)
3842 {
3843 	struct btrfs_trans_handle *trans;
3844 	int nr;
3845 	int ret = 0;
3846 
3847 	switch (state) {
3848 	case FLUSH_DELAYED_ITEMS_NR:
3849 	case FLUSH_DELAYED_ITEMS:
3850 		if (state == FLUSH_DELAYED_ITEMS_NR) {
3851 			u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
3852 
3853 			nr = (int)div64_u64(num_bytes, bytes);
3854 			if (!nr)
3855 				nr = 1;
3856 			nr *= 2;
3857 		} else {
3858 			nr = -1;
3859 		}
3860 		trans = btrfs_join_transaction(root);
3861 		if (IS_ERR(trans)) {
3862 			ret = PTR_ERR(trans);
3863 			break;
3864 		}
3865 		ret = btrfs_run_delayed_items_nr(trans, root, nr);
3866 		btrfs_end_transaction(trans, root);
3867 		break;
3868 	case FLUSH_DELALLOC:
3869 	case FLUSH_DELALLOC_WAIT:
3870 		shrink_delalloc(root, num_bytes, orig_bytes,
3871 				state == FLUSH_DELALLOC_WAIT);
3872 		break;
3873 	case ALLOC_CHUNK:
3874 		trans = btrfs_join_transaction(root);
3875 		if (IS_ERR(trans)) {
3876 			ret = PTR_ERR(trans);
3877 			break;
3878 		}
3879 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3880 				     btrfs_get_alloc_profile(root, 0),
3881 				     CHUNK_ALLOC_NO_FORCE);
3882 		btrfs_end_transaction(trans, root);
3883 		if (ret == -ENOSPC)
3884 			ret = 0;
3885 		break;
3886 	case COMMIT_TRANS:
3887 		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3888 		break;
3889 	default:
3890 		ret = -ENOSPC;
3891 		break;
3892 	}
3893 
3894 	return ret;
3895 }
3896 /**
3897  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3898  * @root - the root we're allocating for
3899  * @block_rsv - the block_rsv we're allocating for
3900  * @orig_bytes - the number of bytes we want
3901  * @flush - whether or not we can flush to make our reservation
3902  *
3903  * This will reserve orig_bytes number of bytes from the space info associated
3904  * with the block_rsv.  If there is not enough space it will make an attempt to
3905  * flush out space to make room.  It will do this by flushing delalloc if
3906  * possible or committing the transaction.  If flush is 0 then no attempts to
3907  * regain reservations will be made and this will fail if there is not enough
3908  * space already.
3909  */
3910 static int reserve_metadata_bytes(struct btrfs_root *root,
3911 				  struct btrfs_block_rsv *block_rsv,
3912 				  u64 orig_bytes,
3913 				  enum btrfs_reserve_flush_enum flush)
3914 {
3915 	struct btrfs_space_info *space_info = block_rsv->space_info;
3916 	u64 used;
3917 	u64 num_bytes = orig_bytes;
3918 	int flush_state = FLUSH_DELAYED_ITEMS_NR;
3919 	int ret = 0;
3920 	bool flushing = false;
3921 
3922 again:
3923 	ret = 0;
3924 	spin_lock(&space_info->lock);
3925 	/*
3926 	 * We only want to wait if somebody other than us is flushing and we
3927 	 * are actually allowed to flush all things.
3928 	 */
3929 	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
3930 	       space_info->flush) {
3931 		spin_unlock(&space_info->lock);
3932 		/*
3933 		 * If we have a trans handle we can't wait because the flusher
3934 		 * may have to commit the transaction, which would mean we would
3935 		 * deadlock since we are waiting for the flusher to finish, but
3936 		 * hold the current transaction open.
3937 		 */
3938 		if (current->journal_info)
3939 			return -EAGAIN;
3940 		ret = wait_event_killable(space_info->wait, !space_info->flush);
3941 		/* Must have been killed, return */
3942 		if (ret)
3943 			return -EINTR;
3944 
3945 		spin_lock(&space_info->lock);
3946 	}
3947 
3948 	ret = -ENOSPC;
3949 	used = space_info->bytes_used + space_info->bytes_reserved +
3950 		space_info->bytes_pinned + space_info->bytes_readonly +
3951 		space_info->bytes_may_use;
3952 
3953 	/*
3954 	 * The idea here is that if we haven't already over-reserved the block
3955 	 * group then we can go ahead and save our reservation first and then
3956 	 * start flushing if we need to.  Otherwise, if we've already
3957 	 * overcommitted, let's start flushing stuff first and then come back
3958 	 * and try to make our reservation.
3959 	 */
3960 	if (used <= space_info->total_bytes) {
3961 		if (used + orig_bytes <= space_info->total_bytes) {
3962 			space_info->bytes_may_use += orig_bytes;
3963 			trace_btrfs_space_reservation(root->fs_info,
3964 				"space_info", space_info->flags, orig_bytes, 1);
3965 			ret = 0;
3966 		} else {
3967 			/*
3968 			 * Ok, set num_bytes to orig_bytes since we aren't
3969 			 * overcommitted; this way we only try to reclaim what
3970 			 * we need.
3971 			 */
3972 			num_bytes = orig_bytes;
3973 		}
3974 	} else {
3975 		/*
3976 		 * Ok, we're overcommitted; set num_bytes to the overcommitted
3977 		 * amount plus the amount of bytes that we need for this
3978 		 * reservation.
3979 		 */
3980 		num_bytes = used - space_info->total_bytes +
3981 			(orig_bytes * 2);
3982 	}
3983 
3984 	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
3985 		space_info->bytes_may_use += orig_bytes;
3986 		trace_btrfs_space_reservation(root->fs_info, "space_info",
3987 					      space_info->flags, orig_bytes,
3988 					      1);
3989 		ret = 0;
3990 	}
3991 
3992 	/*
3993 	 * Couldn't make our reservation, save our place so while we're trying
3994 	 * to reclaim space we can actually use it instead of somebody else
3995 	 * stealing it from us.
3996 	 *
3997 	 * We make the other tasks wait for the flush only when we can flush
3998 	 * all things.
3999 	 */
4000 	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4001 		flushing = true;
4002 		space_info->flush = 1;
4003 	}
4004 
4005 	spin_unlock(&space_info->lock);
4006 
4007 	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4008 		goto out;
4009 
4010 	ret = flush_space(root, space_info, num_bytes, orig_bytes,
4011 			  flush_state);
4012 	flush_state++;
4013 
4014 	/*
4015 	 * If we are BTRFS_RESERVE_FLUSH_LIMIT, we cannot flush delalloc or a
4016 	 * deadlock would happen, so skip the delalloc flush states.
4017 	 */
4018 	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4019 	    (flush_state == FLUSH_DELALLOC ||
4020 	     flush_state == FLUSH_DELALLOC_WAIT))
4021 		flush_state = ALLOC_CHUNK;
4022 
4023 	if (!ret)
4024 		goto again;
4025 	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4026 		 flush_state < COMMIT_TRANS)
4027 		goto again;
4028 	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4029 		 flush_state <= COMMIT_TRANS)
4030 		goto again;
4031 
4032 out:
4033 	if (flushing) {
4034 		spin_lock(&space_info->lock);
4035 		space_info->flush = 0;
4036 		wake_up_all(&space_info->wait);
4037 		spin_unlock(&space_info->lock);
4038 	}
4039 	return ret;
4040 }
4041 
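/*
 * Pick the block reservation an allocation should draw from: the
 * transaction's rsv for COW-able roots (and for the csum root while adding
 * csums), falling back to the root's own rsv and finally to the global
 * empty rsv.
 */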
4042 static struct btrfs_block_rsv *get_block_rsv(
4043 					const struct btrfs_trans_handle *trans,
4044 					const struct btrfs_root *root)
4045 {
4046 	struct btrfs_block_rsv *block_rsv = NULL;
4047 
4048 	if (root->ref_cows)
4049 		block_rsv = trans->block_rsv;
4050 
4051 	if (root == root->fs_info->csum_root && trans->adding_csums)
4052 		block_rsv = trans->block_rsv;
4053 
4054 	if (!block_rsv)
4055 		block_rsv = root->block_rsv;
4056 
4057 	if (!block_rsv)
4058 		block_rsv = &root->fs_info->empty_block_rsv;
4059 
4060 	return block_rsv;
4061 }
4062 
4063 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4064 			       u64 num_bytes)
4065 {
4066 	int ret = -ENOSPC;
4067 	spin_lock(&block_rsv->lock);
4068 	if (block_rsv->reserved >= num_bytes) {
4069 		block_rsv->reserved -= num_bytes;
4070 		if (block_rsv->reserved < block_rsv->size)
4071 			block_rsv->full = 0;
4072 		ret = 0;
4073 	}
4074 	spin_unlock(&block_rsv->lock);
4075 	return ret;
4076 }
4077 
4078 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4079 				u64 num_bytes, int update_size)
4080 {
4081 	spin_lock(&block_rsv->lock);
4082 	block_rsv->reserved += num_bytes;
4083 	if (update_size)
4084 		block_rsv->size += num_bytes;
4085 	else if (block_rsv->reserved >= block_rsv->size)
4086 		block_rsv->full = 1;
4087 	spin_unlock(&block_rsv->lock);
4088 }
4089 
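/*
 * Reduce @block_rsv's size by @num_bytes ((u64)-1 means the whole rsv) and
 * release any now-excess reserved space: refill @dest first, then return
 * whatever @dest cannot absorb to the space info's bytes_may_use pool.
 */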
4090 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4091 				    struct btrfs_block_rsv *block_rsv,
4092 				    struct btrfs_block_rsv *dest, u64 num_bytes)
4093 {
4094 	struct btrfs_space_info *space_info = block_rsv->space_info;
4095 
4096 	spin_lock(&block_rsv->lock);
4097 	if (num_bytes == (u64)-1)
4098 		num_bytes = block_rsv->size;
4099 	block_rsv->size -= num_bytes;
4100 	if (block_rsv->reserved >= block_rsv->size) {
4101 		num_bytes = block_rsv->reserved - block_rsv->size;
4102 		block_rsv->reserved = block_rsv->size;
4103 		block_rsv->full = 1;
4104 	} else {
4105 		num_bytes = 0;
4106 	}
4107 	spin_unlock(&block_rsv->lock);
4108 
4109 	if (num_bytes > 0) {
4110 		if (dest) {
4111 			spin_lock(&dest->lock);
4112 			if (!dest->full) {
4113 				u64 bytes_to_add;
4114 
4115 				bytes_to_add = dest->size - dest->reserved;
4116 				bytes_to_add = min(num_bytes, bytes_to_add);
4117 				dest->reserved += bytes_to_add;
4118 				if (dest->reserved >= dest->size)
4119 					dest->full = 1;
4120 				num_bytes -= bytes_to_add;
4121 			}
4122 			spin_unlock(&dest->lock);
4123 		}
4124 		if (num_bytes) {
4125 			spin_lock(&space_info->lock);
4126 			space_info->bytes_may_use -= num_bytes;
4127 			trace_btrfs_space_reservation(fs_info, "space_info",
4128 					space_info->flags, num_bytes, 0);
4129 			space_info->reservation_progress++;
4130 			spin_unlock(&space_info->lock);
4131 		}
4132 	}
4133 }
4134 
4135 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4136 				   struct btrfs_block_rsv *dst, u64 num_bytes)
4137 {
4138 	int ret;
4139 
4140 	ret = block_rsv_use_bytes(src, num_bytes);
4141 	if (ret)
4142 		return ret;
4143 
4144 	block_rsv_add_bytes(dst, num_bytes, 1);
4145 	return 0;
4146 }
4147 
4148 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4149 {
4150 	memset(rsv, 0, sizeof(*rsv));
4151 	spin_lock_init(&rsv->lock);
4152 	rsv->type = type;
4153 }
4154 
4155 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4156 					      unsigned short type)
4157 {
4158 	struct btrfs_block_rsv *block_rsv;
4159 	struct btrfs_fs_info *fs_info = root->fs_info;
4160 
4161 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4162 	if (!block_rsv)
4163 		return NULL;
4164 
4165 	btrfs_init_block_rsv(block_rsv, type);
4166 	block_rsv->space_info = __find_space_info(fs_info,
4167 						  BTRFS_BLOCK_GROUP_METADATA);
4168 	return block_rsv;
4169 }
4170 
4171 void btrfs_free_block_rsv(struct btrfs_root *root,
4172 			  struct btrfs_block_rsv *rsv)
4173 {
4174 	if (!rsv)
4175 		return;
4176 	btrfs_block_rsv_release(root, rsv, (u64)-1);
4177 	kfree(rsv);
4178 }
4179 
4180 int btrfs_block_rsv_add(struct btrfs_root *root,
4181 			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4182 			enum btrfs_reserve_flush_enum flush)
4183 {
4184 	int ret;
4185 
4186 	if (num_bytes == 0)
4187 		return 0;
4188 
4189 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4190 	if (!ret) {
4191 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
4192 		return 0;
4193 	}
4194 
4195 	return ret;
4196 }
4197 
4198 int btrfs_block_rsv_check(struct btrfs_root *root,
4199 			  struct btrfs_block_rsv *block_rsv, int min_factor)
4200 {
4201 	u64 num_bytes = 0;
4202 	int ret = -ENOSPC;
4203 
4204 	if (!block_rsv)
4205 		return 0;
4206 
4207 	spin_lock(&block_rsv->lock);
4208 	num_bytes = div_factor(block_rsv->size, min_factor);
4209 	if (block_rsv->reserved >= num_bytes)
4210 		ret = 0;
4211 	spin_unlock(&block_rsv->lock);
4212 
4213 	return ret;
4214 }
4215 
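/*
 * Unlike btrfs_block_rsv_check() above, which tests against a factor of
 * the rsv's size, this tops the rsv up to an absolute @min_reserved,
 * reserving the shortfall from the space info if necessary.
 */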
4216 int btrfs_block_rsv_refill(struct btrfs_root *root,
4217 			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4218 			   enum btrfs_reserve_flush_enum flush)
4219 {
4220 	u64 num_bytes = 0;
4221 	int ret = -ENOSPC;
4222 
4223 	if (!block_rsv)
4224 		return 0;
4225 
4226 	spin_lock(&block_rsv->lock);
4227 	num_bytes = min_reserved;
4228 	if (block_rsv->reserved >= num_bytes)
4229 		ret = 0;
4230 	else
4231 		num_bytes -= block_rsv->reserved;
4232 	spin_unlock(&block_rsv->lock);
4233 
4234 	if (!ret)
4235 		return 0;
4236 
4237 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4238 	if (!ret) {
4239 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
4240 		return 0;
4241 	}
4242 
4243 	return ret;
4244 }
4245 
4246 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4247 			    struct btrfs_block_rsv *dst_rsv,
4248 			    u64 num_bytes)
4249 {
4250 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4251 }
4252 
4253 void btrfs_block_rsv_release(struct btrfs_root *root,
4254 			     struct btrfs_block_rsv *block_rsv,
4255 			     u64 num_bytes)
4256 {
4257 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4258 	if (global_rsv->full || global_rsv == block_rsv ||
4259 	    block_rsv->space_info != global_rsv->space_info)
4260 		global_rsv = NULL;
4261 	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4262 				num_bytes);
4263 }
4264 
4265 /*
4266  * helper to calculate size of global block reservation.
4267  * the desired value is sum of space used by extent tree,
4268  * checksum tree and root tree
4269  */
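/*
 * Roughly, reading the code below (an informal summary, not an exact
 * specification):
 *
 *	num_bytes = (data_used / blocksize) * csum_size * 2
 *		  + (data_used + meta_used) / 50
 *
 * capped at one third of the metadata in use and rounded up to a multiple
 * of 1024 leaves.
 */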
4270 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4271 {
4272 	struct btrfs_space_info *sinfo;
4273 	u64 num_bytes;
4274 	u64 meta_used;
4275 	u64 data_used;
4276 	int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4277 
4278 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4279 	spin_lock(&sinfo->lock);
4280 	data_used = sinfo->bytes_used;
4281 	spin_unlock(&sinfo->lock);
4282 
4283 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4284 	spin_lock(&sinfo->lock);
4285 	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4286 		data_used = 0;
4287 	meta_used = sinfo->bytes_used;
4288 	spin_unlock(&sinfo->lock);
4289 
4290 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4291 		    csum_size * 2;
4292 	num_bytes += div64_u64(data_used + meta_used, 50);
4293 
4294 	if (num_bytes * 3 > meta_used)
4295 		num_bytes = div64_u64(meta_used, 3);
4296 
4297 	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4298 }
4299 
4300 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4301 {
4302 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4303 	struct btrfs_space_info *sinfo = block_rsv->space_info;
4304 	u64 num_bytes;
4305 
4306 	num_bytes = calc_global_metadata_size(fs_info);
4307 
4308 	spin_lock(&sinfo->lock);
4309 	spin_lock(&block_rsv->lock);
4310 
4311 	block_rsv->size = num_bytes;
4312 
4313 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4314 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
4315 		    sinfo->bytes_may_use;
4316 
4317 	if (sinfo->total_bytes > num_bytes) {
4318 		num_bytes = sinfo->total_bytes - num_bytes;
4319 		block_rsv->reserved += num_bytes;
4320 		sinfo->bytes_may_use += num_bytes;
4321 		trace_btrfs_space_reservation(fs_info, "space_info",
4322 				      sinfo->flags, num_bytes, 1);
4323 	}
4324 
4325 	if (block_rsv->reserved >= block_rsv->size) {
4326 		num_bytes = block_rsv->reserved - block_rsv->size;
4327 		sinfo->bytes_may_use -= num_bytes;
4328 		trace_btrfs_space_reservation(fs_info, "space_info",
4329 				      sinfo->flags, num_bytes, 0);
4330 		sinfo->reservation_progress++;
4331 		block_rsv->reserved = block_rsv->size;
4332 		block_rsv->full = 1;
4333 	}
4334 
4335 	spin_unlock(&block_rsv->lock);
4336 	spin_unlock(&sinfo->lock);
4337 }
4338 
4339 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4340 {
4341 	struct btrfs_space_info *space_info;
4342 
4343 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4344 	fs_info->chunk_block_rsv.space_info = space_info;
4345 
4346 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4347 	fs_info->global_block_rsv.space_info = space_info;
4348 	fs_info->delalloc_block_rsv.space_info = space_info;
4349 	fs_info->trans_block_rsv.space_info = space_info;
4350 	fs_info->empty_block_rsv.space_info = space_info;
4351 	fs_info->delayed_block_rsv.space_info = space_info;
4352 
4353 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4354 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4355 	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4356 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4357 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4358 
4359 	update_global_block_rsv(fs_info);
4360 }
4361 
4362 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4363 {
4364 	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4365 				(u64)-1);
4366 	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4367 	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4368 	WARN_ON(fs_info->trans_block_rsv.size > 0);
4369 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4370 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
4371 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4372 	WARN_ON(fs_info->delayed_block_rsv.size > 0);
4373 	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4374 }
4375 
4376 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4377 				  struct btrfs_root *root)
4378 {
4379 	if (!trans->block_rsv)
4380 		return;
4381 
4382 	if (!trans->bytes_reserved)
4383 		return;
4384 
4385 	trace_btrfs_space_reservation(root->fs_info, "transaction",
4386 				      trans->transid, trans->bytes_reserved, 0);
4387 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4388 	trans->bytes_reserved = 0;
4389 }
4390 
4391 /* Can only return 0 or -ENOSPC */
4392 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4393 				  struct inode *inode)
4394 {
4395 	struct btrfs_root *root = BTRFS_I(inode)->root;
4396 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4397 	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4398 
4399 	/*
4400 	 * We need to hold space in order to delete our orphan item once we've
4401 	 * added it, so this takes the reservation here so that we can release
4402 	 * it later when we are truly done with the orphan item.
4403 	 */
4404 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4405 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4406 				      btrfs_ino(inode), num_bytes, 1);
4407 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4408 }
4409 
4410 void btrfs_orphan_release_metadata(struct inode *inode)
4411 {
4412 	struct btrfs_root *root = BTRFS_I(inode)->root;
4413 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4414 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4415 				      btrfs_ino(inode), num_bytes, 0);
4416 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4417 }
4418 
4419 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4420 				struct btrfs_pending_snapshot *pending)
4421 {
4422 	struct btrfs_root *root = pending->root;
4423 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4424 	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4425 	/*
4426 	 * two for root back/forward refs, two for directory entries,
4427 	 * one for root of the snapshot and one for parent inode.
4428 	 */
4429 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
4430 	dst_rsv->space_info = src_rsv->space_info;
4431 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4432 }
4433 
4434 /**
4435  * drop_outstanding_extent - drop an outstanding extent
4436  * @inode: the inode we're dropping the extent for
4437  *
4438  * This is called when we are freeing up an outstanding extent, either called
4439  * after an error or after an extent is written.  This will return the number of
4440  * reserved extents that need to be freed.  This must be called with
4441  * BTRFS_I(inode)->lock held.
4442  */
4443 static unsigned drop_outstanding_extent(struct inode *inode)
4444 {
4445 	unsigned drop_inode_space = 0;
4446 	unsigned dropped_extents = 0;
4447 
4448 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4449 	BTRFS_I(inode)->outstanding_extents--;
4450 
4451 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
4452 	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4453 			       &BTRFS_I(inode)->runtime_flags))
4454 		drop_inode_space = 1;
4455 
4456 	/*
4457 	 * If we have at least as many outstanding extents as we have
4458 	 * reserved then we need to leave the reserved extents count alone.
4459 	 */
4460 	if (BTRFS_I(inode)->outstanding_extents >=
4461 	    BTRFS_I(inode)->reserved_extents)
4462 		return drop_inode_space;
4463 
4464 	dropped_extents = BTRFS_I(inode)->reserved_extents -
4465 		BTRFS_I(inode)->outstanding_extents;
4466 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
4467 	return dropped_extents + drop_inode_space;
4468 }
4469 
4470 /**
4471  * calc_csum_metadata_size - return the amount of metadata space that must be
4472  *	reserved/freed for the given bytes.
4473  * @inode: the inode we're manipulating
4474  * @num_bytes: the number of bytes in question
4475  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4476  *
4477  * This adjusts the number of csum_bytes in the inode and then returns the
4478  * correct amount of metadata that must either be reserved or freed.  We
4479  * calculate how many checksums we can fit into one leaf and then divide the
4480  * number of bytes that will need to be checksummed by this value to figure out
4481  * how many checksums will be required.  If we are adding bytes then the number
4482  * may go up and we will return the number of additional bytes that must be
4483  * reserved.  If it is going down we will return the number of bytes that must
4484  * be freed.
4485  *
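 * For instance, if the number of checksums before and after both round up
 * to the same number of leaves, nothing extra needs to be reserved or
 * freed and 0 is returned.
 *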
4486  * This must be called with BTRFS_I(inode)->lock held.
4487  */
4488 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4489 				   int reserve)
4490 {
4491 	struct btrfs_root *root = BTRFS_I(inode)->root;
4492 	u64 csum_size;
4493 	int num_csums_per_leaf;
4494 	int num_csums;
4495 	int old_csums;
4496 
4497 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4498 	    BTRFS_I(inode)->csum_bytes == 0)
4499 		return 0;
4500 
4501 	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4502 	if (reserve)
4503 		BTRFS_I(inode)->csum_bytes += num_bytes;
4504 	else
4505 		BTRFS_I(inode)->csum_bytes -= num_bytes;
4506 	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4507 	num_csums_per_leaf = (int)div64_u64(csum_size,
4508 					    sizeof(struct btrfs_csum_item) +
4509 					    sizeof(struct btrfs_disk_key));
4510 	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4511 	num_csums = num_csums + num_csums_per_leaf - 1;
4512 	num_csums = num_csums / num_csums_per_leaf;
4513 
4514 	old_csums = old_csums + num_csums_per_leaf - 1;
4515 	old_csums = old_csums / num_csums_per_leaf;
4516 
4517 	/* No change, no need to reserve more */
4518 	if (old_csums == num_csums)
4519 		return 0;
4520 
4521 	if (reserve)
4522 		return btrfs_calc_trans_metadata_size(root,
4523 						      num_csums - old_csums);
4524 
4525 	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4526 }
4527 
4528 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4529 {
4530 	struct btrfs_root *root = BTRFS_I(inode)->root;
4531 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4532 	u64 to_reserve = 0;
4533 	u64 csum_bytes;
4534 	unsigned nr_extents = 0;
4535 	int extra_reserve = 0;
4536 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4537 	int ret = 0;
4538 	bool delalloc_lock = true;
4539 
4540 	/* If we are a free space inode we must not flush, since we will be in
4541 	 * the middle of a transaction commit.  We also don't need the delalloc
4542 	 * mutex since we won't race with anybody.  We need this mostly to make
4543 	 * lockdep happy.
4544 	 */
4545 	if (btrfs_is_free_space_inode(inode)) {
4546 		flush = BTRFS_RESERVE_NO_FLUSH;
4547 		delalloc_lock = false;
4548 	}
4549 
4550 	if (flush != BTRFS_RESERVE_NO_FLUSH &&
4551 	    btrfs_transaction_in_commit(root->fs_info))
4552 		schedule_timeout(1);
4553 
4554 	if (delalloc_lock)
4555 		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4556 
4557 	num_bytes = ALIGN(num_bytes, root->sectorsize);
4558 
4559 	spin_lock(&BTRFS_I(inode)->lock);
4560 	BTRFS_I(inode)->outstanding_extents++;
4561 
4562 	if (BTRFS_I(inode)->outstanding_extents >
4563 	    BTRFS_I(inode)->reserved_extents)
4564 		nr_extents = BTRFS_I(inode)->outstanding_extents -
4565 			BTRFS_I(inode)->reserved_extents;
4566 
4567 	/*
4568 	 * Add an item to reserve for updating the inode when we complete the
4569 	 * delalloc io.
4570 	 */
4571 	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4572 		      &BTRFS_I(inode)->runtime_flags)) {
4573 		nr_extents++;
4574 		extra_reserve = 1;
4575 	}
4576 
4577 	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4578 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4579 	csum_bytes = BTRFS_I(inode)->csum_bytes;
4580 	spin_unlock(&BTRFS_I(inode)->lock);
4581 
4582 	if (root->fs_info->quota_enabled)
4583 		ret = btrfs_qgroup_reserve(root, num_bytes +
4584 					   nr_extents * root->leafsize);
4585 
4586 	/*
4587 	 * ret != 0 here means the qgroup reservation failed, we go straight to
4588 	 * the shared error handling then.
4589 	 */
4590 	if (ret == 0)
4591 		ret = reserve_metadata_bytes(root, block_rsv,
4592 					     to_reserve, flush);
4593 
4594 	if (ret) {
4595 		u64 to_free = 0;
4596 		unsigned dropped;
4597 
4598 		spin_lock(&BTRFS_I(inode)->lock);
4599 		dropped = drop_outstanding_extent(inode);
4600 		/*
4601 		 * If the inode's csum_bytes is the same as the original
4602 		 * csum_bytes then we know we haven't raced with any free()ers
4603 		 * so we can just reduce our inode's csum bytes and carry on.
4604 		 * Otherwise we have to do the normal free thing to account for
4605 		 * the case that the free side didn't free up its reserve
4606 		 * because of this outstanding reservation.
4607 		 */
4608 		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4609 			calc_csum_metadata_size(inode, num_bytes, 0);
4610 		else
4611 			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4612 		spin_unlock(&BTRFS_I(inode)->lock);
4613 		if (dropped)
4614 			to_free += btrfs_calc_trans_metadata_size(root, dropped);
4615 
4616 		if (to_free) {
4617 			btrfs_block_rsv_release(root, block_rsv, to_free);
4618 			trace_btrfs_space_reservation(root->fs_info,
4619 						      "delalloc",
4620 						      btrfs_ino(inode),
4621 						      to_free, 0);
4622 		}
4623 		if (root->fs_info->quota_enabled) {
4624 			btrfs_qgroup_free(root, num_bytes +
4625 						nr_extents * root->leafsize);
4626 		}
4627 		if (delalloc_lock)
4628 			mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4629 		return ret;
4630 	}
4631 
4632 	spin_lock(&BTRFS_I(inode)->lock);
4633 	if (extra_reserve) {
4634 		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4635 			&BTRFS_I(inode)->runtime_flags);
4636 		nr_extents--;
4637 	}
4638 	BTRFS_I(inode)->reserved_extents += nr_extents;
4639 	spin_unlock(&BTRFS_I(inode)->lock);
4640 
4641 	if (delalloc_lock)
4642 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4643 
4644 	if (to_reserve)
4645 		trace_btrfs_space_reservation(root->fs_info, "delalloc",
4646 					      btrfs_ino(inode), to_reserve, 1);
4647 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
4648 
4649 	return 0;
4650 }
4651 
4652 /**
4653  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4654  * @inode: the inode to release the reservation for
4655  * @num_bytes: the number of bytes we're releasing
4656  *
4657  * This will release the metadata reservation for an inode.  This can be called
4658  * once we complete IO for a given set of bytes to release their metadata
4659  * reservations.
4660  */
4661 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4662 {
4663 	struct btrfs_root *root = BTRFS_I(inode)->root;
4664 	u64 to_free = 0;
4665 	unsigned dropped;
4666 
4667 	num_bytes = ALIGN(num_bytes, root->sectorsize);
4668 	spin_lock(&BTRFS_I(inode)->lock);
4669 	dropped = drop_outstanding_extent(inode);
4670 
4671 	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4672 	spin_unlock(&BTRFS_I(inode)->lock);
4673 	if (dropped > 0)
4674 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
4675 
4676 	trace_btrfs_space_reservation(root->fs_info, "delalloc",
4677 				      btrfs_ino(inode), to_free, 0);
4678 	if (root->fs_info->quota_enabled) {
4679 		btrfs_qgroup_free(root, num_bytes +
4680 					dropped * root->leafsize);
4681 	}
4682 
4683 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4684 				to_free);
4685 }
4686 
4687 /**
4688  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4689  * @inode: inode we're writing to
4690  * @num_bytes: the number of bytes we want to allocate
4691  *
4692  * This will do the following things
4693  *
4694  * o reserve space in the data space info for num_bytes
4695  * o reserve space in the metadata space info based on number of outstanding
4696  *   extents and how much csums will be needed
4697  * o add to the inodes ->delalloc_bytes
4698  * o add it to the fs_info's delalloc inodes list.
4699  *
4700  * This will return 0 for success and -ENOSPC if there is no space left.
4701  */
4702 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4703 {
4704 	int ret;
4705 
4706 	ret = btrfs_check_data_free_space(inode, num_bytes);
4707 	if (ret)
4708 		return ret;
4709 
4710 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4711 	if (ret) {
4712 		btrfs_free_reserved_data_space(inode, num_bytes);
4713 		return ret;
4714 	}
4715 
4716 	return 0;
4717 }
4718 
4719 /**
4720  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4721  * @inode: inode we're releasing space for
4722  * @num_bytes: the number of bytes we want to free up
4723  *
4724  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4725  * called in the case that we don't need the metadata AND data reservations
4726  * anymore.  So if there is an error or we insert an inline extent.
4727  *
4728  * This function will release the metadata space that was not used and will
4729  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4730  * list if there are no delalloc bytes left.
4731  */
4732 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4733 {
4734 	btrfs_delalloc_release_metadata(inode, num_bytes);
4735 	btrfs_free_reserved_data_space(inode, num_bytes);
4736 }
4737 
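/*
 * Adjust the on-disk usage accounting (super block bytes_used, block group
 * item and space info counters) for an allocation or free of @num_bytes at
 * @bytenr.  On a free the space is pinned rather than handed back
 * directly; it only becomes usable again once the transaction commits and
 * the pinned extents are unpinned.
 */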
4738 static int update_block_group(struct btrfs_trans_handle *trans,
4739 			      struct btrfs_root *root,
4740 			      u64 bytenr, u64 num_bytes, int alloc)
4741 {
4742 	struct btrfs_block_group_cache *cache = NULL;
4743 	struct btrfs_fs_info *info = root->fs_info;
4744 	u64 total = num_bytes;
4745 	u64 old_val;
4746 	u64 byte_in_group;
4747 	int factor;
4748 
4749 	/* block accounting for super block */
4750 	spin_lock(&info->delalloc_lock);
4751 	old_val = btrfs_super_bytes_used(info->super_copy);
4752 	if (alloc)
4753 		old_val += num_bytes;
4754 	else
4755 		old_val -= num_bytes;
4756 	btrfs_set_super_bytes_used(info->super_copy, old_val);
4757 	spin_unlock(&info->delalloc_lock);
4758 
4759 	while (total) {
4760 		cache = btrfs_lookup_block_group(info, bytenr);
4761 		if (!cache)
4762 			return -ENOENT;
4763 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4764 				    BTRFS_BLOCK_GROUP_RAID1 |
4765 				    BTRFS_BLOCK_GROUP_RAID10))
4766 			factor = 2;
4767 		else
4768 			factor = 1;
4769 		/*
4770 		 * If this block group has free space cache written out, we
4771 		 * need to make sure to load it if we are removing space.  This
4772 		 * is because we need the unpinning stage to actually add the
4773 		 * space back to the block group, otherwise we will leak space.
4774 		 */
4775 		if (!alloc && cache->cached == BTRFS_CACHE_NO)
4776 			cache_block_group(cache, trans, NULL, 1);
4777 
4778 		byte_in_group = bytenr - cache->key.objectid;
4779 		WARN_ON(byte_in_group > cache->key.offset);
4780 
4781 		spin_lock(&cache->space_info->lock);
4782 		spin_lock(&cache->lock);
4783 
4784 		if (btrfs_test_opt(root, SPACE_CACHE) &&
4785 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
4786 			cache->disk_cache_state = BTRFS_DC_CLEAR;
4787 
4788 		cache->dirty = 1;
4789 		old_val = btrfs_block_group_used(&cache->item);
4790 		num_bytes = min(total, cache->key.offset - byte_in_group);
4791 		if (alloc) {
4792 			old_val += num_bytes;
4793 			btrfs_set_block_group_used(&cache->item, old_val);
4794 			cache->reserved -= num_bytes;
4795 			cache->space_info->bytes_reserved -= num_bytes;
4796 			cache->space_info->bytes_used += num_bytes;
4797 			cache->space_info->disk_used += num_bytes * factor;
4798 			spin_unlock(&cache->lock);
4799 			spin_unlock(&cache->space_info->lock);
4800 		} else {
4801 			old_val -= num_bytes;
4802 			btrfs_set_block_group_used(&cache->item, old_val);
4803 			cache->pinned += num_bytes;
4804 			cache->space_info->bytes_pinned += num_bytes;
4805 			cache->space_info->bytes_used -= num_bytes;
4806 			cache->space_info->disk_used -= num_bytes * factor;
4807 			spin_unlock(&cache->lock);
4808 			spin_unlock(&cache->space_info->lock);
4809 
4810 			set_extent_dirty(info->pinned_extents,
4811 					 bytenr, bytenr + num_bytes - 1,
4812 					 GFP_NOFS | __GFP_NOFAIL);
4813 		}
4814 		btrfs_put_block_group(cache);
4815 		total -= num_bytes;
4816 		bytenr += num_bytes;
4817 	}
4818 	return 0;
4819 }
4820 
4821 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4822 {
4823 	struct btrfs_block_group_cache *cache;
4824 	u64 bytenr;
4825 
4826 	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4827 	if (!cache)
4828 		return 0;
4829 
4830 	bytenr = cache->key.objectid;
4831 	btrfs_put_block_group(cache);
4832 
4833 	return bytenr;
4834 }
4835 
4836 static int pin_down_extent(struct btrfs_root *root,
4837 			   struct btrfs_block_group_cache *cache,
4838 			   u64 bytenr, u64 num_bytes, int reserved)
4839 {
4840 	spin_lock(&cache->space_info->lock);
4841 	spin_lock(&cache->lock);
4842 	cache->pinned += num_bytes;
4843 	cache->space_info->bytes_pinned += num_bytes;
4844 	if (reserved) {
4845 		cache->reserved -= num_bytes;
4846 		cache->space_info->bytes_reserved -= num_bytes;
4847 	}
4848 	spin_unlock(&cache->lock);
4849 	spin_unlock(&cache->space_info->lock);
4850 
4851 	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4852 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4853 	return 0;
4854 }
4855 
4856 /*
4857  * this function must be called within transaction
4858  */
4859 int btrfs_pin_extent(struct btrfs_root *root,
4860 		     u64 bytenr, u64 num_bytes, int reserved)
4861 {
4862 	struct btrfs_block_group_cache *cache;
4863 
4864 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4865 	BUG_ON(!cache); /* Logic error */
4866 
4867 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4868 
4869 	btrfs_put_block_group(cache);
4870 	return 0;
4871 }
4872 
4873 /*
4874  * this function must be called within transaction
4875  */
4876 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4877 				    struct btrfs_root *root,
4878 				    u64 bytenr, u64 num_bytes)
4879 {
4880 	struct btrfs_block_group_cache *cache;
4881 
4882 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4883 	BUG_ON(!cache); /* Logic error */
4884 
4885 	/*
4886 	 * pull in the free space cache (if any) so that our pin
4887 	 * removes the free space from the cache.  We have load_only set
4888 	 * to one because the slow code to read in the free extents does check
4889 	 * the pinned extents.
4890 	 */
4891 	cache_block_group(cache, trans, root, 1);
4892 
4893 	pin_down_extent(root, cache, bytenr, num_bytes, 0);
4894 
4895 	/* remove us from the free space cache (if we're there at all) */
4896 	btrfs_remove_free_space(cache, bytenr, num_bytes);
4897 	btrfs_put_block_group(cache);
4898 	return 0;
4899 }
4900 
4901 /**
4902  * btrfs_update_reserved_bytes - update the block_group and space info counters
4903  * @cache:	The cache we are manipulating
4904  * @num_bytes:	The number of bytes in question
4905  * @reserve:	One of the reservation enums
4906  *
4907  * This is called by the allocator when it reserves space, or by somebody who is
4908  * freeing space that was never actually used on disk.  For example if you
4909  * reserve some space for a new leaf in transaction A and before transaction A
4910  * commits you free that leaf, you call this with reserve set to 0 in order to
4911  * clear the reservation.
4912  *
4913  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4914  * ENOSPC accounting.  For data we handle the reservation through clearing the
4915  * delalloc bits in the io_tree.  We have to do this since we could end up
4916  * allocating less disk space for the amount of data we have reserved in the
4917  * case of compression.
4918  *
4919  * If this is a reservation and the block group has become read only we cannot
4920  * make the reservation and return -EAGAIN, otherwise this function always
4921  * succeeds.
4922  */
4923 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4924 				       u64 num_bytes, int reserve)
4925 {
4926 	struct btrfs_space_info *space_info = cache->space_info;
4927 	int ret = 0;
4928 
4929 	spin_lock(&space_info->lock);
4930 	spin_lock(&cache->lock);
4931 	if (reserve != RESERVE_FREE) {
4932 		if (cache->ro) {
4933 			ret = -EAGAIN;
4934 		} else {
4935 			cache->reserved += num_bytes;
4936 			space_info->bytes_reserved += num_bytes;
4937 			if (reserve == RESERVE_ALLOC) {
4938 				trace_btrfs_space_reservation(cache->fs_info,
4939 						"space_info", space_info->flags,
4940 						num_bytes, 0);
4941 				space_info->bytes_may_use -= num_bytes;
4942 			}
4943 		}
4944 	} else {
4945 		if (cache->ro)
4946 			space_info->bytes_readonly += num_bytes;
4947 		cache->reserved -= num_bytes;
4948 		space_info->bytes_reserved -= num_bytes;
4949 		space_info->reservation_progress++;
4950 	}
4951 	spin_unlock(&cache->lock);
4952 	spin_unlock(&space_info->lock);
4953 	return ret;
4954 }
4955 
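/*
 * Called as part of committing the transaction: record how far each
 * caching block group has progressed (its last_byte_to_unpin) and flip
 * fs_info->pinned_extents to the other freed_extents tree, so that extents
 * pinned from here on are unpinned by the following commit.
 */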
4956 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4957 				struct btrfs_root *root)
4958 {
4959 	struct btrfs_fs_info *fs_info = root->fs_info;
4960 	struct btrfs_caching_control *next;
4961 	struct btrfs_caching_control *caching_ctl;
4962 	struct btrfs_block_group_cache *cache;
4963 
4964 	down_write(&fs_info->extent_commit_sem);
4965 
4966 	list_for_each_entry_safe(caching_ctl, next,
4967 				 &fs_info->caching_block_groups, list) {
4968 		cache = caching_ctl->block_group;
4969 		if (block_group_cache_done(cache)) {
4970 			cache->last_byte_to_unpin = (u64)-1;
4971 			list_del_init(&caching_ctl->list);
4972 			put_caching_control(caching_ctl);
4973 		} else {
4974 			cache->last_byte_to_unpin = caching_ctl->progress;
4975 		}
4976 	}
4977 
4978 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4979 		fs_info->pinned_extents = &fs_info->freed_extents[1];
4980 	else
4981 		fs_info->pinned_extents = &fs_info->freed_extents[0];
4982 
4983 	up_write(&fs_info->extent_commit_sem);
4984 
4985 	update_global_block_rsv(fs_info);
4986 }
4987 
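/*
 * Walk [start, end] and hand pinned space back to the block groups it came
 * from, topping the global block reservation back up first when it is
 * short and shares the same space info.
 */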
4988 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4989 {
4990 	struct btrfs_fs_info *fs_info = root->fs_info;
4991 	struct btrfs_block_group_cache *cache = NULL;
4992 	struct btrfs_space_info *space_info;
4993 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4994 	u64 len;
4995 	bool readonly;
4996 
4997 	while (start <= end) {
4998 		readonly = false;
4999 		if (!cache ||
5000 		    start >= cache->key.objectid + cache->key.offset) {
5001 			if (cache)
5002 				btrfs_put_block_group(cache);
5003 			cache = btrfs_lookup_block_group(fs_info, start);
5004 			BUG_ON(!cache); /* Logic error */
5005 		}
5006 
5007 		len = cache->key.objectid + cache->key.offset - start;
5008 		len = min(len, end + 1 - start);
5009 
5010 		if (start < cache->last_byte_to_unpin) {
5011 			len = min(len, cache->last_byte_to_unpin - start);
5012 			btrfs_add_free_space(cache, start, len);
5013 		}
5014 
5015 		start += len;
5016 		space_info = cache->space_info;
5017 
5018 		spin_lock(&space_info->lock);
5019 		spin_lock(&cache->lock);
5020 		cache->pinned -= len;
5021 		space_info->bytes_pinned -= len;
5022 		if (cache->ro) {
5023 			space_info->bytes_readonly += len;
5024 			readonly = true;
5025 		}
5026 		spin_unlock(&cache->lock);
5027 		if (!readonly && global_rsv->space_info == space_info) {
5028 			spin_lock(&global_rsv->lock);
5029 			if (!global_rsv->full) {
5030 				len = min(len, global_rsv->size -
5031 					  global_rsv->reserved);
5032 				global_rsv->reserved += len;
5033 				space_info->bytes_may_use += len;
5034 				if (global_rsv->reserved >= global_rsv->size)
5035 					global_rsv->full = 1;
5036 			}
5037 			spin_unlock(&global_rsv->lock);
5038 		}
5039 		spin_unlock(&space_info->lock);
5040 	}
5041 
5042 	if (cache)
5043 		btrfs_put_block_group(cache);
5044 	return 0;
5045 }
5046 
5047 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5048 			       struct btrfs_root *root)
5049 {
5050 	struct btrfs_fs_info *fs_info = root->fs_info;
5051 	struct extent_io_tree *unpin;
5052 	u64 start;
5053 	u64 end;
5054 	int ret;
5055 
5056 	if (trans->aborted)
5057 		return 0;
5058 
5059 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5060 		unpin = &fs_info->freed_extents[1];
5061 	else
5062 		unpin = &fs_info->freed_extents[0];
5063 
5064 	while (1) {
5065 		ret = find_first_extent_bit(unpin, 0, &start, &end,
5066 					    EXTENT_DIRTY, NULL);
5067 		if (ret)
5068 			break;
5069 
5070 		if (btrfs_test_opt(root, DISCARD))
5071 			ret = btrfs_discard_extent(root, start,
5072 						   end + 1 - start, NULL);
5073 
5074 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
5075 		unpin_extent_range(root, start, end);
5076 		cond_resched();
5077 	}
5078 
5079 	return 0;
5080 }
5081 
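/*
 * Drop @refs_to_drop references to the extent at @bytenr.  While other
 * references remain we only decrement the count (inline backrefs are
 * updated by remove_extent_backref()); once the last reference is gone the
 * extent item itself is deleted, csums are removed for data extents, and
 * the block group accounting is updated.
 */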
5082 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5083 				struct btrfs_root *root,
5084 				u64 bytenr, u64 num_bytes, u64 parent,
5085 				u64 root_objectid, u64 owner_objectid,
5086 				u64 owner_offset, int refs_to_drop,
5087 				struct btrfs_delayed_extent_op *extent_op)
5088 {
5089 	struct btrfs_key key;
5090 	struct btrfs_path *path;
5091 	struct btrfs_fs_info *info = root->fs_info;
5092 	struct btrfs_root *extent_root = info->extent_root;
5093 	struct extent_buffer *leaf;
5094 	struct btrfs_extent_item *ei;
5095 	struct btrfs_extent_inline_ref *iref;
5096 	int ret;
5097 	int is_data;
5098 	int extent_slot = 0;
5099 	int found_extent = 0;
5100 	int num_to_del = 1;
5101 	u32 item_size;
5102 	u64 refs;
5103 
5104 	path = btrfs_alloc_path();
5105 	if (!path)
5106 		return -ENOMEM;
5107 
5108 	path->reada = 1;
5109 	path->leave_spinning = 1;
5110 
5111 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5112 	BUG_ON(!is_data && refs_to_drop != 1);
5113 
5114 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
5115 				    bytenr, num_bytes, parent,
5116 				    root_objectid, owner_objectid,
5117 				    owner_offset);
5118 	if (ret == 0) {
5119 		extent_slot = path->slots[0];
5120 		while (extent_slot >= 0) {
5121 			btrfs_item_key_to_cpu(path->nodes[0], &key,
5122 					      extent_slot);
5123 			if (key.objectid != bytenr)
5124 				break;
5125 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5126 			    key.offset == num_bytes) {
5127 				found_extent = 1;
5128 				break;
5129 			}
5130 			if (path->slots[0] - extent_slot > 5)
5131 				break;
5132 			extent_slot--;
5133 		}
5134 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5135 		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5136 		if (found_extent && item_size < sizeof(*ei))
5137 			found_extent = 0;
5138 #endif
5139 		if (!found_extent) {
5140 			BUG_ON(iref);
5141 			ret = remove_extent_backref(trans, extent_root, path,
5142 						    NULL, refs_to_drop,
5143 						    is_data);
5144 			if (ret) {
5145 				btrfs_abort_transaction(trans, extent_root, ret);
5146 				goto out;
5147 			}
5148 			btrfs_release_path(path);
5149 			path->leave_spinning = 1;
5150 
5151 			key.objectid = bytenr;
5152 			key.type = BTRFS_EXTENT_ITEM_KEY;
5153 			key.offset = num_bytes;
5154 
5155 			ret = btrfs_search_slot(trans, extent_root,
5156 						&key, path, -1, 1);
5157 			if (ret) {
5158 				printk(KERN_ERR "umm, got %d back from search"
5159 				       ", was looking for %llu\n", ret,
5160 				       (unsigned long long)bytenr);
5161 				if (ret > 0)
5162 					btrfs_print_leaf(extent_root,
5163 							 path->nodes[0]);
5164 			}
5165 			if (ret < 0) {
5166 				btrfs_abort_transaction(trans, extent_root, ret);
5167 				goto out;
5168 			}
5169 			extent_slot = path->slots[0];
5170 		}
5171 	} else if (ret == -ENOENT) {
5172 		btrfs_print_leaf(extent_root, path->nodes[0]);
5173 		WARN_ON(1);
5174 		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5175 		       "parent %llu root %llu  owner %llu offset %llu\n",
5176 		       (unsigned long long)bytenr,
5177 		       (unsigned long long)parent,
5178 		       (unsigned long long)root_objectid,
5179 		       (unsigned long long)owner_objectid,
5180 		       (unsigned long long)owner_offset);
5181 	} else {
5182 		btrfs_abort_transaction(trans, extent_root, ret);
5183 		goto out;
5184 	}
5185 
5186 	leaf = path->nodes[0];
5187 	item_size = btrfs_item_size_nr(leaf, extent_slot);
5188 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5189 	if (item_size < sizeof(*ei)) {
5190 		BUG_ON(found_extent || extent_slot != path->slots[0]);
5191 		ret = convert_extent_item_v0(trans, extent_root, path,
5192 					     owner_objectid, 0);
5193 		if (ret < 0) {
5194 			btrfs_abort_transaction(trans, extent_root, ret);
5195 			goto out;
5196 		}
5197 
5198 		btrfs_release_path(path);
5199 		path->leave_spinning = 1;
5200 
5201 		key.objectid = bytenr;
5202 		key.type = BTRFS_EXTENT_ITEM_KEY;
5203 		key.offset = num_bytes;
5204 
5205 		ret = btrfs_search_slot(trans, extent_root, &key, path,
5206 					-1, 1);
5207 		if (ret) {
5208 			printk(KERN_ERR "umm, got %d back from search"
5209 			       ", was looking for %llu\n", ret,
5210 			       (unsigned long long)bytenr);
5211 			btrfs_print_leaf(extent_root, path->nodes[0]);
5212 		}
5213 		if (ret < 0) {
5214 			btrfs_abort_transaction(trans, extent_root, ret);
5215 			goto out;
5216 		}
5217 
5218 		extent_slot = path->slots[0];
5219 		leaf = path->nodes[0];
5220 		item_size = btrfs_item_size_nr(leaf, extent_slot);
5221 	}
5222 #endif
5223 	BUG_ON(item_size < sizeof(*ei));
5224 	ei = btrfs_item_ptr(leaf, extent_slot,
5225 			    struct btrfs_extent_item);
5226 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5227 		struct btrfs_tree_block_info *bi;
5228 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5229 		bi = (struct btrfs_tree_block_info *)(ei + 1);
5230 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5231 	}
5232 
5233 	refs = btrfs_extent_refs(leaf, ei);
5234 	BUG_ON(refs < refs_to_drop);
5235 	refs -= refs_to_drop;
5236 
5237 	if (refs > 0) {
5238 		if (extent_op)
5239 			__run_delayed_extent_op(extent_op, leaf, ei);
5240 		/*
5241 		 * In the case of inline back ref, reference count will
5242 		 * be updated by remove_extent_backref
5243 		 */
5244 		if (iref) {
5245 			BUG_ON(!found_extent);
5246 		} else {
5247 			btrfs_set_extent_refs(leaf, ei, refs);
5248 			btrfs_mark_buffer_dirty(leaf);
5249 		}
5250 		if (found_extent) {
5251 			ret = remove_extent_backref(trans, extent_root, path,
5252 						    iref, refs_to_drop,
5253 						    is_data);
5254 			if (ret) {
5255 				btrfs_abort_transaction(trans, extent_root, ret);
5256 				goto out;
5257 			}
5258 		}
5259 	} else {
5260 		if (found_extent) {
5261 			BUG_ON(is_data && refs_to_drop !=
5262 			       extent_data_ref_count(root, path, iref));
5263 			if (iref) {
5264 				BUG_ON(path->slots[0] != extent_slot);
5265 			} else {
5266 				BUG_ON(path->slots[0] != extent_slot + 1);
5267 				path->slots[0] = extent_slot;
5268 				num_to_del = 2;
5269 			}
5270 		}
5271 
5272 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5273 				      num_to_del);
5274 		if (ret) {
5275 			btrfs_abort_transaction(trans, extent_root, ret);
5276 			goto out;
5277 		}
5278 		btrfs_release_path(path);
5279 
5280 		if (is_data) {
5281 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5282 			if (ret) {
5283 				btrfs_abort_transaction(trans, extent_root, ret);
5284 				goto out;
5285 			}
5286 		}
5287 
5288 		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5289 		if (ret) {
5290 			btrfs_abort_transaction(trans, extent_root, ret);
5291 			goto out;
5292 		}
5293 	}
5294 out:
5295 	btrfs_free_path(path);
5296 	return ret;
5297 }
5298 
5299 /*
5300  * when we free a block, it is possible (and likely) that we free the last
5301  * delayed ref for that extent as well.  This searches the delayed ref tree for
5302  * a given extent, and if there are no other delayed refs to be processed, it
5303  * removes it from the tree.
5304  */
5305 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5306 				      struct btrfs_root *root, u64 bytenr)
5307 {
5308 	struct btrfs_delayed_ref_head *head;
5309 	struct btrfs_delayed_ref_root *delayed_refs;
5310 	struct btrfs_delayed_ref_node *ref;
5311 	struct rb_node *node;
5312 	int ret = 0;
5313 
5314 	delayed_refs = &trans->transaction->delayed_refs;
5315 	spin_lock(&delayed_refs->lock);
5316 	head = btrfs_find_delayed_ref_head(trans, bytenr);
5317 	if (!head)
5318 		goto out;
5319 
5320 	node = rb_prev(&head->node.rb_node);
5321 	if (!node)
5322 		goto out;
5323 
5324 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5325 
5326 	/* there are still entries for this ref, we can't drop it */
5327 	if (ref->bytenr == bytenr)
5328 		goto out;
5329 
5330 	if (head->extent_op) {
5331 		if (!head->must_insert_reserved)
5332 			goto out;
5333 		kfree(head->extent_op);
5334 		head->extent_op = NULL;
5335 	}
5336 
5337 	/*
5338 	 * waiting for the lock here would deadlock.  If someone else has it
5339 	 * locked, they are already in the process of dropping it anyway
5340 	 */
5341 	if (!mutex_trylock(&head->mutex))
5342 		goto out;
5343 
5344 	/*
5345 	 * at this point we have a head with no other entries.  Go
5346 	 * ahead and process it.
5347 	 */
5348 	head->node.in_tree = 0;
5349 	rb_erase(&head->node.rb_node, &delayed_refs->root);
5350 
5351 	delayed_refs->num_entries--;
5352 
5353 	/*
5354 	 * we don't take a ref on the node because we're removing it from the
5355 	 * tree, so we just steal the ref the tree was holding.
5356 	 */
5357 	delayed_refs->num_heads--;
5358 	if (list_empty(&head->cluster))
5359 		delayed_refs->num_heads_ready--;
5360 
5361 	list_del_init(&head->cluster);
5362 	spin_unlock(&delayed_refs->lock);
5363 
5364 	BUG_ON(head->extent_op);
5365 	if (head->must_insert_reserved)
5366 		ret = 1;
5367 
5368 	mutex_unlock(&head->mutex);
5369 	btrfs_put_delayed_ref(&head->node);
5370 	return ret;
5371 out:
5372 	spin_unlock(&delayed_refs->lock);
5373 	return 0;
5374 }
5375 
5376 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5377 			   struct btrfs_root *root,
5378 			   struct extent_buffer *buf,
5379 			   u64 parent, int last_ref)
5380 {
5381 	struct btrfs_block_group_cache *cache = NULL;
5382 	int ret;
5383 
5384 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5385 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5386 					buf->start, buf->len,
5387 					parent, root->root_key.objectid,
5388 					btrfs_header_level(buf),
5389 					BTRFS_DROP_DELAYED_REF, NULL, 0);
5390 		BUG_ON(ret); /* -ENOMEM */
5391 	}
5392 
5393 	if (!last_ref)
5394 		return;
5395 
5396 	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5397 
5398 	if (btrfs_header_generation(buf) == trans->transid) {
5399 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5400 			ret = check_ref_cleanup(trans, root, buf->start);
5401 			if (!ret)
5402 				goto out;
5403 		}
5404 
5405 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5406 			pin_down_extent(root, cache, buf->start, buf->len, 1);
5407 			goto out;
5408 		}
5409 
5410 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5411 
5412 		btrfs_add_free_space(cache, buf->start, buf->len);
5413 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5414 	}
5415 out:
5416 	/*
5417 	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
5418 	 * anymore.
5419 	 */
5420 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5421 	btrfs_put_block_group(cache);
5422 }
5423 
5424 /* Can return -ENOMEM */
5425 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5426 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5427 		      u64 owner, u64 offset, int for_cow)
5428 {
5429 	int ret;
5430 	struct btrfs_fs_info *fs_info = root->fs_info;
5431 
5432 	/*
5433 	 * tree log blocks never actually go into the extent allocation
5434 	 * tree, just update pinning info and exit early.
5435 	 */
5436 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5437 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5438 		/* unlocks the pinned mutex */
5439 		btrfs_pin_extent(root, bytenr, num_bytes, 1);
5440 		ret = 0;
5441 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5442 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5443 					num_bytes,
5444 					parent, root_objectid, (int)owner,
5445 					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5446 	} else {
5447 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5448 						num_bytes,
5449 						parent, root_objectid, owner,
5450 						offset, BTRFS_DROP_DELAYED_REF,
5451 						NULL, for_cow);
5452 	}
5453 	return ret;
5454 }
5455 
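/*
 * Round val up to the next stripe boundary.  Worked example (illustrative
 * numbers, not from the original source): with root->stripesize == 65536
 * the mask is 0xffff, so stripe_align(root, 0x11000) becomes
 * (0x11000 + 0xffff) & ~0xffff == 0x20000.  The mask arithmetic assumes
 * stripesize is a power of two.
 */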
5456 static u64 stripe_align(struct btrfs_root *root, u64 val)
5457 {
5458 	u64 mask = ((u64)root->stripesize - 1);
5459 	u64 ret = (val + mask) & ~mask;
5460 	return ret;
5461 }
5462 
5463 /*
5464  * when we wait for progress in the block group caching, it's because
5465  * our allocation attempt failed at least once.  So, we must sleep
5466  * and let some progress happen before we try again.
5467  *
5468  * This function will sleep at least once waiting for new free space to
5469  * show up, and then it will check the block group free space numbers
5470  * for our min num_bytes.  Another option is to have it go ahead
5471  * and look in the rbtree for a free extent of a given size, but this
5472  * is a good start.
5473  */
5474 static noinline int
5475 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5476 				u64 num_bytes)
5477 {
5478 	struct btrfs_caching_control *caching_ctl;
5479 	DEFINE_WAIT(wait);
5480 
5481 	caching_ctl = get_caching_control(cache);
5482 	if (!caching_ctl)
5483 		return 0;
5484 
5485 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5486 		   (cache->free_space_ctl->free_space >= num_bytes));
5487 
5488 	put_caching_control(caching_ctl);
5489 	return 0;
5490 }
5491 
5492 static noinline int
5493 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5494 {
5495 	struct btrfs_caching_control *caching_ctl;
5496 	DEFINE_WAIT(wait);
5497 
5498 	caching_ctl = get_caching_control(cache);
5499 	if (!caching_ctl)
5500 		return 0;
5501 
5502 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
5503 
5504 	put_caching_control(caching_ctl);
5505 	return 0;
5506 }
5507 
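/*
 * Map block group profile flags to the index used for
 * space_info->block_groups[]: raid10 -> 0, raid1 -> 1, dup -> 2,
 * raid0 -> 3, and plain single chunks -> 4.
 */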
5508 int __get_raid_index(u64 flags)
5509 {
5510 	int index;
5511 
5512 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
5513 		index = 0;
5514 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5515 		index = 1;
5516 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
5517 		index = 2;
5518 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5519 		index = 3;
5520 	else
5521 		index = 4;
5522 
5523 	return index;
5524 }
5525 
5526 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5527 {
5528 	return __get_raid_index(cache->flags);
5529 }
5530 
5531 enum btrfs_loop_type {
5532 	LOOP_CACHING_NOWAIT = 0,
5533 	LOOP_CACHING_WAIT = 1,
5534 	LOOP_ALLOC_CHUNK = 2,
5535 	LOOP_NO_EMPTY_SIZE = 3,
5536 };
5537 
5538 /*
5539  * walks the btree of allocated extents and finds a hole of a given size.
5540  * The key ins is changed to record the hole:
5541  * ins->objectid == block start
5542  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5543  * ins->offset == number of blocks
5544  * Any available blocks before search_start are skipped.
5545  */
5546 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5547 				     struct btrfs_root *orig_root,
5548 				     u64 num_bytes, u64 empty_size,
5549 				     u64 hint_byte, struct btrfs_key *ins,
5550 				     u64 data)
5551 {
5552 	int ret = 0;
5553 	struct btrfs_root *root = orig_root->fs_info->extent_root;
5554 	struct btrfs_free_cluster *last_ptr = NULL;
5555 	struct btrfs_block_group_cache *block_group = NULL;
5556 	struct btrfs_block_group_cache *used_block_group;
5557 	u64 search_start = 0;
5558 	int empty_cluster = 2 * 1024 * 1024;
5559 	struct btrfs_space_info *space_info;
5560 	int loop = 0;
5561 	int index = __get_raid_index(data);
5562 	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5563 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5564 	bool found_uncached_bg = false;
5565 	bool failed_cluster_refill = false;
5566 	bool failed_alloc = false;
5567 	bool use_cluster = true;
5568 	bool have_caching_bg = false;
5569 
5570 	WARN_ON(num_bytes < root->sectorsize);
5571 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5572 	ins->objectid = 0;
5573 	ins->offset = 0;
5574 
5575 	trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5576 
5577 	space_info = __find_space_info(root->fs_info, data);
5578 	if (!space_info) {
5579 		printk(KERN_ERR "No space info for %llu\n",
		       (unsigned long long)data);
5580 		return -ENOSPC;
5581 	}
5582 
5583 	/*
5584 	 * If the space info is for both data and metadata it means we have a
5585 	 * small filesystem and we can't use the clustering stuff.
5586 	 */
5587 	if (btrfs_mixed_space_info(space_info))
5588 		use_cluster = false;
5589 
5590 	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5591 		last_ptr = &root->fs_info->meta_alloc_cluster;
5592 		if (!btrfs_test_opt(root, SSD))
5593 			empty_cluster = 64 * 1024;
5594 	}
5595 
5596 	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5597 	    btrfs_test_opt(root, SSD)) {
5598 		last_ptr = &root->fs_info->data_alloc_cluster;
5599 	}
5600 
5601 	if (last_ptr) {
5602 		spin_lock(&last_ptr->lock);
5603 		if (last_ptr->block_group)
5604 			hint_byte = last_ptr->window_start;
5605 		spin_unlock(&last_ptr->lock);
5606 	}
5607 
5608 	search_start = max(search_start, first_logical_byte(root, 0));
5609 	search_start = max(search_start, hint_byte);
5610 
5611 	if (!last_ptr)
5612 		empty_cluster = 0;
5613 
5614 	if (search_start == hint_byte) {
5615 		block_group = btrfs_lookup_block_group(root->fs_info,
5616 						       search_start);
5617 		used_block_group = block_group;
5618 		/*
5619 		 * we don't want to use the block group if it doesn't match our
5620 		 * allocation bits, or if its not cached.
5621 		 * allocation bits, or if it's not cached.
5622 		 *
5623 		 * However, if we are re-searching with an ideal block group
5624 		 * picked out, then we don't care that the block group is cached.
5625 		if (block_group && block_group_bits(block_group, data) &&
5626 		    block_group->cached != BTRFS_CACHE_NO) {
5627 			down_read(&space_info->groups_sem);
5628 			if (list_empty(&block_group->list) ||
5629 			    block_group->ro) {
5630 				/*
5631 				 * someone is removing this block group,
5632 				 * we can't jump into the have_block_group
5633 				 * target because our list pointers are not
5634 				 * valid
5635 				 */
5636 				btrfs_put_block_group(block_group);
5637 				up_read(&space_info->groups_sem);
5638 			} else {
5639 				index = get_block_group_index(block_group);
5640 				goto have_block_group;
5641 			}
5642 		} else if (block_group) {
5643 			btrfs_put_block_group(block_group);
5644 		}
5645 	}
5646 search:
5647 	have_caching_bg = false;
5648 	down_read(&space_info->groups_sem);
5649 	list_for_each_entry(block_group, &space_info->block_groups[index],
5650 			    list) {
5651 		u64 offset;
5652 		int cached;
5653 
5654 		used_block_group = block_group;
5655 		btrfs_get_block_group(block_group);
5656 		search_start = block_group->key.objectid;
5657 
5658 		/*
5659 		 * this can happen if we end up cycling through all the
5660 		 * raid types, but we want to make sure we only allocate
5661 		 * for the proper type.
5662 		 */
5663 		if (!block_group_bits(block_group, data)) {
5664 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
5665 				    BTRFS_BLOCK_GROUP_RAID1 |
5666 				    BTRFS_BLOCK_GROUP_RAID10;
5667 
5668 			/*
5669 			 * if they asked for extra copies and this block group
5670 			 * doesn't provide them, bail.  This does allow us to
5671 			 * fill raid0 from raid1.
5672 			 */
5673 			if ((data & extra) && !(block_group->flags & extra))
5674 				goto loop;
5675 		}
5676 
5677 have_block_group:
5678 		cached = block_group_cache_done(block_group);
5679 		if (unlikely(!cached)) {
5680 			found_uncached_bg = true;
5681 			ret = cache_block_group(block_group, trans,
5682 						orig_root, 0);
5683 			BUG_ON(ret < 0);
5684 			ret = 0;
5685 		}
5686 
5687 		if (unlikely(block_group->ro))
5688 			goto loop;
5689 
5690 		/*
5691 		 * OK, we want to try to use the cluster allocator, so
5692 		 * let's look there
5693 		 */
5694 		if (last_ptr) {
5695 			/*
5696 			 * the refill lock keeps out other
5697 			 * people trying to start a new cluster
5698 			 */
5699 			spin_lock(&last_ptr->refill_lock);
5700 			used_block_group = last_ptr->block_group;
5701 			if (used_block_group != block_group &&
5702 			    (!used_block_group ||
5703 			     used_block_group->ro ||
5704 			     !block_group_bits(used_block_group, data))) {
5705 				used_block_group = block_group;
5706 				goto refill_cluster;
5707 			}
5708 
5709 			if (used_block_group != block_group)
5710 				btrfs_get_block_group(used_block_group);
5711 
5712 			offset = btrfs_alloc_from_cluster(used_block_group,
5713 			  last_ptr, num_bytes, used_block_group->key.objectid);
5714 			if (offset) {
5715 				/* we have a block, we're done */
5716 				spin_unlock(&last_ptr->refill_lock);
5717 				trace_btrfs_reserve_extent_cluster(root,
5718 					block_group, search_start, num_bytes);
5719 				goto checks;
5720 			}
5721 
5722 			WARN_ON(last_ptr->block_group != used_block_group);
5723 			if (used_block_group != block_group) {
5724 				btrfs_put_block_group(used_block_group);
5725 				used_block_group = block_group;
5726 			}
5727 refill_cluster:
5728 			BUG_ON(used_block_group != block_group);
5729 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
5730 			 * set up a new cluster, so let's just skip it
5731 			 * and let the allocator find whatever block
5732 			 * it can find.  If we reach this point, we
5733 			 * will have tried the cluster allocator
5734 			 * plenty of times and not have found
5735 			 * anything, so we are likely way too
5736 			 * fragmented for the clustering stuff to find
5737 			 * anything.
5738 			 *
5739 			 * However, if the cluster is taken from the
5740 			 * current block group, release the cluster
5741 			 * first, so that we stand a better chance of
5742 			 * succeeding in the unclustered
5743 			 * allocation.  */
5744 			if (loop >= LOOP_NO_EMPTY_SIZE &&
5745 			    last_ptr->block_group != block_group) {
5746 				spin_unlock(&last_ptr->refill_lock);
5747 				goto unclustered_alloc;
5748 			}
5749 
5750 			/*
5751 			 * this cluster didn't work out, free it and
5752 			 * start over
5753 			 */
5754 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
5755 
5756 			if (loop >= LOOP_NO_EMPTY_SIZE) {
5757 				spin_unlock(&last_ptr->refill_lock);
5758 				goto unclustered_alloc;
5759 			}
5760 
5761 			/* allocate a cluster in this block group */
5762 			ret = btrfs_find_space_cluster(trans, root,
5763 					       block_group, last_ptr,
5764 					       search_start, num_bytes,
5765 					       empty_cluster + empty_size);
5766 			if (ret == 0) {
5767 				/*
5768 				 * now pull our allocation out of this
5769 				 * cluster
5770 				 */
5771 				offset = btrfs_alloc_from_cluster(block_group,
5772 						  last_ptr, num_bytes,
5773 						  search_start);
5774 				if (offset) {
5775 					/* we found one, proceed */
5776 					spin_unlock(&last_ptr->refill_lock);
5777 					trace_btrfs_reserve_extent_cluster(root,
5778 						block_group, search_start,
5779 						num_bytes);
5780 					goto checks;
5781 				}
5782 			} else if (!cached && loop > LOOP_CACHING_NOWAIT
5783 				   && !failed_cluster_refill) {
5784 				spin_unlock(&last_ptr->refill_lock);
5785 
5786 				failed_cluster_refill = true;
5787 				wait_block_group_cache_progress(block_group,
5788 				       num_bytes + empty_cluster + empty_size);
5789 				goto have_block_group;
5790 			}
5791 
5792 			/*
5793 			 * at this point we either didn't find a cluster
5794 			 * or we weren't able to allocate a block from our
5795 			 * cluster.  Free the cluster we've been trying
5796 			 * to use, and go to the next block group
5797 			 */
5798 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
5799 			spin_unlock(&last_ptr->refill_lock);
5800 			goto loop;
5801 		}
5802 
5803 unclustered_alloc:
5804 		spin_lock(&block_group->free_space_ctl->tree_lock);
5805 		if (cached &&
5806 		    block_group->free_space_ctl->free_space <
5807 		    num_bytes + empty_cluster + empty_size) {
5808 			spin_unlock(&block_group->free_space_ctl->tree_lock);
5809 			goto loop;
5810 		}
5811 		spin_unlock(&block_group->free_space_ctl->tree_lock);
5812 
5813 		offset = btrfs_find_space_for_alloc(block_group, search_start,
5814 						    num_bytes, empty_size);
5815 		/*
5816 		 * If we didn't find a chunk, and we haven't failed on this
5817 		 * block group before, and this block group is in the middle of
5818 		 * caching and we are ok with waiting, then go ahead and wait
5819 		 * for progress to be made, and set failed_alloc to true.
5820 		 *
5821 		 * If failed_alloc is true then we've already waited on this
5822 		 * block group once and should move on to the next block group.
5823 		 */
5824 		if (!offset && !failed_alloc && !cached &&
5825 		    loop > LOOP_CACHING_NOWAIT) {
5826 			wait_block_group_cache_progress(block_group,
5827 						num_bytes + empty_size);
5828 			failed_alloc = true;
5829 			goto have_block_group;
5830 		} else if (!offset) {
5831 			if (!cached)
5832 				have_caching_bg = true;
5833 			goto loop;
5834 		}
5835 checks:
5836 		search_start = stripe_align(root, offset);
5837 
5838 		/* move on to the next group */
5839 		if (search_start + num_bytes >
5840 		    used_block_group->key.objectid + used_block_group->key.offset) {
5841 			btrfs_add_free_space(used_block_group, offset, num_bytes);
5842 			goto loop;
5843 		}
5844 
5845 		if (offset < search_start)
5846 			btrfs_add_free_space(used_block_group, offset,
5847 					     search_start - offset);
5848 		BUG_ON(offset > search_start);
5849 
5850 		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5851 						  alloc_type);
5852 		if (ret == -EAGAIN) {
5853 			btrfs_add_free_space(used_block_group, offset, num_bytes);
5854 			goto loop;
5855 		}
5856 
5857 		/* we are all good, let's return */
5858 		ins->objectid = search_start;
5859 		ins->offset = num_bytes;
5860 
5861 		trace_btrfs_reserve_extent(orig_root, block_group,
5862 					   search_start, num_bytes);
5863 		if (used_block_group != block_group)
5864 			btrfs_put_block_group(used_block_group);
5865 		btrfs_put_block_group(block_group);
5866 		break;
5867 loop:
5868 		failed_cluster_refill = false;
5869 		failed_alloc = false;
5870 		BUG_ON(index != get_block_group_index(block_group));
5871 		if (used_block_group != block_group)
5872 			btrfs_put_block_group(used_block_group);
5873 		btrfs_put_block_group(block_group);
5874 	}
5875 	up_read(&space_info->groups_sem);
5876 
5877 	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5878 		goto search;
5879 
5880 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5881 		goto search;
5882 
5883 	/*
5884 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5885 	 *			caching kthreads as we move along
5886 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5887 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5888 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5889 	 *			again
5890 	 */
5891 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5892 		index = 0;
5893 		loop++;
5894 		if (loop == LOOP_ALLOC_CHUNK) {
5895 			ret = do_chunk_alloc(trans, root, data,
5896 					     CHUNK_ALLOC_FORCE);
5897 			/*
5898 			 * Do not bail out on ENOSPC since we
5899 			 * can do more things.
5900 			 */
5901 			if (ret < 0 && ret != -ENOSPC) {
5902 				btrfs_abort_transaction(trans,
5903 							root, ret);
5904 				goto out;
5905 			}
5906 		}
5907 
5908 		if (loop == LOOP_NO_EMPTY_SIZE) {
5909 			empty_size = 0;
5910 			empty_cluster = 0;
5911 		}
5912 
5913 		goto search;
5914 	} else if (!ins->objectid) {
5915 		ret = -ENOSPC;
5916 	} else if (ins->objectid) {
5917 		ret = 0;
5918 	}
5919 out:
5920 
5921 	return ret;
5922 }
5923 
5924 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5925 			    int dump_block_groups)
5926 {
5927 	struct btrfs_block_group_cache *cache;
5928 	int index = 0;
5929 
5930 	spin_lock(&info->lock);
5931 	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5932 	       (unsigned long long)info->flags,
5933 	       (unsigned long long)(info->total_bytes - info->bytes_used -
5934 				    info->bytes_pinned - info->bytes_reserved -
5935 				    info->bytes_readonly),
5936 	       (info->full) ? "" : "not ");
5937 	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5938 	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
5939 	       (unsigned long long)info->total_bytes,
5940 	       (unsigned long long)info->bytes_used,
5941 	       (unsigned long long)info->bytes_pinned,
5942 	       (unsigned long long)info->bytes_reserved,
5943 	       (unsigned long long)info->bytes_may_use,
5944 	       (unsigned long long)info->bytes_readonly);
5945 	spin_unlock(&info->lock);
5946 
5947 	if (!dump_block_groups)
5948 		return;
5949 
5950 	down_read(&info->groups_sem);
5951 again:
5952 	list_for_each_entry(cache, &info->block_groups[index], list) {
5953 		spin_lock(&cache->lock);
5954 		printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
5955 		       (unsigned long long)cache->key.objectid,
5956 		       (unsigned long long)cache->key.offset,
5957 		       (unsigned long long)btrfs_block_group_used(&cache->item),
5958 		       (unsigned long long)cache->pinned,
5959 		       (unsigned long long)cache->reserved,
5960 		       cache->ro ? "[readonly]" : "");
5961 		btrfs_dump_free_space(cache, bytes);
5962 		spin_unlock(&cache->lock);
5963 	}
5964 	if (++index < BTRFS_NR_RAID_TYPES)
5965 		goto again;
5966 	up_read(&info->groups_sem);
5967 }
5968 
5969 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5970 			 struct btrfs_root *root,
5971 			 u64 num_bytes, u64 min_alloc_size,
5972 			 u64 empty_size, u64 hint_byte,
5973 			 struct btrfs_key *ins, u64 data)
5974 {
5975 	bool final_tried = false;
5976 	int ret;
5977 
5978 	data = btrfs_get_alloc_profile(root, data);
5979 again:
5980 	WARN_ON(num_bytes < root->sectorsize);
5981 	ret = find_free_extent(trans, root, num_bytes, empty_size,
5982 			       hint_byte, ins, data);
5983 
5984 	if (ret == -ENOSPC) {
5985 		if (!final_tried) {
5986 			num_bytes = num_bytes >> 1;
5987 			num_bytes = num_bytes & ~(root->sectorsize - 1);
5988 			num_bytes = max(num_bytes, min_alloc_size);
5989 			if (num_bytes == min_alloc_size)
5990 				final_tried = true;
5991 			goto again;
5992 		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
5993 			struct btrfs_space_info *sinfo;
5994 
5995 			sinfo = __find_space_info(root->fs_info, data);
5996 			printk(KERN_ERR "btrfs allocation failed flags %llu, "
5997 			       "wanted %llu\n", (unsigned long long)data,
5998 			       (unsigned long long)num_bytes);
5999 			if (sinfo)
6000 				dump_space_info(sinfo, num_bytes, 1);
6001 		}
6002 	}
6003 
6004 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6005 
6006 	return ret;
6007 }
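
/*
 * Illustrative usage sketch (hypothetical caller, not from the original
 * source): reserve up to 1MB of data space, letting the retry loop above
 * fall back to smaller sizes, down to one sector, before reporting ENOSPC:
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(trans, root, 1024 * 1024,
 *				   root->sectorsize, 0, 0, &ins, 1);
 *	if (ret == 0) {
 *		u64 start = ins.objectid;	(byte offset of the extent)
 *		u64 len = ins.offset;		(bytes actually reserved)
 *	}
 *
 * The final argument selects a data allocation profile via
 * btrfs_get_alloc_profile().
 */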
6008 
6009 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6010 					u64 start, u64 len, int pin)
6011 {
6012 	struct btrfs_block_group_cache *cache;
6013 	int ret = 0;
6014 
6015 	cache = btrfs_lookup_block_group(root->fs_info, start);
6016 	if (!cache) {
6017 		printk(KERN_ERR "Unable to find block group for %llu\n",
6018 		       (unsigned long long)start);
6019 		return -ENOSPC;
6020 	}
6021 
6022 	if (btrfs_test_opt(root, DISCARD))
6023 		ret = btrfs_discard_extent(root, start, len, NULL);
6024 
6025 	if (pin)
6026 		pin_down_extent(root, cache, start, len, 1);
6027 	else {
6028 		btrfs_add_free_space(cache, start, len);
6029 		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6030 	}
6031 	btrfs_put_block_group(cache);
6032 
6033 	trace_btrfs_reserved_extent_free(root, start, len);
6034 
6035 	return ret;
6036 }
6037 
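/*
 * The two wrappers below differ only in the pin flag: pinned extents are
 * only returned to the free space cache when the transaction commits,
 * while unpinned ones go straight back to the cache and have their
 * reservation dropped.
 */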
6038 int btrfs_free_reserved_extent(struct btrfs_root *root,
6039 					u64 start, u64 len)
6040 {
6041 	return __btrfs_free_reserved_extent(root, start, len, 0);
6042 }
6043 
6044 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6045 				       u64 start, u64 len)
6046 {
6047 	return __btrfs_free_reserved_extent(root, start, len, 1);
6048 }
6049 
6050 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6051 				      struct btrfs_root *root,
6052 				      u64 parent, u64 root_objectid,
6053 				      u64 flags, u64 owner, u64 offset,
6054 				      struct btrfs_key *ins, int ref_mod)
6055 {
6056 	int ret;
6057 	struct btrfs_fs_info *fs_info = root->fs_info;
6058 	struct btrfs_extent_item *extent_item;
6059 	struct btrfs_extent_inline_ref *iref;
6060 	struct btrfs_path *path;
6061 	struct extent_buffer *leaf;
6062 	int type;
6063 	u32 size;
6064 
6065 	if (parent > 0)
6066 		type = BTRFS_SHARED_DATA_REF_KEY;
6067 	else
6068 		type = BTRFS_EXTENT_DATA_REF_KEY;
6069 
6070 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6071 
6072 	path = btrfs_alloc_path();
6073 	if (!path)
6074 		return -ENOMEM;
6075 
6076 	path->leave_spinning = 1;
6077 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6078 				      ins, size);
6079 	if (ret) {
6080 		btrfs_free_path(path);
6081 		return ret;
6082 	}
6083 
6084 	leaf = path->nodes[0];
6085 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
6086 				     struct btrfs_extent_item);
6087 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6088 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6089 	btrfs_set_extent_flags(leaf, extent_item,
6090 			       flags | BTRFS_EXTENT_FLAG_DATA);
6091 
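	/*
	 * Lay out the inline backref right after the extent item.  For a
	 * shared extent (parent > 0) this is a shared_data_ref holding just
	 * a count; otherwise it is an extent_data_ref keyed by (root, owner,
	 * offset), matching the size reserved for the item above.
	 */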
6092 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6093 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
6094 	if (parent > 0) {
6095 		struct btrfs_shared_data_ref *ref;
6096 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
6097 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6098 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6099 	} else {
6100 		struct btrfs_extent_data_ref *ref;
6101 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6102 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6103 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6104 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6105 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6106 	}
6107 
6108 	btrfs_mark_buffer_dirty(path->nodes[0]);
6109 	btrfs_free_path(path);
6110 
6111 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6112 	if (ret) { /* -ENOENT, logic error */
6113 		printk(KERN_ERR "btrfs update block group failed for %llu "
6114 		       "%llu\n", (unsigned long long)ins->objectid,
6115 		       (unsigned long long)ins->offset);
6116 		BUG();
6117 	}
6118 	return ret;
6119 }
6120 
6121 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6122 				     struct btrfs_root *root,
6123 				     u64 parent, u64 root_objectid,
6124 				     u64 flags, struct btrfs_disk_key *key,
6125 				     int level, struct btrfs_key *ins)
6126 {
6127 	int ret;
6128 	struct btrfs_fs_info *fs_info = root->fs_info;
6129 	struct btrfs_extent_item *extent_item;
6130 	struct btrfs_tree_block_info *block_info;
6131 	struct btrfs_extent_inline_ref *iref;
6132 	struct btrfs_path *path;
6133 	struct extent_buffer *leaf;
6134 	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6135 
6136 	path = btrfs_alloc_path();
6137 	if (!path)
6138 		return -ENOMEM;
6139 
6140 	path->leave_spinning = 1;
6141 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6142 				      ins, size);
6143 	if (ret) {
6144 		btrfs_free_path(path);
6145 		return ret;
6146 	}
6147 
6148 	leaf = path->nodes[0];
6149 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
6150 				     struct btrfs_extent_item);
6151 	btrfs_set_extent_refs(leaf, extent_item, 1);
6152 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6153 	btrfs_set_extent_flags(leaf, extent_item,
6154 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6155 	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6156 
6157 	btrfs_set_tree_block_key(leaf, block_info, key);
6158 	btrfs_set_tree_block_level(leaf, block_info, level);
6159 
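	/*
	 * Unlike data extents, tree blocks carry a btrfs_tree_block_info
	 * (first key + level, set just above) between the extent item and
	 * the single inline backref filled in below.
	 */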
6160 	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6161 	if (parent > 0) {
6162 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6163 		btrfs_set_extent_inline_ref_type(leaf, iref,
6164 						 BTRFS_SHARED_BLOCK_REF_KEY);
6165 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6166 	} else {
6167 		btrfs_set_extent_inline_ref_type(leaf, iref,
6168 						 BTRFS_TREE_BLOCK_REF_KEY);
6169 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6170 	}
6171 
6172 	btrfs_mark_buffer_dirty(leaf);
6173 	btrfs_free_path(path);
6174 
6175 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6176 	if (ret) { /* -ENOENT, logic error */
6177 		printk(KERN_ERR "btrfs update block group failed for %llu "
6178 		       "%llu\n", (unsigned long long)ins->objectid,
6179 		       (unsigned long long)ins->offset);
6180 		BUG();
6181 	}
6182 	return ret;
6183 }
6184 
6185 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6186 				     struct btrfs_root *root,
6187 				     u64 root_objectid, u64 owner,
6188 				     u64 offset, struct btrfs_key *ins)
6189 {
6190 	int ret;
6191 
6192 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6193 
6194 	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6195 					 ins->offset, 0,
6196 					 root_objectid, owner, offset,
6197 					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6198 	return ret;
6199 }
6200 
6201 /*
6202  * this is used by the tree logging recovery code.  It records that
6203  * an extent has been allocated and makes sure to clear the free
6204  * space cache bits as well
6205  */
6206 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6207 				   struct btrfs_root *root,
6208 				   u64 root_objectid, u64 owner, u64 offset,
6209 				   struct btrfs_key *ins)
6210 {
6211 	int ret;
6212 	struct btrfs_block_group_cache *block_group;
6213 	struct btrfs_caching_control *caching_ctl;
6214 	u64 start = ins->objectid;
6215 	u64 num_bytes = ins->offset;
6216 
6217 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6218 	cache_block_group(block_group, trans, NULL, 0);
6219 	caching_ctl = get_caching_control(block_group);
6220 
6221 	if (!caching_ctl) {
6222 		BUG_ON(!block_group_cache_done(block_group));
6223 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
6224 		BUG_ON(ret); /* -ENOMEM */
6225 	} else {
6226 		mutex_lock(&caching_ctl->mutex);
6227 
6228 		if (start >= caching_ctl->progress) {
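		/*
		 * The caching worker records how far it has scanned in
		 * caching_ctl->progress.  Three cases follow: the logged
		 * extent lies entirely above the scanned region (just mark
		 * it excluded), entirely below it (remove it from the free
		 * space cache), or straddles the boundary (split the range
		 * and do both).
		 */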
6229 			ret = add_excluded_extent(root, start, num_bytes);
6230 			BUG_ON(ret); /* -ENOMEM */
6231 		} else if (start + num_bytes <= caching_ctl->progress) {
6232 			ret = btrfs_remove_free_space(block_group,
6233 						      start, num_bytes);
6234 			BUG_ON(ret); /* -ENOMEM */
6235 		} else {
6236 			num_bytes = caching_ctl->progress - start;
6237 			ret = btrfs_remove_free_space(block_group,
6238 						      start, num_bytes);
6239 			BUG_ON(ret); /* -ENOMEM */
6240 
6241 			start = caching_ctl->progress;
6242 			num_bytes = ins->objectid + ins->offset -
6243 				    caching_ctl->progress;
6244 			ret = add_excluded_extent(root, start, num_bytes);
6245 			BUG_ON(ret); /* -ENOMEM */
6246 		}
6247 
6248 		mutex_unlock(&caching_ctl->mutex);
6249 		put_caching_control(caching_ctl);
6250 	}
6251 
6252 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6253 					  RESERVE_ALLOC_NO_ACCOUNT);
6254 	BUG_ON(ret); /* logic error */
6255 	btrfs_put_block_group(block_group);
6256 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6257 					 0, owner, offset, ins, 1);
6258 	return ret;
6259 }
6260 
6261 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6262 					    struct btrfs_root *root,
6263 					    u64 bytenr, u32 blocksize,
6264 					    int level)
6265 {
6266 	struct extent_buffer *buf;
6267 
6268 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6269 	if (!buf)
6270 		return ERR_PTR(-ENOMEM);
6271 	btrfs_set_header_generation(buf, trans->transid);
6272 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6273 	btrfs_tree_lock(buf);
6274 	clean_tree_block(trans, root, buf);
6275 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6276 
6277 	btrfs_set_lock_blocking(buf);
6278 	btrfs_set_buffer_uptodate(buf);
6279 
6280 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6281 		/*
6282 		 * we allow two log transactions at a time, use different
6283 		 * EXTENT bits to differentiate dirty pages.
6284 		 */
6285 		if (root->log_transid % 2 == 0)
6286 			set_extent_dirty(&root->dirty_log_pages, buf->start,
6287 					buf->start + buf->len - 1, GFP_NOFS);
6288 		else
6289 			set_extent_new(&root->dirty_log_pages, buf->start,
6290 					buf->start + buf->len - 1, GFP_NOFS);
6291 	} else {
6292 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6293 			 buf->start + buf->len - 1, GFP_NOFS);
6294 	}
6295 	trans->blocks_used++;
6296 	/* this returns a buffer locked for blocking */
6297 	return buf;
6298 }
6299 
6300 static struct btrfs_block_rsv *
6301 use_block_rsv(struct btrfs_trans_handle *trans,
6302 	      struct btrfs_root *root, u32 blocksize)
6303 {
6304 	struct btrfs_block_rsv *block_rsv;
6305 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6306 	int ret;
6307 
6308 	block_rsv = get_block_rsv(trans, root);
6309 
6310 	if (block_rsv->size == 0) {
6311 		ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6312 					     BTRFS_RESERVE_NO_FLUSH);
6313 		/*
6314 		 * If we couldn't reserve metadata bytes try and use some from
6315 		 * the global reserve.
6316 		 */
6317 		if (ret && block_rsv != global_rsv) {
6318 			ret = block_rsv_use_bytes(global_rsv, blocksize);
6319 			if (!ret)
6320 				return global_rsv;
6321 			return ERR_PTR(ret);
6322 		} else if (ret) {
6323 			return ERR_PTR(ret);
6324 		}
6325 		return block_rsv;
6326 	}
6327 
6328 	ret = block_rsv_use_bytes(block_rsv, blocksize);
6329 	if (!ret)
6330 		return block_rsv;
6331 	if (ret && !block_rsv->failfast) {
6332 		static DEFINE_RATELIMIT_STATE(_rs,
6333 				DEFAULT_RATELIMIT_INTERVAL,
6334 				/*DEFAULT_RATELIMIT_BURST*/ 2);
6335 		if (__ratelimit(&_rs))
6336 			WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
6337 			     ret);
6338 		ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6339 					     BTRFS_RESERVE_NO_FLUSH);
6340 		if (!ret) {
6341 			return block_rsv;
6342 		} else if (ret && block_rsv != global_rsv) {
6343 			ret = block_rsv_use_bytes(global_rsv, blocksize);
6344 			if (!ret)
6345 				return global_rsv;
6346 		}
6347 	}
6348 
6349 	return ERR_PTR(-ENOSPC);
6350 }
6351 
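/*
 * Give back a blocksize worth of metadata reservation: credit the bytes
 * to the rsv and then immediately release them to the space_info
 * counters.
 */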
6352 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6353 			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
6354 {
6355 	block_rsv_add_bytes(block_rsv, blocksize, 0);
6356 	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6357 }
6358 
6359 /*
6360  * finds a free extent and does all the dirty work required for allocation.
6361  * It returns the key for the extent through ins, and a tree buffer for
6362  * the first block of the extent.
6363  *
6364  * returns the tree buffer or an ERR_PTR on failure.
6365  */
6366 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6367 					struct btrfs_root *root, u32 blocksize,
6368 					u64 parent, u64 root_objectid,
6369 					struct btrfs_disk_key *key, int level,
6370 					u64 hint, u64 empty_size)
6371 {
6372 	struct btrfs_key ins;
6373 	struct btrfs_block_rsv *block_rsv;
6374 	struct extent_buffer *buf;
6375 	u64 flags = 0;
6376 	int ret;
6377 
6378 
6379 	block_rsv = use_block_rsv(trans, root, blocksize);
6380 	if (IS_ERR(block_rsv))
6381 		return ERR_CAST(block_rsv);
6382 
6383 	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6384 				   empty_size, hint, &ins, 0);
6385 	if (ret) {
6386 		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6387 		return ERR_PTR(ret);
6388 	}
6389 
6390 	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6391 				    blocksize, level);
6392 	BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6393 
6394 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6395 		if (parent == 0)
6396 			parent = ins.objectid;
6397 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6398 	} else
6399 		BUG_ON(parent > 0);
6400 
6401 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6402 		struct btrfs_delayed_extent_op *extent_op;
6403 		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6404 		BUG_ON(!extent_op); /* -ENOMEM */
6405 		if (key)
6406 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
6407 		else
6408 			memset(&extent_op->key, 0, sizeof(extent_op->key));
6409 		extent_op->flags_to_set = flags;
6410 		extent_op->update_key = 1;
6411 		extent_op->update_flags = 1;
6412 		extent_op->is_data = 0;
6413 
6414 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6415 					ins.objectid,
6416 					ins.offset, parent, root_objectid,
6417 					level, BTRFS_ADD_DELAYED_EXTENT,
6418 					extent_op, 0);
6419 		BUG_ON(ret); /* -ENOMEM */
6420 	}
6421 	return buf;
6422 }
6423 
6424 struct walk_control {
6425 	u64 refs[BTRFS_MAX_LEVEL];
6426 	u64 flags[BTRFS_MAX_LEVEL];
6427 	struct btrfs_key update_progress;
6428 	int stage;
6429 	int level;
6430 	int shared_level;
6431 	int update_ref;
6432 	int keep_locks;
6433 	int reada_slot;
6434 	int reada_count;
6435 	int for_reloc;
6436 };
6437 
6438 #define DROP_REFERENCE	1
6439 #define UPDATE_BACKREF	2
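
/*
 * The walk alternates between two stages: DROP_REFERENCE drops our
 * reference on blocks that are owned solely by the tree being deleted,
 * while UPDATE_BACKREF converts implicit backrefs to full backrefs for a
 * shared subtree before walk_up_proc() flips the stage back to
 * DROP_REFERENCE.
 */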
6440 
6441 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6442 				     struct btrfs_root *root,
6443 				     struct walk_control *wc,
6444 				     struct btrfs_path *path)
6445 {
6446 	u64 bytenr;
6447 	u64 generation;
6448 	u64 refs;
6449 	u64 flags;
6450 	u32 nritems;
6451 	u32 blocksize;
6452 	struct btrfs_key key;
6453 	struct extent_buffer *eb;
6454 	int ret;
6455 	int slot;
6456 	int nread = 0;
6457 
6458 	if (path->slots[wc->level] < wc->reada_slot) {
6459 		wc->reada_count = wc->reada_count * 2 / 3;
6460 		wc->reada_count = max(wc->reada_count, 2);
6461 	} else {
6462 		wc->reada_count = wc->reada_count * 3 / 2;
6463 		wc->reada_count = min_t(int, wc->reada_count,
6464 					BTRFS_NODEPTRS_PER_BLOCK(root));
6465 	}
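
	/*
	 * Illustrative numbers: starting from a window of 32 entries, the
	 * count shrinks to 21 (32 * 2 / 3) while the walk is still behind
	 * the previous readahead window, and grows to 48 (32 * 3 / 2) once
	 * it has caught up, clamped to [2, BTRFS_NODEPTRS_PER_BLOCK(root)].
	 */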
6466 
6467 	eb = path->nodes[wc->level];
6468 	nritems = btrfs_header_nritems(eb);
6469 	blocksize = btrfs_level_size(root, wc->level - 1);
6470 
6471 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6472 		if (nread >= wc->reada_count)
6473 			break;
6474 
6475 		cond_resched();
6476 		bytenr = btrfs_node_blockptr(eb, slot);
6477 		generation = btrfs_node_ptr_generation(eb, slot);
6478 
6479 		if (slot == path->slots[wc->level])
6480 			goto reada;
6481 
6482 		if (wc->stage == UPDATE_BACKREF &&
6483 		    generation <= root->root_key.offset)
6484 			continue;
6485 
6486 		/* We don't lock the tree block, it's OK to be racy here */
6487 		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6488 					       &refs, &flags);
6489 		/* We don't care about errors in readahead. */
6490 		if (ret < 0)
6491 			continue;
6492 		BUG_ON(refs == 0);
6493 
6494 		if (wc->stage == DROP_REFERENCE) {
6495 			if (refs == 1)
6496 				goto reada;
6497 
6498 			if (wc->level == 1 &&
6499 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6500 				continue;
6501 			if (!wc->update_ref ||
6502 			    generation <= root->root_key.offset)
6503 				continue;
6504 			btrfs_node_key_to_cpu(eb, &key, slot);
6505 			ret = btrfs_comp_cpu_keys(&key,
6506 						  &wc->update_progress);
6507 			if (ret < 0)
6508 				continue;
6509 		} else {
6510 			if (wc->level == 1 &&
6511 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6512 				continue;
6513 		}
6514 reada:
6515 		ret = readahead_tree_block(root, bytenr, blocksize,
6516 					   generation);
6517 		if (ret)
6518 			break;
6519 		nread++;
6520 	}
6521 	wc->reada_slot = slot;
6522 }
6523 
6524 /*
6525  * helper to process a tree block while walking down the tree.
6526  *
6527  * when wc->stage == UPDATE_BACKREF, this function updates
6528  * back refs for pointers in the block.
6529  *
6530  * NOTE: return value 1 means we should stop walking down.
6531  */
6532 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6533 				   struct btrfs_root *root,
6534 				   struct btrfs_path *path,
6535 				   struct walk_control *wc, int lookup_info)
6536 {
6537 	int level = wc->level;
6538 	struct extent_buffer *eb = path->nodes[level];
6539 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6540 	int ret;
6541 
6542 	if (wc->stage == UPDATE_BACKREF &&
6543 	    btrfs_header_owner(eb) != root->root_key.objectid)
6544 		return 1;
6545 
6546 	/*
6547 	 * when the reference count of a tree block is 1, it won't increase
6548 	 * again. once the full backref flag is set, we never clear it.
6549 	 */
6550 	if (lookup_info &&
6551 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6552 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6553 		BUG_ON(!path->locks[level]);
6554 		ret = btrfs_lookup_extent_info(trans, root,
6555 					       eb->start, eb->len,
6556 					       &wc->refs[level],
6557 					       &wc->flags[level]);
6558 		BUG_ON(ret == -ENOMEM);
6559 		if (ret)
6560 			return ret;
6561 		BUG_ON(wc->refs[level] == 0);
6562 	}
6563 
6564 	if (wc->stage == DROP_REFERENCE) {
6565 		if (wc->refs[level] > 1)
6566 			return 1;
6567 
6568 		if (path->locks[level] && !wc->keep_locks) {
6569 			btrfs_tree_unlock_rw(eb, path->locks[level]);
6570 			path->locks[level] = 0;
6571 		}
6572 		return 0;
6573 	}
6574 
6575 	/* wc->stage == UPDATE_BACKREF */
6576 	if (!(wc->flags[level] & flag)) {
6577 		BUG_ON(!path->locks[level]);
6578 		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6579 		BUG_ON(ret); /* -ENOMEM */
6580 		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6581 		BUG_ON(ret); /* -ENOMEM */
6582 		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6583 						  eb->len, flag, 0);
6584 		BUG_ON(ret); /* -ENOMEM */
6585 		wc->flags[level] |= flag;
6586 	}
6587 
6588 	/*
6589 	 * the block is shared by multiple trees, so it's not good to
6590 	 * keep the tree lock
6591 	 */
6592 	if (path->locks[level] && level > 0) {
6593 		btrfs_tree_unlock_rw(eb, path->locks[level]);
6594 		path->locks[level] = 0;
6595 	}
6596 	return 0;
6597 }
6598 
6599 /*
6600  * helper to process a tree block pointer.
6601  *
6602  * when wc->stage == DROP_REFERENCE, this function checks
6603  * reference count of the block pointed to. if the block
6604  * is shared and we need update back refs for the subtree
6605  * rooted at the block, this function changes wc->stage to
6606  * UPDATE_BACKREF. if the block is shared and there is no
6607  * need to update back, this function drops the reference
6608  * need to update backrefs, this function drops the reference
6609  *
6610  * NOTE: return value 1 means we should stop walking down.
6611  */
6612 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6613 				 struct btrfs_root *root,
6614 				 struct btrfs_path *path,
6615 				 struct walk_control *wc, int *lookup_info)
6616 {
6617 	u64 bytenr;
6618 	u64 generation;
6619 	u64 parent;
6620 	u32 blocksize;
6621 	struct btrfs_key key;
6622 	struct extent_buffer *next;
6623 	int level = wc->level;
6624 	int reada = 0;
6625 	int ret = 0;
6626 
6627 	generation = btrfs_node_ptr_generation(path->nodes[level],
6628 					       path->slots[level]);
6629 	/*
6630 	 * if the lower level block was created before the snapshot
6631 	 * was created, we know there is no need to update back refs
6632 	 * for the subtree
6633 	 */
6634 	if (wc->stage == UPDATE_BACKREF &&
6635 	    generation <= root->root_key.offset) {
6636 		*lookup_info = 1;
6637 		return 1;
6638 	}
6639 
6640 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6641 	blocksize = btrfs_level_size(root, level - 1);
6642 
6643 	next = btrfs_find_tree_block(root, bytenr, blocksize);
6644 	if (!next) {
6645 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6646 		if (!next)
6647 			return -ENOMEM;
6648 		reada = 1;
6649 	}
6650 	btrfs_tree_lock(next);
6651 	btrfs_set_lock_blocking(next);
6652 
6653 	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6654 				       &wc->refs[level - 1],
6655 				       &wc->flags[level - 1]);
6656 	if (ret < 0) {
6657 		btrfs_tree_unlock(next);
6658 		return ret;
6659 	}
6660 
6661 	BUG_ON(wc->refs[level - 1] == 0);
6662 	*lookup_info = 0;
6663 
6664 	if (wc->stage == DROP_REFERENCE) {
6665 		if (wc->refs[level - 1] > 1) {
6666 			if (level == 1 &&
6667 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6668 				goto skip;
6669 
6670 			if (!wc->update_ref ||
6671 			    generation <= root->root_key.offset)
6672 				goto skip;
6673 
6674 			btrfs_node_key_to_cpu(path->nodes[level], &key,
6675 					      path->slots[level]);
6676 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6677 			if (ret < 0)
6678 				goto skip;
6679 
6680 			wc->stage = UPDATE_BACKREF;
6681 			wc->shared_level = level - 1;
6682 		}
6683 	} else {
6684 		if (level == 1 &&
6685 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6686 			goto skip;
6687 	}
6688 
6689 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
6690 		btrfs_tree_unlock(next);
6691 		free_extent_buffer(next);
6692 		next = NULL;
6693 		*lookup_info = 1;
6694 	}
6695 
6696 	if (!next) {
6697 		if (reada && level == 1)
6698 			reada_walk_down(trans, root, wc, path);
6699 		next = read_tree_block(root, bytenr, blocksize, generation);
6700 		if (!next)
6701 			return -EIO;
6702 		btrfs_tree_lock(next);
6703 		btrfs_set_lock_blocking(next);
6704 	}
6705 
6706 	level--;
6707 	BUG_ON(level != btrfs_header_level(next));
6708 	path->nodes[level] = next;
6709 	path->slots[level] = 0;
6710 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6711 	wc->level = level;
6712 	if (wc->level == 1)
6713 		wc->reada_slot = 0;
6714 	return 0;
6715 skip:
6716 	wc->refs[level - 1] = 0;
6717 	wc->flags[level - 1] = 0;
6718 	if (wc->stage == DROP_REFERENCE) {
6719 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6720 			parent = path->nodes[level]->start;
6721 		} else {
6722 			BUG_ON(root->root_key.objectid !=
6723 			       btrfs_header_owner(path->nodes[level]));
6724 			parent = 0;
6725 		}
6726 
6727 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6728 				root->root_key.objectid, level - 1, 0, 0);
6729 		BUG_ON(ret); /* -ENOMEM */
6730 	}
6731 	btrfs_tree_unlock(next);
6732 	free_extent_buffer(next);
6733 	*lookup_info = 1;
6734 	return 1;
6735 }
6736 
6737 /*
6738  * helper to process a tree block while walking up the tree.
6739  *
6740  * when wc->stage == DROP_REFERENCE, this function drops
6741  * reference count on the block.
6742  *
6743  * when wc->stage == UPDATE_BACKREF, this function changes
6744  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6745  * to UPDATE_BACKREF previously while processing the block.
6746  *
6747  * NOTE: return value 1 means we should stop walking up.
6748  */
6749 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6750 				 struct btrfs_root *root,
6751 				 struct btrfs_path *path,
6752 				 struct walk_control *wc)
6753 {
6754 	int ret;
6755 	int level = wc->level;
6756 	struct extent_buffer *eb = path->nodes[level];
6757 	u64 parent = 0;
6758 
6759 	if (wc->stage == UPDATE_BACKREF) {
6760 		BUG_ON(wc->shared_level < level);
6761 		if (level < wc->shared_level)
6762 			goto out;
6763 
6764 		ret = find_next_key(path, level + 1, &wc->update_progress);
6765 		if (ret > 0)
6766 			wc->update_ref = 0;
6767 
6768 		wc->stage = DROP_REFERENCE;
6769 		wc->shared_level = -1;
6770 		path->slots[level] = 0;
6771 
6772 		/*
6773 		 * check reference count again if the block isn't locked.
6774 		 * we should start walking down the tree again if reference
6775 		 * count is one.
6776 		 */
6777 		if (!path->locks[level]) {
6778 			BUG_ON(level == 0);
6779 			btrfs_tree_lock(eb);
6780 			btrfs_set_lock_blocking(eb);
6781 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6782 
6783 			ret = btrfs_lookup_extent_info(trans, root,
6784 						       eb->start, eb->len,
6785 						       &wc->refs[level],
6786 						       &wc->flags[level]);
6787 			if (ret < 0) {
6788 				btrfs_tree_unlock_rw(eb, path->locks[level]);
6789 				path->locks[level] = 0;
6790 				return ret;
6791 			}
6792 			BUG_ON(wc->refs[level] == 0);
6793 			if (wc->refs[level] == 1) {
6794 				btrfs_tree_unlock_rw(eb, path->locks[level]);
6795 				path->locks[level] = 0;
6796 				return 1;
6797 			}
6798 		}
6799 	}
6800 
6801 	/* wc->stage == DROP_REFERENCE */
6802 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6803 
6804 	if (wc->refs[level] == 1) {
6805 		if (level == 0) {
6806 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6807 				ret = btrfs_dec_ref(trans, root, eb, 1,
6808 						    wc->for_reloc);
6809 			else
6810 				ret = btrfs_dec_ref(trans, root, eb, 0,
6811 						    wc->for_reloc);
6812 			BUG_ON(ret); /* -ENOMEM */
6813 		}
6814 		/* make the locked-block assertion in clean_tree_block happy */
6815 		if (!path->locks[level] &&
6816 		    btrfs_header_generation(eb) == trans->transid) {
6817 			btrfs_tree_lock(eb);
6818 			btrfs_set_lock_blocking(eb);
6819 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6820 		}
6821 		clean_tree_block(trans, root, eb);
6822 	}
6823 
6824 	if (eb == root->node) {
6825 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6826 			parent = eb->start;
6827 		else
6828 			BUG_ON(root->root_key.objectid !=
6829 			       btrfs_header_owner(eb));
6830 	} else {
6831 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6832 			parent = path->nodes[level + 1]->start;
6833 		else
6834 			BUG_ON(root->root_key.objectid !=
6835 			       btrfs_header_owner(path->nodes[level + 1]));
6836 	}
6837 
6838 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6839 out:
6840 	wc->refs[level] = 0;
6841 	wc->flags[level] = 0;
6842 	return 0;
6843 }
6844 
6845 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6846 				   struct btrfs_root *root,
6847 				   struct btrfs_path *path,
6848 				   struct walk_control *wc)
6849 {
6850 	int level = wc->level;
6851 	int lookup_info = 1;
6852 	int ret;
6853 
6854 	while (level >= 0) {
6855 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
6856 		if (ret > 0)
6857 			break;
6858 
6859 		if (level == 0)
6860 			break;
6861 
6862 		if (path->slots[level] >=
6863 		    btrfs_header_nritems(path->nodes[level]))
6864 			break;
6865 
6866 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
6867 		if (ret > 0) {
6868 			path->slots[level]++;
6869 			continue;
6870 		} else if (ret < 0)
6871 			return ret;
6872 		level = wc->level;
6873 	}
6874 	return 0;
6875 }
6876 
6877 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6878 				 struct btrfs_root *root,
6879 				 struct btrfs_path *path,
6880 				 struct walk_control *wc, int max_level)
6881 {
6882 	int level = wc->level;
6883 	int ret;
6884 
6885 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6886 	while (level < max_level && path->nodes[level]) {
6887 		wc->level = level;
6888 		if (path->slots[level] + 1 <
6889 		    btrfs_header_nritems(path->nodes[level])) {
6890 			path->slots[level]++;
6891 			return 0;
6892 		} else {
6893 			ret = walk_up_proc(trans, root, path, wc);
6894 			if (ret > 0)
6895 				return 0;
6896 
6897 			if (path->locks[level]) {
6898 				btrfs_tree_unlock_rw(path->nodes[level],
6899 						     path->locks[level]);
6900 				path->locks[level] = 0;
6901 			}
6902 			free_extent_buffer(path->nodes[level]);
6903 			path->nodes[level] = NULL;
6904 			level++;
6905 		}
6906 	}
6907 	return 1;
6908 }
6909 
6910 /*
6911  * drop a subvolume tree.
6912  *
6913  * this function traverses the tree freeing any blocks that only
6914  * this function traverses the tree freeing any blocks that are only
6915  * referenced by the tree.
6916  *
6917  * when a shared tree block is found, this function decreases its
6918  * reference count by one. if update_ref is true, this function
6919  * also makes sure backrefs for the shared block and all lower level
6920  */
6921 int btrfs_drop_snapshot(struct btrfs_root *root,
6922 			 struct btrfs_block_rsv *block_rsv, int update_ref,
6923 			 int for_reloc)
6924 {
6925 	struct btrfs_path *path;
6926 	struct btrfs_trans_handle *trans;
6927 	struct btrfs_root *tree_root = root->fs_info->tree_root;
6928 	struct btrfs_root_item *root_item = &root->root_item;
6929 	struct walk_control *wc;
6930 	struct btrfs_key key;
6931 	int err = 0;
6932 	int ret;
6933 	int level;
6934 
6935 	path = btrfs_alloc_path();
6936 	if (!path) {
6937 		err = -ENOMEM;
6938 		goto out;
6939 	}
6940 
6941 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
6942 	if (!wc) {
6943 		btrfs_free_path(path);
6944 		err = -ENOMEM;
6945 		goto out;
6946 	}
6947 
6948 	trans = btrfs_start_transaction(tree_root, 0);
6949 	if (IS_ERR(trans)) {
6950 		err = PTR_ERR(trans);
6951 		goto out_free;
6952 	}
6953 
6954 	if (block_rsv)
6955 		trans->block_rsv = block_rsv;
6956 
6957 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6958 		level = btrfs_header_level(root->node);
6959 		path->nodes[level] = btrfs_lock_root_node(root);
6960 		btrfs_set_lock_blocking(path->nodes[level]);
6961 		path->slots[level] = 0;
6962 		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6963 		memset(&wc->update_progress, 0,
6964 		       sizeof(wc->update_progress));
6965 	} else {
6966 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6967 		memcpy(&wc->update_progress, &key,
6968 		       sizeof(wc->update_progress));
6969 
6970 		level = root_item->drop_level;
6971 		BUG_ON(level == 0);
6972 		path->lowest_level = level;
6973 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6974 		path->lowest_level = 0;
6975 		if (ret < 0) {
6976 			err = ret;
6977 			goto out_end_trans;
6978 		}
6979 		WARN_ON(ret > 0);
6980 
6981 		/*
6982 		 * unlock our path, this is safe because only this
6983 		 * function is allowed to delete this snapshot
6984 		 */
6985 		btrfs_unlock_up_safe(path, 0);
6986 
6987 		level = btrfs_header_level(root->node);
6988 		while (1) {
6989 			btrfs_tree_lock(path->nodes[level]);
6990 			btrfs_set_lock_blocking(path->nodes[level]);
6991 
6992 			ret = btrfs_lookup_extent_info(trans, root,
6993 						path->nodes[level]->start,
6994 						path->nodes[level]->len,
6995 						&wc->refs[level],
6996 						&wc->flags[level]);
6997 			if (ret < 0) {
6998 				err = ret;
6999 				goto out_end_trans;
7000 			}
7001 			BUG_ON(wc->refs[level] == 0);
7002 
7003 			if (level == root_item->drop_level)
7004 				break;
7005 
7006 			btrfs_tree_unlock(path->nodes[level]);
7007 			WARN_ON(wc->refs[level] != 1);
7008 			level--;
7009 		}
7010 	}
7011 
7012 	wc->level = level;
7013 	wc->shared_level = -1;
7014 	wc->stage = DROP_REFERENCE;
7015 	wc->update_ref = update_ref;
7016 	wc->keep_locks = 0;
7017 	wc->for_reloc = for_reloc;
7018 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7019 
7020 	while (1) {
7021 		ret = walk_down_tree(trans, root, path, wc);
7022 		if (ret < 0) {
7023 			err = ret;
7024 			break;
7025 		}
7026 
7027 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7028 		if (ret < 0) {
7029 			err = ret;
7030 			break;
7031 		}
7032 
7033 		if (ret > 0) {
7034 			BUG_ON(wc->stage != DROP_REFERENCE);
7035 			break;
7036 		}
7037 
7038 		if (wc->stage == DROP_REFERENCE) {
7039 			level = wc->level;
7040 			btrfs_node_key(path->nodes[level],
7041 				       &root_item->drop_progress,
7042 				       path->slots[level]);
7043 			root_item->drop_level = level;
7044 		}
7045 
7046 		BUG_ON(wc->level == 0);
7047 		if (btrfs_should_end_transaction(trans, tree_root)) {
7048 			ret = btrfs_update_root(trans, tree_root,
7049 						&root->root_key,
7050 						root_item);
7051 			if (ret) {
7052 				btrfs_abort_transaction(trans, tree_root, ret);
7053 				err = ret;
7054 				goto out_end_trans;
7055 			}
7056 
7057 			btrfs_end_transaction_throttle(trans, tree_root);
7058 			trans = btrfs_start_transaction(tree_root, 0);
7059 			if (IS_ERR(trans)) {
7060 				err = PTR_ERR(trans);
7061 				goto out_free;
7062 			}
7063 			if (block_rsv)
7064 				trans->block_rsv = block_rsv;
7065 		}
7066 	}
7067 	btrfs_release_path(path);
7068 	if (err)
7069 		goto out_end_trans;
7070 
7071 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
7072 	if (ret) {
7073 		btrfs_abort_transaction(trans, tree_root, ret);
7074 		goto out_end_trans;
7075 	}
7076 
7077 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7078 		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7079 					   NULL, NULL);
7080 		if (ret < 0) {
7081 			btrfs_abort_transaction(trans, tree_root, ret);
7082 			err = ret;
7083 			goto out_end_trans;
7084 		} else if (ret > 0) {
7085 			/* if we fail to delete the orphan item this time
7086 			 * around, it'll get picked up the next time.
7087 			 *
7088 			 * The most common failure here is just -ENOENT.
7089 			 */
7090 			btrfs_del_orphan_item(trans, tree_root,
7091 					      root->root_key.objectid);
7092 		}
7093 	}
7094 
7095 	if (root->in_radix) {
7096 		btrfs_free_fs_root(tree_root->fs_info, root);
7097 	} else {
7098 		free_extent_buffer(root->node);
7099 		free_extent_buffer(root->commit_root);
7100 		kfree(root);
7101 	}
7102 out_end_trans:
7103 	btrfs_end_transaction_throttle(trans, tree_root);
7104 out_free:
7105 	kfree(wc);
7106 	btrfs_free_path(path);
7107 out:
7108 	if (err)
7109 		btrfs_std_error(root->fs_info, err);
7110 	return err;
7111 }
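
/*
 * Sketch (hypothetical helper, not in the original file) of the
 * checkpoint-and-restart pattern used in the loop above: progress is
 * saved in the root item so an interrupted snapshot delete can resume,
 * and the transaction is cycled so one delete never pins a huge one.
 */
static struct btrfs_trans_handle *
example_drop_checkpoint(struct btrfs_trans_handle *trans,
			struct btrfs_root *tree_root,
			struct btrfs_root *root,
			struct btrfs_root_item *root_item)
{
	int ret;

	/* persist drop_progress/drop_level before letting go of trans */
	ret = btrfs_update_root(trans, tree_root, &root->root_key, root_item);
	if (ret)
		return ERR_PTR(ret);
	btrfs_end_transaction_throttle(trans, tree_root);
	/* open a fresh transaction for the next batch of tree walking */
	return btrfs_start_transaction(tree_root, 0);
}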
7112 
7113 /*
7114  * drop subtree rooted at tree block 'node'.
7115  *
7116  * NOTE: this function will unlock and release tree block 'node'.
7117  * It is only used by the relocation code.
7118  */
7119 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7120 			struct btrfs_root *root,
7121 			struct extent_buffer *node,
7122 			struct extent_buffer *parent)
7123 {
7124 	struct btrfs_path *path;
7125 	struct walk_control *wc;
7126 	int level;
7127 	int parent_level;
7128 	int ret = 0;
7129 	int wret;
7130 
7131 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7132 
7133 	path = btrfs_alloc_path();
7134 	if (!path)
7135 		return -ENOMEM;
7136 
7137 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
7138 	if (!wc) {
7139 		btrfs_free_path(path);
7140 		return -ENOMEM;
7141 	}
7142 
7143 	btrfs_assert_tree_locked(parent);
7144 	parent_level = btrfs_header_level(parent);
7145 	extent_buffer_get(parent);
7146 	path->nodes[parent_level] = parent;
7147 	path->slots[parent_level] = btrfs_header_nritems(parent);
7148 
7149 	btrfs_assert_tree_locked(node);
7150 	level = btrfs_header_level(node);
7151 	path->nodes[level] = node;
7152 	path->slots[level] = 0;
7153 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7154 
7155 	wc->refs[parent_level] = 1;
7156 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7157 	wc->level = level;
7158 	wc->shared_level = -1;
7159 	wc->stage = DROP_REFERENCE;
7160 	wc->update_ref = 0;
7161 	wc->keep_locks = 1;
7162 	wc->for_reloc = 1;
7163 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7164 
7165 	while (1) {
7166 		wret = walk_down_tree(trans, root, path, wc);
7167 		if (wret < 0) {
7168 			ret = wret;
7169 			break;
7170 		}
7171 
7172 		wret = walk_up_tree(trans, root, path, wc, parent_level);
7173 		if (wret < 0)
7174 			ret = wret;
7175 		if (wret != 0)
7176 			break;
7177 	}
7178 
7179 	kfree(wc);
7180 	btrfs_free_path(path);
7181 	return ret;
7182 }
7183 
7184 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7185 {
7186 	u64 num_devices;
7187 	u64 stripped;
7188 
7189 	/*
7190 	 * if restripe for this chunk_type is on, pick the target profile and
7191 	 * return; otherwise do the usual balance
7192 	 */
7193 	stripped = get_restripe_target(root->fs_info, flags);
7194 	if (stripped)
7195 		return extended_to_chunk(stripped);
7196 
7197 	/*
7198 	 * we add in the count of missing devices because we want
7199 	 * to make sure that any RAID levels on a degraded FS
7200 	 * continue to be honored.
7201 	 */
7202 	num_devices = root->fs_info->fs_devices->rw_devices +
7203 		root->fs_info->fs_devices->missing_devices;
7204 
7205 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
7206 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7207 
7208 	if (num_devices == 1) {
7209 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7210 		stripped = flags & ~stripped;
7211 
7212 		/* turn raid0 into single device chunks */
7213 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
7214 			return stripped;
7215 
7216 		/* turn mirroring into duplication */
7217 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7218 			     BTRFS_BLOCK_GROUP_RAID10))
7219 			return stripped | BTRFS_BLOCK_GROUP_DUP;
7220 	} else {
7221 		/* they already had raid on here, just return */
7222 		if (flags & stripped)
7223 			return flags;
7224 
7225 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7226 		stripped = flags & ~stripped;
7227 
7228 		/* switch duplicated blocks with raid1 */
7229 		if (flags & BTRFS_BLOCK_GROUP_DUP)
7230 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
7231 
7232 		/* this is drive concat, leave it alone */
7233 	}
7234 
7235 	return flags;
7236 }
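
/*
 * Illustrative sketch (hypothetical, not in the original source) of the
 * single-device fallback performed above: raid0 degrades to single
 * chunks and raid1/raid10 degrade to dup once only one rw device is
 * left.
 */
static u64 example_single_device_fallback(u64 flags)
{
	/* striping makes no sense on one device: drop raid0 */
	if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return flags & ~BTRFS_BLOCK_GROUP_RAID0;

	/* mirroring across devices becomes duplication on one device */
	if (flags & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		return (flags & ~(BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10)) |
		       BTRFS_BLOCK_GROUP_DUP;

	return flags;
}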
7237 
7238 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7239 {
7240 	struct btrfs_space_info *sinfo = cache->space_info;
7241 	u64 num_bytes;
7242 	u64 min_allocable_bytes;
7243 	int ret = -ENOSPC;
7244 
7246 	/*
7247 	 * We need some metadata space and system metadata space for
7248 	 * allocating chunks in some corner cases, so keep a minimum
7249 	 * reserve unless we are forcing the group read-only.
7250 	 */
7251 	if ((sinfo->flags &
7252 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7253 	    !force)
7254 		min_allocable_bytes = 1 * 1024 * 1024;
7255 	else
7256 		min_allocable_bytes = 0;
7257 
7258 	spin_lock(&sinfo->lock);
7259 	spin_lock(&cache->lock);
7260 
7261 	if (cache->ro) {
7262 		ret = 0;
7263 		goto out;
7264 	}
7265 
7266 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7267 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7268 
7269 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7270 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7271 	    min_allocable_bytes <= sinfo->total_bytes) {
7272 		sinfo->bytes_readonly += num_bytes;
7273 		cache->ro = 1;
7274 		ret = 0;
7275 	}
7276 out:
7277 	spin_unlock(&cache->lock);
7278 	spin_unlock(&sinfo->lock);
7279 	return ret;
7280 }
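
/*
 * The accounting test above, pulled out as a hypothetical helper for
 * clarity: the group may go read-only only if moving its unused bytes
 * into bytes_readonly still leaves min_allocable_bytes of slack in the
 * space_info.
 */
static int example_can_set_ro(struct btrfs_space_info *sinfo, u64 num_bytes,
			      u64 min_allocable_bytes)
{
	return sinfo->bytes_used + sinfo->bytes_reserved +
	       sinfo->bytes_pinned + sinfo->bytes_may_use +
	       sinfo->bytes_readonly + num_bytes +
	       min_allocable_bytes <= sinfo->total_bytes;
}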
7281 
7282 int btrfs_set_block_group_ro(struct btrfs_root *root,
7283 			     struct btrfs_block_group_cache *cache)
7285 {
7286 	struct btrfs_trans_handle *trans;
7287 	u64 alloc_flags;
7288 	int ret;
7289 
7290 	BUG_ON(cache->ro);
7291 
7292 	trans = btrfs_join_transaction(root);
7293 	if (IS_ERR(trans))
7294 		return PTR_ERR(trans);
7295 
7296 	alloc_flags = update_block_group_flags(root, cache->flags);
7297 	if (alloc_flags != cache->flags) {
7298 		ret = do_chunk_alloc(trans, root, alloc_flags,
7299 				     CHUNK_ALLOC_FORCE);
7300 		if (ret < 0)
7301 			goto out;
7302 	}
7303 
7304 	ret = set_block_group_ro(cache, 0);
7305 	if (!ret)
7306 		goto out;
7307 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7308 	ret = do_chunk_alloc(trans, root, alloc_flags,
7309 			     CHUNK_ALLOC_FORCE);
7310 	if (ret < 0)
7311 		goto out;
7312 	ret = set_block_group_ro(cache, 0);
7313 out:
7314 	btrfs_end_transaction(trans, root);
7315 	return ret;
7316 }
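
/*
 * Usage sketch (hedged; the real callers live in the relocation and
 * balance code): a block group is flipped read-only before its extents
 * are moved, and made writable again with btrfs_set_block_group_rw()
 * if the move is aborted.  The "example_" name is hypothetical.
 */
static int example_ro_guarded_move(struct btrfs_root *root,
				   struct btrfs_block_group_cache *cache,
				   int move_failed)
{
	int ret;

	ret = btrfs_set_block_group_ro(root, cache);
	if (ret)
		return ret;	/* commonly -ENOSPC */

	/* ... extents are moved out while the group is read-only ... */

	if (move_failed)
		btrfs_set_block_group_rw(root, cache);
	return 0;
}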
7317 
7318 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7319 			    struct btrfs_root *root, u64 type)
7320 {
7321 	u64 alloc_flags = get_alloc_profile(root, type);
7322 	return do_chunk_alloc(trans, root, alloc_flags,
7323 			      CHUNK_ALLOC_FORCE);
7324 }
7325 
7326 /*
7327  * helper to account the unused space of all the read-only block groups in
7328  * the list.  Takes mirrors into account.
7329  */
7330 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7331 {
7332 	struct btrfs_block_group_cache *block_group;
7333 	u64 free_bytes = 0;
7334 	int factor;
7335 
7336 	list_for_each_entry(block_group, groups_list, list) {
7337 		spin_lock(&block_group->lock);
7338 
7339 		if (!block_group->ro) {
7340 			spin_unlock(&block_group->lock);
7341 			continue;
7342 		}
7343 
7344 		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7345 					  BTRFS_BLOCK_GROUP_RAID10 |
7346 					  BTRFS_BLOCK_GROUP_DUP))
7347 			factor = 2;
7348 		else
7349 			factor = 1;
7350 
7351 		free_bytes += (block_group->key.offset -
7352 			       btrfs_block_group_used(&block_group->item)) *
7353 			       factor;
7354 
7355 		spin_unlock(&block_group->lock);
7356 	}
7357 
7358 	return free_bytes;
7359 }
7360 
7361 /*
7362  * helper to account the unused space of all the read-only block groups in
7363  * the space_info.  Takes mirrors into account.
7364  */
7365 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7366 {
7367 	int i;
7368 	u64 free_bytes = 0;
7369 
7370 	spin_lock(&sinfo->lock);
7371 
7372 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7373 		if (!list_empty(&sinfo->block_groups[i]))
7374 			free_bytes += __btrfs_get_ro_block_group_free_space(
7375 						&sinfo->block_groups[i]);
7376 
7377 	spin_unlock(&sinfo->lock);
7378 
7379 	return free_bytes;
7380 }
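
/*
 * Caller sketch (hedged): statfs-style reporting can subtract the free
 * space trapped in read-only groups, since it is not available for new
 * allocations.  This wrapper is hypothetical.
 */
static u64 example_usable_free_space(struct btrfs_space_info *sinfo,
				     u64 free_bytes)
{
	u64 ro_free = btrfs_account_ro_block_groups_free_space(sinfo);

	return free_bytes > ro_free ? free_bytes - ro_free : 0;
}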
7381 
7382 void btrfs_set_block_group_rw(struct btrfs_root *root,
7383 			      struct btrfs_block_group_cache *cache)
7384 {
7385 	struct btrfs_space_info *sinfo = cache->space_info;
7386 	u64 num_bytes;
7387 
7388 	BUG_ON(!cache->ro);
7389 
7390 	spin_lock(&sinfo->lock);
7391 	spin_lock(&cache->lock);
7392 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7393 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7394 	sinfo->bytes_readonly -= num_bytes;
7395 	cache->ro = 0;
7396 	spin_unlock(&cache->lock);
7397 	spin_unlock(&sinfo->lock);
7398 }
7399 
7400 /*
7401  * checks to see if it's even possible to relocate this block group.
7402  *
7403  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7404  * it's ok to go ahead and try.
7405  */
7406 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7407 {
7408 	struct btrfs_block_group_cache *block_group;
7409 	struct btrfs_space_info *space_info;
7410 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7411 	struct btrfs_device *device;
7412 	u64 min_free;
7413 	u64 dev_min = 1;
7414 	u64 dev_nr = 0;
7415 	u64 target;
7416 	int index;
7417 	int full = 0;
7418 	int ret = 0;
7419 
7420 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7421 
7422 	/* odd, couldn't find the block group, leave it alone */
7423 	if (!block_group)
7424 		return -1;
7425 
7426 	min_free = btrfs_block_group_used(&block_group->item);
7427 
7428 	/* no bytes used, we're good */
7429 	if (!min_free)
7430 		goto out;
7431 
7432 	space_info = block_group->space_info;
7433 	spin_lock(&space_info->lock);
7434 
7435 	full = space_info->full;
7436 
7437 	/*
7438 	 * if this is the last block group we have in this space, we can't
7439 	 * relocate it unless we're able to allocate a new chunk below.
7440 	 *
7441 	 * Otherwise, we need to make sure we have room in the space to handle
7442 	 * all of the extents from this block group.  If we can, we're good.
7443 	 */
7444 	if ((space_info->total_bytes != block_group->key.offset) &&
7445 	    (space_info->bytes_used + space_info->bytes_reserved +
7446 	     space_info->bytes_pinned + space_info->bytes_readonly +
7447 	     min_free < space_info->total_bytes)) {
7448 		spin_unlock(&space_info->lock);
7449 		goto out;
7450 	}
7451 	spin_unlock(&space_info->lock);
7452 
7453 	/*
7454 	 * OK, we don't have enough space, but maybe we have free space on our
7455 	 * devices to allocate new chunks for relocation, so loop through our
7456 	 * alloc devices and guess if we have enough space.  If this block
7457 	 * group is going to be restriped, run checks against the target
7458 	 * profile instead of the current one.
7459 	 */
7460 	ret = -1;
7461 
7462 	/*
7463 	 * index:
7464 	 *      0: raid10
7465 	 *      1: raid1
7466 	 *      2: dup
7467 	 *      3: raid0
7468 	 *      4: single
7469 	 */
7470 	target = get_restripe_target(root->fs_info, block_group->flags);
7471 	if (target) {
7472 		index = __get_raid_index(extended_to_chunk(target));
7473 	} else {
7474 		/*
7475 		 * this is just a balance, so if we were marked as full
7476 		 * we know there is no space for a new chunk
7477 		 */
7478 		if (full)
7479 			goto out;
7480 
7481 		index = get_block_group_index(block_group);
7482 	}
7483 
7484 	if (index == 0) {
7485 		dev_min = 4;
7486 		/* Divide by 2 */
7487 		min_free >>= 1;
7488 	} else if (index == 1) {
7489 		dev_min = 2;
7490 	} else if (index == 2) {
7491 		/* Multiply by 2 */
7492 		min_free <<= 1;
7493 	} else if (index == 3) {
7494 		dev_min = fs_devices->rw_devices;
7495 		do_div(min_free, dev_min);
7496 	}
7497 
7498 	mutex_lock(&root->fs_info->chunk_mutex);
7499 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7500 		u64 dev_offset;
7501 
7502 		/*
7503 		 * check to make sure we can actually find a chunk with enough
7504 		 * space to fit our block group in.
7505 		 */
7506 		if (device->total_bytes > device->bytes_used + min_free &&
7507 		    !device->is_tgtdev_for_dev_replace) {
7508 			ret = find_free_dev_extent(device, min_free,
7509 						   &dev_offset, NULL);
7510 			if (!ret)
7511 				dev_nr++;
7512 
7513 			if (dev_nr >= dev_min)
7514 				break;
7515 
7516 			ret = -1;
7517 		}
7518 	}
7519 	mutex_unlock(&root->fs_info->chunk_mutex);
7520 out:
7521 	btrfs_put_block_group(block_group);
7522 	return ret;
7523 }
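
/*
 * The per-profile requirements applied above, restated as a
 * hypothetical helper: how many devices must offer a free extent
 * (dev_min) and how much space each must provide (min_free) for the
 * raid index in question.
 */
static void example_relocation_needs(int index, u64 rw_devices,
				     u64 *min_free, u64 *dev_min)
{
	*dev_min = 1;
	if (index == 0) {		/* raid10: striped mirrors */
		*dev_min = 4;
		*min_free >>= 1;
	} else if (index == 1) {	/* raid1: two full copies */
		*dev_min = 2;
	} else if (index == 2) {	/* dup: two copies, one device */
		*min_free <<= 1;
	} else if (index == 3) {	/* raid0: striped over all devices */
		*dev_min = rw_devices;
		do_div(*min_free, *dev_min);
	}
	/* index 4 (single): the defaults are fine */
}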
7524 
7525 static int find_first_block_group(struct btrfs_root *root,
7526 		struct btrfs_path *path, struct btrfs_key *key)
7527 {
7528 	int ret = 0;
7529 	struct btrfs_key found_key;
7530 	struct extent_buffer *leaf;
7531 	int slot;
7532 
7533 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7534 	if (ret < 0)
7535 		goto out;
7536 
7537 	while (1) {
7538 		slot = path->slots[0];
7539 		leaf = path->nodes[0];
7540 		if (slot >= btrfs_header_nritems(leaf)) {
7541 			ret = btrfs_next_leaf(root, path);
7542 			if (ret == 0)
7543 				continue;
7544 			if (ret < 0)
7545 				goto out;
7546 			break;
7547 		}
7548 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7549 
7550 		if (found_key.objectid >= key->objectid &&
7551 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7552 			ret = 0;
7553 			goto out;
7554 		}
7555 		path->slots[0]++;
7556 	}
7557 out:
7558 	return ret;
7559 }
7560 
7561 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7562 {
7563 	struct btrfs_block_group_cache *block_group;
7564 	u64 last = 0;
7565 
7566 	while (1) {
7567 		struct inode *inode;
7568 
7569 		block_group = btrfs_lookup_first_block_group(info, last);
7570 		while (block_group) {
7571 			spin_lock(&block_group->lock);
7572 			if (block_group->iref)
7573 				break;
7574 			spin_unlock(&block_group->lock);
7575 			block_group = next_block_group(info->tree_root,
7576 						       block_group);
7577 		}
7578 		if (!block_group) {
7579 			if (last == 0)
7580 				break;
7581 			last = 0;
7582 			continue;
7583 		}
7584 
7585 		inode = block_group->inode;
7586 		block_group->iref = 0;
7587 		block_group->inode = NULL;
7588 		spin_unlock(&block_group->lock);
7589 		iput(inode);
7590 		last = block_group->key.objectid + block_group->key.offset;
7591 		btrfs_put_block_group(block_group);
7592 	}
7593 }
7594 
7595 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7596 {
7597 	struct btrfs_block_group_cache *block_group;
7598 	struct btrfs_space_info *space_info;
7599 	struct btrfs_caching_control *caching_ctl;
7600 	struct rb_node *n;
7601 
7602 	down_write(&info->extent_commit_sem);
7603 	while (!list_empty(&info->caching_block_groups)) {
7604 		caching_ctl = list_entry(info->caching_block_groups.next,
7605 					 struct btrfs_caching_control, list);
7606 		list_del(&caching_ctl->list);
7607 		put_caching_control(caching_ctl);
7608 	}
7609 	up_write(&info->extent_commit_sem);
7610 
7611 	spin_lock(&info->block_group_cache_lock);
7612 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7613 		block_group = rb_entry(n, struct btrfs_block_group_cache,
7614 				       cache_node);
7615 		rb_erase(&block_group->cache_node,
7616 			 &info->block_group_cache_tree);
7617 		spin_unlock(&info->block_group_cache_lock);
7618 
7619 		down_write(&block_group->space_info->groups_sem);
7620 		list_del(&block_group->list);
7621 		up_write(&block_group->space_info->groups_sem);
7622 
7623 		if (block_group->cached == BTRFS_CACHE_STARTED)
7624 			wait_block_group_cache_done(block_group);
7625 
7626 		/*
7627 		 * We haven't cached this block group, which means we could
7628 		 * possibly have excluded extents on this block group.
7629 		 */
7630 		if (block_group->cached == BTRFS_CACHE_NO)
7631 			free_excluded_extents(info->extent_root, block_group);
7632 
7633 		btrfs_remove_free_space_cache(block_group);
7634 		btrfs_put_block_group(block_group);
7635 
7636 		spin_lock(&info->block_group_cache_lock);
7637 	}
7638 	spin_unlock(&info->block_group_cache_lock);
7639 
7640 	/* now that all the block groups are freed, go through and
7641 	 * free all the space_info structs.  This is only called during
7642 	 * the final stages of unmount, and so we know nobody is
7643 	 * using them.  We call synchronize_rcu() once before we start,
7644 	 * just to be on the safe side.
7645 	 */
7646 	synchronize_rcu();
7647 
7648 	release_global_block_rsv(info);
7649 
7650 	while (!list_empty(&info->space_info)) {
7651 		space_info = list_entry(info->space_info.next,
7652 					struct btrfs_space_info,
7653 					list);
7654 		if (space_info->bytes_pinned > 0 ||
7655 		    space_info->bytes_reserved > 0 ||
7656 		    space_info->bytes_may_use > 0) {
7657 			WARN_ON(1);
7658 			dump_space_info(space_info, 0, 0);
7659 		}
7660 		list_del(&space_info->list);
7661 		kfree(space_info);
7662 	}
7663 	return 0;
7664 }
7665 
7666 static void __link_block_group(struct btrfs_space_info *space_info,
7667 			       struct btrfs_block_group_cache *cache)
7668 {
7669 	int index = get_block_group_index(cache);
7670 
7671 	down_write(&space_info->groups_sem);
7672 	list_add_tail(&cache->list, &space_info->block_groups[index]);
7673 	up_write(&space_info->groups_sem);
7674 }
7675 
7676 int btrfs_read_block_groups(struct btrfs_root *root)
7677 {
7678 	struct btrfs_path *path;
7679 	int ret;
7680 	struct btrfs_block_group_cache *cache;
7681 	struct btrfs_fs_info *info = root->fs_info;
7682 	struct btrfs_space_info *space_info;
7683 	struct btrfs_key key;
7684 	struct btrfs_key found_key;
7685 	struct extent_buffer *leaf;
7686 	int need_clear = 0;
7687 	u64 cache_gen;
7688 
7689 	root = info->extent_root;
7690 	key.objectid = 0;
7691 	key.offset = 0;
7692 	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7693 	path = btrfs_alloc_path();
7694 	if (!path)
7695 		return -ENOMEM;
7696 	path->reada = 1;
7697 
7698 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7699 	if (btrfs_test_opt(root, SPACE_CACHE) &&
7700 	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7701 		need_clear = 1;
7702 	if (btrfs_test_opt(root, CLEAR_CACHE))
7703 		need_clear = 1;
7704 
7705 	while (1) {
7706 		ret = find_first_block_group(root, path, &key);
7707 		if (ret > 0)
7708 			break;
7709 		if (ret != 0)
7710 			goto error;
7711 		leaf = path->nodes[0];
7712 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7713 		cache = kzalloc(sizeof(*cache), GFP_NOFS);
7714 		if (!cache) {
7715 			ret = -ENOMEM;
7716 			goto error;
7717 		}
7718 		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7719 						GFP_NOFS);
7720 		if (!cache->free_space_ctl) {
7721 			kfree(cache);
7722 			ret = -ENOMEM;
7723 			goto error;
7724 		}
7725 
7726 		atomic_set(&cache->count, 1);
7727 		spin_lock_init(&cache->lock);
7728 		cache->fs_info = info;
7729 		INIT_LIST_HEAD(&cache->list);
7730 		INIT_LIST_HEAD(&cache->cluster_list);
7731 
7732 		if (need_clear) {
7733 			/*
7734 			 * When we mount with an old space cache, we need to
7735 			 * set BTRFS_DC_CLEAR and set the dirty flag.
7736 			 *
7737 			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
7738 			 *    truncate the old free space cache inode and
7739 			 *    setup a new one.
7740 			 * b) Setting 'dirty flag' makes sure that we flush
7741 			 *    the new space cache info onto disk.
7742 			 */
7743 			cache->disk_cache_state = BTRFS_DC_CLEAR;
7744 			if (btrfs_test_opt(root, SPACE_CACHE))
7745 				cache->dirty = 1;
7746 		}
7747 
7748 		read_extent_buffer(leaf, &cache->item,
7749 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
7750 				   sizeof(cache->item));
7751 		memcpy(&cache->key, &found_key, sizeof(found_key));
7752 
7753 		key.objectid = found_key.objectid + found_key.offset;
7754 		btrfs_release_path(path);
7755 		cache->flags = btrfs_block_group_flags(&cache->item);
7756 		cache->sectorsize = root->sectorsize;
7757 
7758 		btrfs_init_free_space_ctl(cache);
7759 
7760 		/*
7761 		 * We need to exclude the super stripes now so that the space
7762 		 * info has super bytes accounted for, otherwise we'll think
7763 		 * we have more space than we actually do.
7764 		 */
7765 		exclude_super_stripes(root, cache);
7766 
7767 		/*
7768 		 * check for two cases: either we are full, and therefore
7769 		 * don't need to bother with the caching work since we won't
7770 		 * find any space, or we are empty, and we can just add all
7771 		 * the space in and be done with it.  This saves us a lot of
7772 		 * time, particularly in the full case.
7773 		 */
7774 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7775 			cache->last_byte_to_unpin = (u64)-1;
7776 			cache->cached = BTRFS_CACHE_FINISHED;
7777 			free_excluded_extents(root, cache);
7778 		} else if (btrfs_block_group_used(&cache->item) == 0) {
7779 			cache->last_byte_to_unpin = (u64)-1;
7780 			cache->cached = BTRFS_CACHE_FINISHED;
7781 			add_new_free_space(cache, root->fs_info,
7782 					   found_key.objectid,
7783 					   found_key.objectid +
7784 					   found_key.offset);
7785 			free_excluded_extents(root, cache);
7786 		}
7787 
7788 		ret = update_space_info(info, cache->flags, found_key.offset,
7789 					btrfs_block_group_used(&cache->item),
7790 					&space_info);
7791 		BUG_ON(ret); /* -ENOMEM */
7792 		cache->space_info = space_info;
7793 		spin_lock(&cache->space_info->lock);
7794 		cache->space_info->bytes_readonly += cache->bytes_super;
7795 		spin_unlock(&cache->space_info->lock);
7796 
7797 		__link_block_group(space_info, cache);
7798 
7799 		ret = btrfs_add_block_group_cache(root->fs_info, cache);
7800 		BUG_ON(ret); /* Logic error */
7801 
7802 		set_avail_alloc_bits(root->fs_info, cache->flags);
7803 		if (btrfs_chunk_readonly(root, cache->key.objectid))
7804 			set_block_group_ro(cache, 1);
7805 	}
7806 
7807 	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7808 		if (!(get_alloc_profile(root, space_info->flags) &
7809 		      (BTRFS_BLOCK_GROUP_RAID10 |
7810 		       BTRFS_BLOCK_GROUP_RAID1 |
7811 		       BTRFS_BLOCK_GROUP_DUP)))
7812 			continue;
7813 		/*
7814 		 * avoid allocating from un-mirrored block groups if there are
7815 		 * mirrored block groups.
7816 		 */
7817 		list_for_each_entry(cache, &space_info->block_groups[3], list)
7818 			set_block_group_ro(cache, 1);
7819 		list_for_each_entry(cache, &space_info->block_groups[4], list)
7820 			set_block_group_ro(cache, 1);
7821 	}
7822 
7823 	init_global_block_rsv(info);
7824 	ret = 0;
7825 error:
7826 	btrfs_free_path(path);
7827 	return ret;
7828 }
7829 
7830 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
7831 				       struct btrfs_root *root)
7832 {
7833 	struct btrfs_block_group_cache *block_group, *tmp;
7834 	struct btrfs_root *extent_root = root->fs_info->extent_root;
7835 	struct btrfs_block_group_item item;
7836 	struct btrfs_key key;
7837 	int ret = 0;
7838 
7839 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
7840 				 new_bg_list) {
7841 		list_del_init(&block_group->new_bg_list);
7842 
7843 		if (ret)
7844 			continue;
7845 
7846 		spin_lock(&block_group->lock);
7847 		memcpy(&item, &block_group->item, sizeof(item));
7848 		memcpy(&key, &block_group->key, sizeof(key));
7849 		spin_unlock(&block_group->lock);
7850 
7851 		ret = btrfs_insert_item(trans, extent_root, &key, &item,
7852 					sizeof(item));
7853 		if (ret)
7854 			btrfs_abort_transaction(trans, extent_root, ret);
7855 	}
7856 }
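
/*
 * Flow sketch (hedged): btrfs_make_block_group() below only queues the
 * new group on trans->new_bgs; the items are inserted in one batch by
 * btrfs_create_pending_block_groups() near the end of the transaction.
 * A caller would simply do:
 */
static void example_flush_new_groups(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	/* write out any block groups created earlier in this transaction */
	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);
}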
7857 
7858 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7859 			   struct btrfs_root *root, u64 bytes_used,
7860 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
7861 			   u64 size)
7862 {
7863 	int ret;
7864 	struct btrfs_root *extent_root;
7865 	struct btrfs_block_group_cache *cache;
7866 
7867 	extent_root = root->fs_info->extent_root;
7868 
7869 	root->fs_info->last_trans_log_full_commit = trans->transid;
7870 
7871 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
7872 	if (!cache)
7873 		return -ENOMEM;
7874 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7875 					GFP_NOFS);
7876 	if (!cache->free_space_ctl) {
7877 		kfree(cache);
7878 		return -ENOMEM;
7879 	}
7880 
7881 	cache->key.objectid = chunk_offset;
7882 	cache->key.offset = size;
7883 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7884 	cache->sectorsize = root->sectorsize;
7885 	cache->fs_info = root->fs_info;
7886 
7887 	atomic_set(&cache->count, 1);
7888 	spin_lock_init(&cache->lock);
7889 	INIT_LIST_HEAD(&cache->list);
7890 	INIT_LIST_HEAD(&cache->cluster_list);
7891 	INIT_LIST_HEAD(&cache->new_bg_list);
7892 
7893 	btrfs_init_free_space_ctl(cache);
7894 
7895 	btrfs_set_block_group_used(&cache->item, bytes_used);
7896 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7897 	cache->flags = type;
7898 	btrfs_set_block_group_flags(&cache->item, type);
7899 
7900 	cache->last_byte_to_unpin = (u64)-1;
7901 	cache->cached = BTRFS_CACHE_FINISHED;
7902 	exclude_super_stripes(root, cache);
7903 
7904 	add_new_free_space(cache, root->fs_info, chunk_offset,
7905 			   chunk_offset + size);
7906 
7907 	free_excluded_extents(root, cache);
7908 
7909 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7910 				&cache->space_info);
7911 	BUG_ON(ret); /* -ENOMEM */
7912 	update_global_block_rsv(root->fs_info);
7913 
7914 	spin_lock(&cache->space_info->lock);
7915 	cache->space_info->bytes_readonly += cache->bytes_super;
7916 	spin_unlock(&cache->space_info->lock);
7917 
7918 	__link_block_group(cache->space_info, cache);
7919 
7920 	ret = btrfs_add_block_group_cache(root->fs_info, cache);
7921 	BUG_ON(ret); /* Logic error */
7922 
7923 	list_add_tail(&cache->new_bg_list, &trans->new_bgs);
7924 
7925 	set_avail_alloc_bits(extent_root->fs_info, type);
7926 
7927 	return 0;
7928 }
7929 
7930 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7931 {
7932 	u64 extra_flags = chunk_to_extended(flags) &
7933 				BTRFS_EXTENDED_PROFILE_MASK;
7934 
7935 	if (flags & BTRFS_BLOCK_GROUP_DATA)
7936 		fs_info->avail_data_alloc_bits &= ~extra_flags;
7937 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
7938 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7939 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7940 		fs_info->avail_system_alloc_bits &= ~extra_flags;
7941 }
7942 
7943 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7944 			     struct btrfs_root *root, u64 group_start)
7945 {
7946 	struct btrfs_path *path;
7947 	struct btrfs_block_group_cache *block_group;
7948 	struct btrfs_free_cluster *cluster;
7949 	struct btrfs_root *tree_root = root->fs_info->tree_root;
7950 	struct btrfs_key key;
7951 	struct inode *inode;
7952 	int ret;
7953 	int index;
7954 	int factor;
7955 
7956 	root = root->fs_info->extent_root;
7957 
7958 	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7959 	BUG_ON(!block_group);
7960 	BUG_ON(!block_group->ro);
7961 
7962 	/*
7963 	 * Free the reserved super bytes from this block group before
7964 	 * removing it.
7965 	 */
7966 	free_excluded_extents(root, block_group);
7967 
7968 	memcpy(&key, &block_group->key, sizeof(key));
7969 	index = get_block_group_index(block_group);
7970 	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7971 				  BTRFS_BLOCK_GROUP_RAID1 |
7972 				  BTRFS_BLOCK_GROUP_RAID10))
7973 		factor = 2;
7974 	else
7975 		factor = 1;
7976 
7977 	/* make sure this block group isn't part of an allocation cluster */
7978 	cluster = &root->fs_info->data_alloc_cluster;
7979 	spin_lock(&cluster->refill_lock);
7980 	btrfs_return_cluster_to_free_space(block_group, cluster);
7981 	spin_unlock(&cluster->refill_lock);
7982 
7983 	/*
7984 	 * make sure this block group isn't part of a metadata
7985 	 * allocation cluster
7986 	 */
7987 	cluster = &root->fs_info->meta_alloc_cluster;
7988 	spin_lock(&cluster->refill_lock);
7989 	btrfs_return_cluster_to_free_space(block_group, cluster);
7990 	spin_unlock(&cluster->refill_lock);
7991 
7992 	path = btrfs_alloc_path();
7993 	if (!path) {
7994 		ret = -ENOMEM;
7995 		goto out;
7996 	}
7997 
7998 	inode = lookup_free_space_inode(tree_root, block_group, path);
7999 	if (!IS_ERR(inode)) {
8000 		ret = btrfs_orphan_add(trans, inode);
8001 		if (ret) {
8002 			btrfs_add_delayed_iput(inode);
8003 			goto out;
8004 		}
8005 		clear_nlink(inode);
8006 		/* One for the block group's ref */
8007 		spin_lock(&block_group->lock);
8008 		if (block_group->iref) {
8009 			block_group->iref = 0;
8010 			block_group->inode = NULL;
8011 			spin_unlock(&block_group->lock);
8012 			iput(inode);
8013 		} else {
8014 			spin_unlock(&block_group->lock);
8015 		}
8016 		/* One for our lookup ref */
8017 		btrfs_add_delayed_iput(inode);
8018 	}
8019 
8020 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8021 	key.offset = block_group->key.objectid;
8022 	key.type = 0;
8023 
8024 	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8025 	if (ret < 0)
8026 		goto out;
8027 	if (ret > 0)
8028 		btrfs_release_path(path);
8029 	if (ret == 0) {
8030 		ret = btrfs_del_item(trans, tree_root, path);
8031 		if (ret)
8032 			goto out;
8033 		btrfs_release_path(path);
8034 	}
8035 
8036 	spin_lock(&root->fs_info->block_group_cache_lock);
8037 	rb_erase(&block_group->cache_node,
8038 		 &root->fs_info->block_group_cache_tree);
8039 	spin_unlock(&root->fs_info->block_group_cache_lock);
8040 
8041 	down_write(&block_group->space_info->groups_sem);
8042 	/*
8043 	 * we must use list_del_init so people can check to see if they
8044 	 * are still on the list after taking the semaphore
8045 	 */
8046 	list_del_init(&block_group->list);
8047 	if (list_empty(&block_group->space_info->block_groups[index]))
8048 		clear_avail_alloc_bits(root->fs_info, block_group->flags);
8049 	up_write(&block_group->space_info->groups_sem);
8050 
8051 	if (block_group->cached == BTRFS_CACHE_STARTED)
8052 		wait_block_group_cache_done(block_group);
8053 
8054 	btrfs_remove_free_space_cache(block_group);
8055 
8056 	spin_lock(&block_group->space_info->lock);
8057 	block_group->space_info->total_bytes -= block_group->key.offset;
8058 	block_group->space_info->bytes_readonly -= block_group->key.offset;
8059 	block_group->space_info->disk_total -= block_group->key.offset * factor;
8060 	spin_unlock(&block_group->space_info->lock);
8061 
8062 	memcpy(&key, &block_group->key, sizeof(key));
8063 
8064 	btrfs_clear_space_info_full(root->fs_info);
8065 
8066 	btrfs_put_block_group(block_group);
8067 	btrfs_put_block_group(block_group);
8068 
8069 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8070 	if (ret > 0)
8071 		ret = -EIO;
8072 	if (ret < 0)
8073 		goto out;
8074 
8075 	ret = btrfs_del_item(trans, root, path);
8076 out:
8077 	btrfs_free_path(path);
8078 	return ret;
8079 }
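
/*
 * Flow sketch (hedged, hypothetical wrapper): how removal fits together
 * with the helpers above.  The real sequencing, including the actual
 * extent relocation, lives in volumes.c and relocation.c.
 */
static int example_evacuate_group(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, u64 group_start)
{
	int ret;

	/* cheap feasibility check first */
	ret = btrfs_can_relocate(root, group_start);
	if (ret)
		return ret;

	/* ... mark the group read-only and relocate its extents ... */

	/* finally drop the now-empty block group */
	return btrfs_remove_block_group(trans, root, group_start);
}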
8080 
8081 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8082 {
8083 	struct btrfs_space_info *space_info;
8084 	struct btrfs_super_block *disk_super;
8085 	u64 features;
8086 	u64 flags;
8087 	int mixed = 0;
8088 	int ret;
8089 
8090 	disk_super = fs_info->super_copy;
8091 	if (!btrfs_super_root(disk_super))
8092 		return 1;
8093 
8094 	features = btrfs_super_incompat_flags(disk_super);
8095 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8096 		mixed = 1;
8097 
8098 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
8099 	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8100 	if (ret)
8101 		goto out;
8102 
8103 	if (mixed) {
8104 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8105 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8106 	} else {
8107 		flags = BTRFS_BLOCK_GROUP_METADATA;
8108 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8109 		if (ret)
8110 			goto out;
8111 
8112 		flags = BTRFS_BLOCK_GROUP_DATA;
8113 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8114 	}
8115 out:
8116 	return ret;
8117 }
8118 
8119 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8120 {
8121 	return unpin_extent_range(root, start, end);
8122 }
8123 
8124 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8125 			       u64 num_bytes, u64 *actual_bytes)
8126 {
8127 	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8128 }
8129 
8130 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8131 {
8132 	struct btrfs_fs_info *fs_info = root->fs_info;
8133 	struct btrfs_block_group_cache *cache = NULL;
8134 	u64 group_trimmed;
8135 	u64 start;
8136 	u64 end;
8137 	u64 trimmed = 0;
8138 	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8139 	int ret = 0;
8140 
8141 	/*
8142 	 * try to trim all FS space; block groups may start from non-zero.
8143 	 */
8144 	if (range->len == total_bytes)
8145 		cache = btrfs_lookup_first_block_group(fs_info, range->start);
8146 	else
8147 		cache = btrfs_lookup_block_group(fs_info, range->start);
8148 
8149 	while (cache) {
8150 		if (cache->key.objectid >= (range->start + range->len)) {
8151 			btrfs_put_block_group(cache);
8152 			break;
8153 		}
8154 
8155 		start = max(range->start, cache->key.objectid);
8156 		end = min(range->start + range->len,
8157 				cache->key.objectid + cache->key.offset);
8158 
8159 		if (end - start >= range->minlen) {
8160 			if (!block_group_cache_done(cache)) {
8161 				ret = cache_block_group(cache, NULL, root, 0);
8162 				if (!ret)
8163 					wait_block_group_cache_done(cache);
8164 			}
8165 			ret = btrfs_trim_block_group(cache,
8166 						     &group_trimmed,
8167 						     start,
8168 						     end,
8169 						     range->minlen);
8170 
8171 			trimmed += group_trimmed;
8172 			if (ret) {
8173 				btrfs_put_block_group(cache);
8174 				break;
8175 			}
8176 		}
8177 
8178 		cache = next_block_group(fs_info->tree_root, cache);
8179 	}
8180 
8181 	range->len = trimmed;
8182 	return ret;
8183 }
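
/*
 * Userspace sketch (not part of the kernel source): btrfs_trim_fs() is
 * reached through the generic FITRIM ioctl, so a minimal fstrim-like
 * tool looks as follows.  Built as a normal user program, never with
 * the kernel.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (__u64)-1,	/* whole filesystem */
		.minlen = 0,
	};
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}
#endif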
8184