xref: /openbmc/linux/fs/btrfs/extent-tree.c (revision 6774def6)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39 
40 #undef SCRAMBLE_DELAYED_REFS
41 
42 /*
43  * control flags for do_chunk_alloc's force field
44  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45  * if we really need one.
46  *
47  * CHUNK_ALLOC_LIMITED means to only try and allocate one
48  * if we have very few chunks already allocated.  This is
49  * used as part of the clustering code to help make sure
50  * we have a good pool of storage to cluster in, without
51  * filling the FS with empty chunks
52  *
53  * CHUNK_ALLOC_FORCE means it must try to allocate one
54  *
55  */
56 enum {
57 	CHUNK_ALLOC_NO_FORCE = 0,
58 	CHUNK_ALLOC_LIMITED = 1,
59 	CHUNK_ALLOC_FORCE = 2,
60 };
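
/*
 * Illustrative sketch only: a hypothetical helper (it is not used anywhere
 * below) showing how a caller might pick one of the force levels above.
 * The real callers make this decision inline before calling do_chunk_alloc().
 */
static inline int example_chunk_alloc_force(int must_have_chunk,
					    int few_chunks_allocated)
{
	if (must_have_chunk)
		return CHUNK_ALLOC_FORCE;
	if (few_chunks_allocated)
		return CHUNK_ALLOC_LIMITED;
	return CHUNK_ALLOC_NO_FORCE;
}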
61 
62 /*
63  * Control how reservations are dealt with.
64  *
65  * RESERVE_FREE - freeing a reservation.
66  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67  *   ENOSPC accounting
68  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69  *   bytes_may_use as the ENOSPC accounting is done elsewhere
70  */
71 enum {
72 	RESERVE_FREE = 0,
73 	RESERVE_ALLOC = 1,
74 	RESERVE_ALLOC_NO_ACCOUNT = 2,
75 };
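
/*
 * Illustrative sketch only (hypothetical helper, not called by anything in
 * this file): mapping a caller's intent onto the reserve ops above.
 */
static inline int example_reserve_op(int freeing, int accounted_elsewhere)
{
	if (freeing)
		return RESERVE_FREE;
	return accounted_elsewhere ? RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
}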
76 
77 static int update_block_group(struct btrfs_root *root,
78 			      u64 bytenr, u64 num_bytes, int alloc);
79 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
80 				struct btrfs_root *root,
81 				u64 bytenr, u64 num_bytes, u64 parent,
82 				u64 root_objectid, u64 owner_objectid,
83 				u64 owner_offset, int refs_to_drop,
84 				struct btrfs_delayed_extent_op *extra_op,
85 				int no_quota);
86 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
87 				    struct extent_buffer *leaf,
88 				    struct btrfs_extent_item *ei);
89 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
90 				      struct btrfs_root *root,
91 				      u64 parent, u64 root_objectid,
92 				      u64 flags, u64 owner, u64 offset,
93 				      struct btrfs_key *ins, int ref_mod);
94 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
95 				     struct btrfs_root *root,
96 				     u64 parent, u64 root_objectid,
97 				     u64 flags, struct btrfs_disk_key *key,
98 				     int level, struct btrfs_key *ins,
99 				     int no_quota);
100 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
101 			  struct btrfs_root *extent_root, u64 flags,
102 			  int force);
103 static int find_next_key(struct btrfs_path *path, int level,
104 			 struct btrfs_key *key);
105 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
106 			    int dump_block_groups);
107 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
108 				       u64 num_bytes, int reserve,
109 				       int delalloc);
110 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
111 			       u64 num_bytes);
112 int btrfs_pin_extent(struct btrfs_root *root,
113 		     u64 bytenr, u64 num_bytes, int reserved);
114 
115 static noinline int
116 block_group_cache_done(struct btrfs_block_group_cache *cache)
117 {
118 	smp_mb();
119 	return cache->cached == BTRFS_CACHE_FINISHED ||
120 		cache->cached == BTRFS_CACHE_ERROR;
121 }
122 
123 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 {
125 	return (cache->flags & bits) == bits;
126 }
127 
128 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
129 {
130 	atomic_inc(&cache->count);
131 }
132 
133 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
134 {
135 	if (atomic_dec_and_test(&cache->count)) {
136 		WARN_ON(cache->pinned > 0);
137 		WARN_ON(cache->reserved > 0);
138 		kfree(cache->free_space_ctl);
139 		kfree(cache);
140 	}
141 }
142 
143 /*
144  * this adds the block group to the fs_info rb tree for the block group
145  * cache
146  */
147 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
148 				struct btrfs_block_group_cache *block_group)
149 {
150 	struct rb_node **p;
151 	struct rb_node *parent = NULL;
152 	struct btrfs_block_group_cache *cache;
153 
154 	spin_lock(&info->block_group_cache_lock);
155 	p = &info->block_group_cache_tree.rb_node;
156 
157 	while (*p) {
158 		parent = *p;
159 		cache = rb_entry(parent, struct btrfs_block_group_cache,
160 				 cache_node);
161 		if (block_group->key.objectid < cache->key.objectid) {
162 			p = &(*p)->rb_left;
163 		} else if (block_group->key.objectid > cache->key.objectid) {
164 			p = &(*p)->rb_right;
165 		} else {
166 			spin_unlock(&info->block_group_cache_lock);
167 			return -EEXIST;
168 		}
169 	}
170 
171 	rb_link_node(&block_group->cache_node, parent, p);
172 	rb_insert_color(&block_group->cache_node,
173 			&info->block_group_cache_tree);
174 
175 	if (info->first_logical_byte > block_group->key.objectid)
176 		info->first_logical_byte = block_group->key.objectid;
177 
178 	spin_unlock(&info->block_group_cache_lock);
179 
180 	return 0;
181 }
182 
183 /*
184  * This will return the block group at or after bytenr if contains is 0, else
185  * it will return the block group that contains the bytenr
186  */
187 static struct btrfs_block_group_cache *
188 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
189 			      int contains)
190 {
191 	struct btrfs_block_group_cache *cache, *ret = NULL;
192 	struct rb_node *n;
193 	u64 end, start;
194 
195 	spin_lock(&info->block_group_cache_lock);
196 	n = info->block_group_cache_tree.rb_node;
197 
198 	while (n) {
199 		cache = rb_entry(n, struct btrfs_block_group_cache,
200 				 cache_node);
201 		end = cache->key.objectid + cache->key.offset - 1;
202 		start = cache->key.objectid;
203 
204 		if (bytenr < start) {
205 			if (!contains && (!ret || start < ret->key.objectid))
206 				ret = cache;
207 			n = n->rb_left;
208 		} else if (bytenr > start) {
209 			if (contains && bytenr <= end) {
210 				ret = cache;
211 				break;
212 			}
213 			n = n->rb_right;
214 		} else {
215 			ret = cache;
216 			break;
217 		}
218 	}
219 	if (ret) {
220 		btrfs_get_block_group(ret);
221 		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
222 			info->first_logical_byte = ret->key.objectid;
223 	}
224 	spin_unlock(&info->block_group_cache_lock);
225 
226 	return ret;
227 }
228 
229 static int add_excluded_extent(struct btrfs_root *root,
230 			       u64 start, u64 num_bytes)
231 {
232 	u64 end = start + num_bytes - 1;
233 	set_extent_bits(&root->fs_info->freed_extents[0],
234 			start, end, EXTENT_UPTODATE, GFP_NOFS);
235 	set_extent_bits(&root->fs_info->freed_extents[1],
236 			start, end, EXTENT_UPTODATE, GFP_NOFS);
237 	return 0;
238 }
239 
240 static void free_excluded_extents(struct btrfs_root *root,
241 				  struct btrfs_block_group_cache *cache)
242 {
243 	u64 start, end;
244 
245 	start = cache->key.objectid;
246 	end = start + cache->key.offset - 1;
247 
248 	clear_extent_bits(&root->fs_info->freed_extents[0],
249 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
250 	clear_extent_bits(&root->fs_info->freed_extents[1],
251 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
252 }
253 
254 static int exclude_super_stripes(struct btrfs_root *root,
255 				 struct btrfs_block_group_cache *cache)
256 {
257 	u64 bytenr;
258 	u64 *logical;
259 	int stripe_len;
260 	int i, nr, ret;
261 
262 	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
263 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
264 		cache->bytes_super += stripe_len;
265 		ret = add_excluded_extent(root, cache->key.objectid,
266 					  stripe_len);
267 		if (ret)
268 			return ret;
269 	}
270 
271 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
272 		bytenr = btrfs_sb_offset(i);
273 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
274 				       cache->key.objectid, bytenr,
275 				       0, &logical, &nr, &stripe_len);
276 		if (ret)
277 			return ret;
278 
279 		while (nr--) {
280 			u64 start, len;
281 
282 			if (logical[nr] > cache->key.objectid +
283 			    cache->key.offset)
284 				continue;
285 
286 			if (logical[nr] + stripe_len <= cache->key.objectid)
287 				continue;
288 
289 			start = logical[nr];
290 			if (start < cache->key.objectid) {
291 				start = cache->key.objectid;
292 				len = (logical[nr] + stripe_len) - start;
293 			} else {
294 				len = min_t(u64, stripe_len,
295 					    cache->key.objectid +
296 					    cache->key.offset - start);
297 			}
298 
299 			cache->bytes_super += len;
300 			ret = add_excluded_extent(root, start, len);
301 			if (ret) {
302 				kfree(logical);
303 				return ret;
304 			}
305 		}
306 
307 		kfree(logical);
308 	}
309 	return 0;
310 }
311 
312 static struct btrfs_caching_control *
313 get_caching_control(struct btrfs_block_group_cache *cache)
314 {
315 	struct btrfs_caching_control *ctl;
316 
317 	spin_lock(&cache->lock);
318 	if (cache->cached != BTRFS_CACHE_STARTED) {
319 		spin_unlock(&cache->lock);
320 		return NULL;
321 	}
322 
323 	/* We're loading it the fast way, so we don't have a caching_ctl. */
324 	if (!cache->caching_ctl) {
325 		spin_unlock(&cache->lock);
326 		return NULL;
327 	}
328 
329 	ctl = cache->caching_ctl;
330 	atomic_inc(&ctl->count);
331 	spin_unlock(&cache->lock);
332 	return ctl;
333 }
334 
335 static void put_caching_control(struct btrfs_caching_control *ctl)
336 {
337 	if (atomic_dec_and_test(&ctl->count))
338 		kfree(ctl);
339 }
340 
341 /*
342  * this is only called by cache_block_group; since we could have freed extents,
343  * we need to check the pinned_extents for any extents that can't be used yet,
344  * since their free space will only be released once the transaction commits.
345  */
346 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
347 			      struct btrfs_fs_info *info, u64 start, u64 end)
348 {
349 	u64 extent_start, extent_end, size, total_added = 0;
350 	int ret;
351 
352 	while (start < end) {
353 		ret = find_first_extent_bit(info->pinned_extents, start,
354 					    &extent_start, &extent_end,
355 					    EXTENT_DIRTY | EXTENT_UPTODATE,
356 					    NULL);
357 		if (ret)
358 			break;
359 
360 		if (extent_start <= start) {
361 			start = extent_end + 1;
362 		} else if (extent_start > start && extent_start < end) {
363 			size = extent_start - start;
364 			total_added += size;
365 			ret = btrfs_add_free_space(block_group, start,
366 						   size);
367 			BUG_ON(ret); /* -ENOMEM or logic error */
368 			start = extent_end + 1;
369 		} else {
370 			break;
371 		}
372 	}
373 
374 	if (start < end) {
375 		size = end - start;
376 		total_added += size;
377 		ret = btrfs_add_free_space(block_group, start, size);
378 		BUG_ON(ret); /* -ENOMEM or logic error */
379 	}
380 
381 	return total_added;
382 }
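
/*
 * Worked example for add_new_free_space() (illustrative ranges only): if the
 * uncached range is [A, D) and a pinned extent covers [B, C] inside it, the
 * loop adds [A, B) to the free space cache, advances start past the pinned
 * extent, and the final check adds [C + 1, D).  The pinned bytes stay out of
 * the cache until the transaction commits and unpins them.
 */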
383 
384 static noinline void caching_thread(struct btrfs_work *work)
385 {
386 	struct btrfs_block_group_cache *block_group;
387 	struct btrfs_fs_info *fs_info;
388 	struct btrfs_caching_control *caching_ctl;
389 	struct btrfs_root *extent_root;
390 	struct btrfs_path *path;
391 	struct extent_buffer *leaf;
392 	struct btrfs_key key;
393 	u64 total_found = 0;
394 	u64 last = 0;
395 	u32 nritems;
396 	int ret = -ENOMEM;
397 
398 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
399 	block_group = caching_ctl->block_group;
400 	fs_info = block_group->fs_info;
401 	extent_root = fs_info->extent_root;
402 
403 	path = btrfs_alloc_path();
404 	if (!path)
405 		goto out;
406 
407 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
408 
409 	/*
410 	 * We don't want to deadlock with somebody trying to allocate a new
411 	 * extent for the extent root while also trying to search the extent
412 	 * root to add free space.  So we skip locking and search the commit
413 	 * root, since it's read-only.
414 	 */
415 	path->skip_locking = 1;
416 	path->search_commit_root = 1;
417 	path->reada = 1;
418 
419 	key.objectid = last;
420 	key.offset = 0;
421 	key.type = BTRFS_EXTENT_ITEM_KEY;
422 again:
423 	mutex_lock(&caching_ctl->mutex);
424 	/* need to make sure the commit_root doesn't disappear */
425 	down_read(&fs_info->commit_root_sem);
426 
427 next:
428 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
429 	if (ret < 0)
430 		goto err;
431 
432 	leaf = path->nodes[0];
433 	nritems = btrfs_header_nritems(leaf);
434 
435 	while (1) {
436 		if (btrfs_fs_closing(fs_info) > 1) {
437 			last = (u64)-1;
438 			break;
439 		}
440 
441 		if (path->slots[0] < nritems) {
442 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
443 		} else {
444 			ret = find_next_key(path, 0, &key);
445 			if (ret)
446 				break;
447 
448 			if (need_resched() ||
449 			    rwsem_is_contended(&fs_info->commit_root_sem)) {
450 				caching_ctl->progress = last;
451 				btrfs_release_path(path);
452 				up_read(&fs_info->commit_root_sem);
453 				mutex_unlock(&caching_ctl->mutex);
454 				cond_resched();
455 				goto again;
456 			}
457 
458 			ret = btrfs_next_leaf(extent_root, path);
459 			if (ret < 0)
460 				goto err;
461 			if (ret)
462 				break;
463 			leaf = path->nodes[0];
464 			nritems = btrfs_header_nritems(leaf);
465 			continue;
466 		}
467 
468 		if (key.objectid < last) {
469 			key.objectid = last;
470 			key.offset = 0;
471 			key.type = BTRFS_EXTENT_ITEM_KEY;
472 
473 			caching_ctl->progress = last;
474 			btrfs_release_path(path);
475 			goto next;
476 		}
477 
478 		if (key.objectid < block_group->key.objectid) {
479 			path->slots[0]++;
480 			continue;
481 		}
482 
483 		if (key.objectid >= block_group->key.objectid +
484 		    block_group->key.offset)
485 			break;
486 
487 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
488 		    key.type == BTRFS_METADATA_ITEM_KEY) {
489 			total_found += add_new_free_space(block_group,
490 							  fs_info, last,
491 							  key.objectid);
492 			if (key.type == BTRFS_METADATA_ITEM_KEY)
493 				last = key.objectid +
494 					fs_info->tree_root->nodesize;
495 			else
496 				last = key.objectid + key.offset;
497 
498 			if (total_found > (1024 * 1024 * 2)) {
499 				total_found = 0;
500 				wake_up(&caching_ctl->wait);
501 			}
502 		}
503 		path->slots[0]++;
504 	}
505 	ret = 0;
506 
507 	total_found += add_new_free_space(block_group, fs_info, last,
508 					  block_group->key.objectid +
509 					  block_group->key.offset);
510 	caching_ctl->progress = (u64)-1;
511 
512 	spin_lock(&block_group->lock);
513 	block_group->caching_ctl = NULL;
514 	block_group->cached = BTRFS_CACHE_FINISHED;
515 	spin_unlock(&block_group->lock);
516 
517 err:
518 	btrfs_free_path(path);
519 	up_read(&fs_info->commit_root_sem);
520 
521 	free_excluded_extents(extent_root, block_group);
522 
523 	mutex_unlock(&caching_ctl->mutex);
524 out:
525 	if (ret) {
526 		spin_lock(&block_group->lock);
527 		block_group->caching_ctl = NULL;
528 		block_group->cached = BTRFS_CACHE_ERROR;
529 		spin_unlock(&block_group->lock);
530 	}
531 	wake_up(&caching_ctl->wait);
532 
533 	put_caching_control(caching_ctl);
534 	btrfs_put_block_group(block_group);
535 }
536 
537 static int cache_block_group(struct btrfs_block_group_cache *cache,
538 			     int load_cache_only)
539 {
540 	DEFINE_WAIT(wait);
541 	struct btrfs_fs_info *fs_info = cache->fs_info;
542 	struct btrfs_caching_control *caching_ctl;
543 	int ret = 0;
544 
545 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
546 	if (!caching_ctl)
547 		return -ENOMEM;
548 
549 	INIT_LIST_HEAD(&caching_ctl->list);
550 	mutex_init(&caching_ctl->mutex);
551 	init_waitqueue_head(&caching_ctl->wait);
552 	caching_ctl->block_group = cache;
553 	caching_ctl->progress = cache->key.objectid;
554 	atomic_set(&caching_ctl->count, 1);
555 	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
556 			caching_thread, NULL, NULL);
557 
558 	spin_lock(&cache->lock);
559 	/*
560 	 * This should be a rare occasion, but this could happen I think in the
561 	 * case where one thread starts to load the space cache info, and then
562 	 * some other thread starts a transaction commit which tries to do an
563 	 * allocation while the other thread is still loading the space cache
564 	 * info.  The previous loop should have kept us from choosing this block
565 	 * group, but if we've moved to the state where we will wait on caching
566 	 * block groups we need to first check if we're doing a fast load here,
567 	 * so we can wait for it to finish, otherwise we could end up allocating
568 	 * from a block group whose cache gets evicted for one reason or
569 	 * another.
570 	 */
571 	while (cache->cached == BTRFS_CACHE_FAST) {
572 		struct btrfs_caching_control *ctl;
573 
574 		ctl = cache->caching_ctl;
575 		atomic_inc(&ctl->count);
576 		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
577 		spin_unlock(&cache->lock);
578 
579 		schedule();
580 
581 		finish_wait(&ctl->wait, &wait);
582 		put_caching_control(ctl);
583 		spin_lock(&cache->lock);
584 	}
585 
586 	if (cache->cached != BTRFS_CACHE_NO) {
587 		spin_unlock(&cache->lock);
588 		kfree(caching_ctl);
589 		return 0;
590 	}
591 	WARN_ON(cache->caching_ctl);
592 	cache->caching_ctl = caching_ctl;
593 	cache->cached = BTRFS_CACHE_FAST;
594 	spin_unlock(&cache->lock);
595 
596 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
597 		ret = load_free_space_cache(fs_info, cache);
598 
599 		spin_lock(&cache->lock);
600 		if (ret == 1) {
601 			cache->caching_ctl = NULL;
602 			cache->cached = BTRFS_CACHE_FINISHED;
603 			cache->last_byte_to_unpin = (u64)-1;
604 		} else {
605 			if (load_cache_only) {
606 				cache->caching_ctl = NULL;
607 				cache->cached = BTRFS_CACHE_NO;
608 			} else {
609 				cache->cached = BTRFS_CACHE_STARTED;
610 			}
611 		}
612 		spin_unlock(&cache->lock);
613 		wake_up(&caching_ctl->wait);
614 		if (ret == 1) {
615 			put_caching_control(caching_ctl);
616 			free_excluded_extents(fs_info->extent_root, cache);
617 			return 0;
618 		}
619 	} else {
620 		/*
621 		 * We are not going to do the fast caching, set cached to the
622 		 * appropriate value and wakeup any waiters.
623 		 */
624 		spin_lock(&cache->lock);
625 		if (load_cache_only) {
626 			cache->caching_ctl = NULL;
627 			cache->cached = BTRFS_CACHE_NO;
628 		} else {
629 			cache->cached = BTRFS_CACHE_STARTED;
630 		}
631 		spin_unlock(&cache->lock);
632 		wake_up(&caching_ctl->wait);
633 	}
634 
635 	if (load_cache_only) {
636 		put_caching_control(caching_ctl);
637 		return 0;
638 	}
639 
640 	down_write(&fs_info->commit_root_sem);
641 	atomic_inc(&caching_ctl->count);
642 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
643 	up_write(&fs_info->commit_root_sem);
644 
645 	btrfs_get_block_group(cache);
646 
647 	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
648 
649 	return ret;
650 }
651 
652 /*
653  * return the block group that starts at or after bytenr
654  */
655 static struct btrfs_block_group_cache *
656 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
657 {
658 	struct btrfs_block_group_cache *cache;
659 
660 	cache = block_group_cache_tree_search(info, bytenr, 0);
661 
662 	return cache;
663 }
664 
665 /*
666  * return the block group that contains the given bytenr
667  */
668 struct btrfs_block_group_cache *btrfs_lookup_block_group(
669 						 struct btrfs_fs_info *info,
670 						 u64 bytenr)
671 {
672 	struct btrfs_block_group_cache *cache;
673 
674 	cache = block_group_cache_tree_search(info, bytenr, 1);
675 
676 	return cache;
677 }
678 
679 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
680 						  u64 flags)
681 {
682 	struct list_head *head = &info->space_info;
683 	struct btrfs_space_info *found;
684 
685 	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
686 
687 	rcu_read_lock();
688 	list_for_each_entry_rcu(found, head, list) {
689 		if (found->flags & flags) {
690 			rcu_read_unlock();
691 			return found;
692 		}
693 	}
694 	rcu_read_unlock();
695 	return NULL;
696 }
697 
698 /*
699  * after adding space to the filesystem, we need to clear the full flags
700  * on all the space infos.
701  */
702 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
703 {
704 	struct list_head *head = &info->space_info;
705 	struct btrfs_space_info *found;
706 
707 	rcu_read_lock();
708 	list_for_each_entry_rcu(found, head, list)
709 		found->full = 0;
710 	rcu_read_unlock();
711 }
712 
713 /* simple helper to search for an existing data extent at a given offset */
714 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
715 {
716 	int ret;
717 	struct btrfs_key key;
718 	struct btrfs_path *path;
719 
720 	path = btrfs_alloc_path();
721 	if (!path)
722 		return -ENOMEM;
723 
724 	key.objectid = start;
725 	key.offset = len;
726 	key.type = BTRFS_EXTENT_ITEM_KEY;
727 	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
728 				0, 0);
729 	btrfs_free_path(path);
730 	return ret;
731 }
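
/*
 * Usage sketch (hypothetical caller): the helper above follows the usual
 * btrfs_search_slot() convention, so 0 means the exact (start,
 * BTRFS_EXTENT_ITEM_KEY, len) key exists, > 0 means it does not, and a
 * negative value is an error.
 */
static inline bool example_data_extent_exists(struct btrfs_root *root,
					      u64 start, u64 len)
{
	return btrfs_lookup_data_extent(root, start, len) == 0;
}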
732 
733 /*
734  * helper function to lookup reference count and flags of a tree block.
735  *
736  * the head node for a delayed ref is used to store the sum of all the
737  * reference count modifications queued up in the rbtree. The head
738  * node may also store the extent flags to set. This way you can check
739  * to see what the reference count and extent flags would be once all of
740  * the delayed refs have been processed.
741  */
742 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
743 			     struct btrfs_root *root, u64 bytenr,
744 			     u64 offset, int metadata, u64 *refs, u64 *flags)
745 {
746 	struct btrfs_delayed_ref_head *head;
747 	struct btrfs_delayed_ref_root *delayed_refs;
748 	struct btrfs_path *path;
749 	struct btrfs_extent_item *ei;
750 	struct extent_buffer *leaf;
751 	struct btrfs_key key;
752 	u32 item_size;
753 	u64 num_refs;
754 	u64 extent_flags;
755 	int ret;
756 
757 	/*
758 	 * If we don't have skinny metadata, don't bother doing anything
759 	 * different
760 	 */
761 	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
762 		offset = root->nodesize;
763 		metadata = 0;
764 	}
765 
766 	path = btrfs_alloc_path();
767 	if (!path)
768 		return -ENOMEM;
769 
770 	if (!trans) {
771 		path->skip_locking = 1;
772 		path->search_commit_root = 1;
773 	}
774 
775 search_again:
776 	key.objectid = bytenr;
777 	key.offset = offset;
778 	if (metadata)
779 		key.type = BTRFS_METADATA_ITEM_KEY;
780 	else
781 		key.type = BTRFS_EXTENT_ITEM_KEY;
782 
783 	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
784 				&key, path, 0, 0);
785 	if (ret < 0)
786 		goto out_free;
787 
788 	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
789 		if (path->slots[0]) {
790 			path->slots[0]--;
791 			btrfs_item_key_to_cpu(path->nodes[0], &key,
792 					      path->slots[0]);
793 			if (key.objectid == bytenr &&
794 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
795 			    key.offset == root->nodesize)
796 				ret = 0;
797 		}
798 	}
799 
800 	if (ret == 0) {
801 		leaf = path->nodes[0];
802 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
803 		if (item_size >= sizeof(*ei)) {
804 			ei = btrfs_item_ptr(leaf, path->slots[0],
805 					    struct btrfs_extent_item);
806 			num_refs = btrfs_extent_refs(leaf, ei);
807 			extent_flags = btrfs_extent_flags(leaf, ei);
808 		} else {
809 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
810 			struct btrfs_extent_item_v0 *ei0;
811 			BUG_ON(item_size != sizeof(*ei0));
812 			ei0 = btrfs_item_ptr(leaf, path->slots[0],
813 					     struct btrfs_extent_item_v0);
814 			num_refs = btrfs_extent_refs_v0(leaf, ei0);
815 			/* FIXME: this isn't correct for data */
816 			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
817 #else
818 			BUG();
819 #endif
820 		}
821 		BUG_ON(num_refs == 0);
822 	} else {
823 		num_refs = 0;
824 		extent_flags = 0;
825 		ret = 0;
826 	}
827 
828 	if (!trans)
829 		goto out;
830 
831 	delayed_refs = &trans->transaction->delayed_refs;
832 	spin_lock(&delayed_refs->lock);
833 	head = btrfs_find_delayed_ref_head(trans, bytenr);
834 	if (head) {
835 		if (!mutex_trylock(&head->mutex)) {
836 			atomic_inc(&head->node.refs);
837 			spin_unlock(&delayed_refs->lock);
838 
839 			btrfs_release_path(path);
840 
841 			/*
842 			 * Mutex was contended, block until it's released and try
843 			 * again
844 			 */
845 			mutex_lock(&head->mutex);
846 			mutex_unlock(&head->mutex);
847 			btrfs_put_delayed_ref(&head->node);
848 			goto search_again;
849 		}
850 		spin_lock(&head->lock);
851 		if (head->extent_op && head->extent_op->update_flags)
852 			extent_flags |= head->extent_op->flags_to_set;
853 		else
854 			BUG_ON(num_refs == 0);
855 
856 		num_refs += head->node.ref_mod;
857 		spin_unlock(&head->lock);
858 		mutex_unlock(&head->mutex);
859 	}
860 	spin_unlock(&delayed_refs->lock);
861 out:
862 	WARN_ON(num_refs == 0);
863 	if (refs)
864 		*refs = num_refs;
865 	if (flags)
866 		*flags = extent_flags;
867 out_free:
868 	btrfs_free_path(path);
869 	return ret;
870 }
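
/*
 * Usage sketch (hypothetical helper, not part of the original code): look up
 * the reference count of a tree block at a given level, including pending
 * delayed ref modifications, and report whether the block is shared.
 */
static inline int example_tree_block_is_shared(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       u64 bytenr, int level)
{
	u64 refs = 0;
	u64 flags = 0;
	int ret;

	ret = btrfs_lookup_extent_info(trans, root, bytenr, level, 1,
				       &refs, &flags);
	if (ret < 0)
		return ret;
	return refs > 1;
}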
871 
872 /*
873  * Back reference rules.  Back refs have three main goals:
874  *
875  * 1) differentiate between all holders of references to an extent so that
876  *    when a reference is dropped we can make sure it was a valid reference
877  *    before freeing the extent.
878  *
879  * 2) Provide enough information to quickly find the holders of an extent
880  *    if we notice a given block is corrupted or bad.
881  *
882  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
883  *    maintenance.  This is actually the same as #2, but with a slightly
884  *    different use case.
885  *
886  * There are two kinds of back refs. The implicit back refs is optimized
887  * for pointers in non-shared tree blocks. For a given pointer in a block,
888  * back refs of this kind provide information about the block's owner tree
889  * and the pointer's key. This information allows us to find the block by
890  * b-tree searching. The full back refs is for pointers in tree blocks not
891  * referenced by their owner trees. The location of tree block is recorded
892  * in the back refs. Actually the full back refs is generic, and can be
893  * used in all cases the implicit back refs is used. The major shortcoming
894  * of the full back refs is its overhead. Every time a tree block gets
895  * COWed, we have to update back refs entry for all pointers in it.
896  *
897  * For a newly allocated tree block, we use implicit back refs for
898  * pointers in it. This means most tree related operations only involve
899  * implicit back refs. For a tree block created in old transaction, the
900  * only way to drop a reference to it is COW it. So we can detect the
901  * event that tree block loses its owner tree's reference and do the
902  * back refs conversion.
903  *
904  * When a tree block is COW'd through a tree, there are four cases:
905  *
906  * The reference count of the block is one and the tree is the block's
907  * owner tree. Nothing to do in this case.
908  *
909  * The reference count of the block is one and the tree is not the
910  * block's owner tree. In this case, full back refs is used for pointers
911  * in the block. Remove these full back refs, add implicit back refs for
912  * every pointer in the new block.
913  *
914  * The reference count of the block is greater than one and the tree is
915  * the block's owner tree. In this case, implicit back refs is used for
916  * pointers in the block. Add full back refs for every pointer in the
917  * block, increase lower level extents' reference counts. The original
918  * implicit back refs are carried over to the new block.
919  *
920  * The reference count of the block is greater than one and the tree is
921  * not the block's owner tree. Add implicit back refs for every pointer in
922  * the new block, increase lower level extents' reference count.
923  *
924  * Back Reference Key composing:
925  *
926  * The key objectid corresponds to the first byte in the extent,
927  * The key type is used to differentiate between types of back refs.
928  * There are different meanings of the key offset for different types
929  * of back refs.
930  *
931  * File extents can be referenced by:
932  *
933  * - multiple snapshots, subvolumes, or different generations in one subvol
934  * - different files inside a single subvolume
935  * - different offsets inside a file (bookend extents in file.c)
936  *
937  * The extent ref structure for the implicit back refs has fields for:
938  *
939  * - Objectid of the subvolume root
940  * - objectid of the file holding the reference
941  * - original offset in the file
942  * - how many bookend extents
943  *
944  * The key offset for the implicit back refs is a hash of the first
945  * three fields.
946  *
947  * The extent ref structure for the full back refs has a field for:
948  *
949  * - number of pointers in the tree leaf
950  *
951  * The key offset for the full back refs is the first byte of
952  * the tree leaf.
953  *
954  * When a file extent is allocated, the implicit back refs is used.
955  * The fields are filled in:
956  *
957  *     (root_key.objectid, inode objectid, offset in file, 1)
958  *
959  * When a file extent is removed by file truncation, we find the
960  * corresponding implicit back refs and check the following fields:
961  *
962  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
963  *
964  * Btree extents can be referenced by:
965  *
966  * - Different subvolumes
967  *
968  * Both the implicit back refs and the full back refs for tree blocks
969  * only consist of key. The key offset for the implicit back refs is
970  * objectid of block's owner tree. The key offset for the full back refs
971  * is the first byte of parent block.
972  *
973  * When implicit back refs are used, information about the lowest key and
974  * level of the tree block is required.  This information is stored in
975  * the tree block info structure.
976  */
977 
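/*
 * Worked example of the key composition above (illustrative values only): a
 * data extent at bytenr X written by inode 257 of subvolume 5 at file offset
 * 0 gets an implicit back ref item keyed
 *
 *	(X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while a full (shared) back ref for the same extent, referenced from a tree
 * leaf at bytenr P, is keyed
 *
 *	(X, BTRFS_SHARED_DATA_REF_KEY, P)
 */
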
978 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
979 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
980 				  struct btrfs_root *root,
981 				  struct btrfs_path *path,
982 				  u64 owner, u32 extra_size)
983 {
984 	struct btrfs_extent_item *item;
985 	struct btrfs_extent_item_v0 *ei0;
986 	struct btrfs_extent_ref_v0 *ref0;
987 	struct btrfs_tree_block_info *bi;
988 	struct extent_buffer *leaf;
989 	struct btrfs_key key;
990 	struct btrfs_key found_key;
991 	u32 new_size = sizeof(*item);
992 	u64 refs;
993 	int ret;
994 
995 	leaf = path->nodes[0];
996 	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
997 
998 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
999 	ei0 = btrfs_item_ptr(leaf, path->slots[0],
1000 			     struct btrfs_extent_item_v0);
1001 	refs = btrfs_extent_refs_v0(leaf, ei0);
1002 
1003 	if (owner == (u64)-1) {
1004 		while (1) {
1005 			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1006 				ret = btrfs_next_leaf(root, path);
1007 				if (ret < 0)
1008 					return ret;
1009 				BUG_ON(ret > 0); /* Corruption */
1010 				leaf = path->nodes[0];
1011 			}
1012 			btrfs_item_key_to_cpu(leaf, &found_key,
1013 					      path->slots[0]);
1014 			BUG_ON(key.objectid != found_key.objectid);
1015 			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1016 				path->slots[0]++;
1017 				continue;
1018 			}
1019 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1020 					      struct btrfs_extent_ref_v0);
1021 			owner = btrfs_ref_objectid_v0(leaf, ref0);
1022 			break;
1023 		}
1024 	}
1025 	btrfs_release_path(path);
1026 
1027 	if (owner < BTRFS_FIRST_FREE_OBJECTID)
1028 		new_size += sizeof(*bi);
1029 
1030 	new_size -= sizeof(*ei0);
1031 	ret = btrfs_search_slot(trans, root, &key, path,
1032 				new_size + extra_size, 1);
1033 	if (ret < 0)
1034 		return ret;
1035 	BUG_ON(ret); /* Corruption */
1036 
1037 	btrfs_extend_item(root, path, new_size);
1038 
1039 	leaf = path->nodes[0];
1040 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1041 	btrfs_set_extent_refs(leaf, item, refs);
1042 	/* FIXME: get real generation */
1043 	btrfs_set_extent_generation(leaf, item, 0);
1044 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1045 		btrfs_set_extent_flags(leaf, item,
1046 				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
1047 				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
1048 		bi = (struct btrfs_tree_block_info *)(item + 1);
1049 		/* FIXME: get first key of the block */
1050 		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1051 		btrfs_set_tree_block_level(leaf, bi, (int)owner);
1052 	} else {
1053 		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1054 	}
1055 	btrfs_mark_buffer_dirty(leaf);
1056 	return 0;
1057 }
1058 #endif
1059 
1060 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1061 {
1062 	u32 high_crc = ~(u32)0;
1063 	u32 low_crc = ~(u32)0;
1064 	__le64 lenum;
1065 
1066 	lenum = cpu_to_le64(root_objectid);
1067 	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1068 	lenum = cpu_to_le64(owner);
1069 	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1070 	lenum = cpu_to_le64(offset);
1071 	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1072 
1073 	return ((u64)high_crc << 31) ^ (u64)low_crc;
1074 }
1075 
1076 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1077 				     struct btrfs_extent_data_ref *ref)
1078 {
1079 	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1080 				    btrfs_extent_data_ref_objectid(leaf, ref),
1081 				    btrfs_extent_data_ref_offset(leaf, ref));
1082 }
1083 
1084 static int match_extent_data_ref(struct extent_buffer *leaf,
1085 				 struct btrfs_extent_data_ref *ref,
1086 				 u64 root_objectid, u64 owner, u64 offset)
1087 {
1088 	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1089 	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1090 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
1091 		return 0;
1092 	return 1;
1093 }
1094 
1095 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1096 					   struct btrfs_root *root,
1097 					   struct btrfs_path *path,
1098 					   u64 bytenr, u64 parent,
1099 					   u64 root_objectid,
1100 					   u64 owner, u64 offset)
1101 {
1102 	struct btrfs_key key;
1103 	struct btrfs_extent_data_ref *ref;
1104 	struct extent_buffer *leaf;
1105 	u32 nritems;
1106 	int ret;
1107 	int recow;
1108 	int err = -ENOENT;
1109 
1110 	key.objectid = bytenr;
1111 	if (parent) {
1112 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1113 		key.offset = parent;
1114 	} else {
1115 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1116 		key.offset = hash_extent_data_ref(root_objectid,
1117 						  owner, offset);
1118 	}
1119 again:
1120 	recow = 0;
1121 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1122 	if (ret < 0) {
1123 		err = ret;
1124 		goto fail;
1125 	}
1126 
1127 	if (parent) {
1128 		if (!ret)
1129 			return 0;
1130 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1131 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1132 		btrfs_release_path(path);
1133 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1134 		if (ret < 0) {
1135 			err = ret;
1136 			goto fail;
1137 		}
1138 		if (!ret)
1139 			return 0;
1140 #endif
1141 		goto fail;
1142 	}
1143 
1144 	leaf = path->nodes[0];
1145 	nritems = btrfs_header_nritems(leaf);
1146 	while (1) {
1147 		if (path->slots[0] >= nritems) {
1148 			ret = btrfs_next_leaf(root, path);
1149 			if (ret < 0)
1150 				err = ret;
1151 			if (ret)
1152 				goto fail;
1153 
1154 			leaf = path->nodes[0];
1155 			nritems = btrfs_header_nritems(leaf);
1156 			recow = 1;
1157 		}
1158 
1159 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1160 		if (key.objectid != bytenr ||
1161 		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
1162 			goto fail;
1163 
1164 		ref = btrfs_item_ptr(leaf, path->slots[0],
1165 				     struct btrfs_extent_data_ref);
1166 
1167 		if (match_extent_data_ref(leaf, ref, root_objectid,
1168 					  owner, offset)) {
1169 			if (recow) {
1170 				btrfs_release_path(path);
1171 				goto again;
1172 			}
1173 			err = 0;
1174 			break;
1175 		}
1176 		path->slots[0]++;
1177 	}
1178 fail:
1179 	return err;
1180 }
1181 
1182 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1183 					   struct btrfs_root *root,
1184 					   struct btrfs_path *path,
1185 					   u64 bytenr, u64 parent,
1186 					   u64 root_objectid, u64 owner,
1187 					   u64 offset, int refs_to_add)
1188 {
1189 	struct btrfs_key key;
1190 	struct extent_buffer *leaf;
1191 	u32 size;
1192 	u32 num_refs;
1193 	int ret;
1194 
1195 	key.objectid = bytenr;
1196 	if (parent) {
1197 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1198 		key.offset = parent;
1199 		size = sizeof(struct btrfs_shared_data_ref);
1200 	} else {
1201 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1202 		key.offset = hash_extent_data_ref(root_objectid,
1203 						  owner, offset);
1204 		size = sizeof(struct btrfs_extent_data_ref);
1205 	}
1206 
1207 	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1208 	if (ret && ret != -EEXIST)
1209 		goto fail;
1210 
1211 	leaf = path->nodes[0];
1212 	if (parent) {
1213 		struct btrfs_shared_data_ref *ref;
1214 		ref = btrfs_item_ptr(leaf, path->slots[0],
1215 				     struct btrfs_shared_data_ref);
1216 		if (ret == 0) {
1217 			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1218 		} else {
1219 			num_refs = btrfs_shared_data_ref_count(leaf, ref);
1220 			num_refs += refs_to_add;
1221 			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1222 		}
1223 	} else {
1224 		struct btrfs_extent_data_ref *ref;
1225 		while (ret == -EEXIST) {
1226 			ref = btrfs_item_ptr(leaf, path->slots[0],
1227 					     struct btrfs_extent_data_ref);
1228 			if (match_extent_data_ref(leaf, ref, root_objectid,
1229 						  owner, offset))
1230 				break;
1231 			btrfs_release_path(path);
1232 			key.offset++;
1233 			ret = btrfs_insert_empty_item(trans, root, path, &key,
1234 						      size);
1235 			if (ret && ret != -EEXIST)
1236 				goto fail;
1237 
1238 			leaf = path->nodes[0];
1239 		}
1240 		ref = btrfs_item_ptr(leaf, path->slots[0],
1241 				     struct btrfs_extent_data_ref);
1242 		if (ret == 0) {
1243 			btrfs_set_extent_data_ref_root(leaf, ref,
1244 						       root_objectid);
1245 			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1246 			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1247 			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1248 		} else {
1249 			num_refs = btrfs_extent_data_ref_count(leaf, ref);
1250 			num_refs += refs_to_add;
1251 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1252 		}
1253 	}
1254 	btrfs_mark_buffer_dirty(leaf);
1255 	ret = 0;
1256 fail:
1257 	btrfs_release_path(path);
1258 	return ret;
1259 }
1260 
1261 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1262 					   struct btrfs_root *root,
1263 					   struct btrfs_path *path,
1264 					   int refs_to_drop, int *last_ref)
1265 {
1266 	struct btrfs_key key;
1267 	struct btrfs_extent_data_ref *ref1 = NULL;
1268 	struct btrfs_shared_data_ref *ref2 = NULL;
1269 	struct extent_buffer *leaf;
1270 	u32 num_refs = 0;
1271 	int ret = 0;
1272 
1273 	leaf = path->nodes[0];
1274 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1275 
1276 	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1277 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1278 				      struct btrfs_extent_data_ref);
1279 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1280 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1281 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1282 				      struct btrfs_shared_data_ref);
1283 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1284 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1285 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1286 		struct btrfs_extent_ref_v0 *ref0;
1287 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1288 				      struct btrfs_extent_ref_v0);
1289 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1290 #endif
1291 	} else {
1292 		BUG();
1293 	}
1294 
1295 	BUG_ON(num_refs < refs_to_drop);
1296 	num_refs -= refs_to_drop;
1297 
1298 	if (num_refs == 0) {
1299 		ret = btrfs_del_item(trans, root, path);
1300 		*last_ref = 1;
1301 	} else {
1302 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1303 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1304 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1305 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1306 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1307 		else {
1308 			struct btrfs_extent_ref_v0 *ref0;
1309 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1310 					struct btrfs_extent_ref_v0);
1311 			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1312 		}
1313 #endif
1314 		btrfs_mark_buffer_dirty(leaf);
1315 	}
1316 	return ret;
1317 }
1318 
1319 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1320 					  struct btrfs_path *path,
1321 					  struct btrfs_extent_inline_ref *iref)
1322 {
1323 	struct btrfs_key key;
1324 	struct extent_buffer *leaf;
1325 	struct btrfs_extent_data_ref *ref1;
1326 	struct btrfs_shared_data_ref *ref2;
1327 	u32 num_refs = 0;
1328 
1329 	leaf = path->nodes[0];
1330 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1331 	if (iref) {
1332 		if (btrfs_extent_inline_ref_type(leaf, iref) ==
1333 		    BTRFS_EXTENT_DATA_REF_KEY) {
1334 			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1335 			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1336 		} else {
1337 			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1338 			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1339 		}
1340 	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1341 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1342 				      struct btrfs_extent_data_ref);
1343 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1345 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1346 				      struct btrfs_shared_data_ref);
1347 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1349 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1350 		struct btrfs_extent_ref_v0 *ref0;
1351 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1352 				      struct btrfs_extent_ref_v0);
1353 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1354 #endif
1355 	} else {
1356 		WARN_ON(1);
1357 	}
1358 	return num_refs;
1359 }
1360 
1361 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1362 					  struct btrfs_root *root,
1363 					  struct btrfs_path *path,
1364 					  u64 bytenr, u64 parent,
1365 					  u64 root_objectid)
1366 {
1367 	struct btrfs_key key;
1368 	int ret;
1369 
1370 	key.objectid = bytenr;
1371 	if (parent) {
1372 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1373 		key.offset = parent;
1374 	} else {
1375 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1376 		key.offset = root_objectid;
1377 	}
1378 
1379 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1380 	if (ret > 0)
1381 		ret = -ENOENT;
1382 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1383 	if (ret == -ENOENT && parent) {
1384 		btrfs_release_path(path);
1385 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1386 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1387 		if (ret > 0)
1388 			ret = -ENOENT;
1389 	}
1390 #endif
1391 	return ret;
1392 }
1393 
1394 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1395 					  struct btrfs_root *root,
1396 					  struct btrfs_path *path,
1397 					  u64 bytenr, u64 parent,
1398 					  u64 root_objectid)
1399 {
1400 	struct btrfs_key key;
1401 	int ret;
1402 
1403 	key.objectid = bytenr;
1404 	if (parent) {
1405 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1406 		key.offset = parent;
1407 	} else {
1408 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1409 		key.offset = root_objectid;
1410 	}
1411 
1412 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1413 	btrfs_release_path(path);
1414 	return ret;
1415 }
1416 
1417 static inline int extent_ref_type(u64 parent, u64 owner)
1418 {
1419 	int type;
1420 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1421 		if (parent > 0)
1422 			type = BTRFS_SHARED_BLOCK_REF_KEY;
1423 		else
1424 			type = BTRFS_TREE_BLOCK_REF_KEY;
1425 	} else {
1426 		if (parent > 0)
1427 			type = BTRFS_SHARED_DATA_REF_KEY;
1428 		else
1429 			type = BTRFS_EXTENT_DATA_REF_KEY;
1430 	}
1431 	return type;
1432 }
1433 
1434 static int find_next_key(struct btrfs_path *path, int level,
1435 			 struct btrfs_key *key)
1436 
1437 {
1438 	for (; level < BTRFS_MAX_LEVEL; level++) {
1439 		if (!path->nodes[level])
1440 			break;
1441 		if (path->slots[level] + 1 >=
1442 		    btrfs_header_nritems(path->nodes[level]))
1443 			continue;
1444 		if (level == 0)
1445 			btrfs_item_key_to_cpu(path->nodes[level], key,
1446 					      path->slots[level] + 1);
1447 		else
1448 			btrfs_node_key_to_cpu(path->nodes[level], key,
1449 					      path->slots[level] + 1);
1450 		return 0;
1451 	}
1452 	return 1;
1453 }
1454 
1455 /*
1456  * look for inline back ref. if back ref is found, *ref_ret is set
1457  * to the address of inline back ref, and 0 is returned.
1458  *
1459  * if back ref isn't found, *ref_ret is set to the address where it
1460  * should be inserted, and -ENOENT is returned.
1461  *
1462  * if insert is true and there are too many inline back refs, the path
1463  * points to the extent item, and -EAGAIN is returned.
1464  *
1465  * NOTE: inline back refs are ordered in the same way that back ref
1466  *	 items in the tree are ordered.
1467  */
1468 static noinline_for_stack
1469 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1470 				 struct btrfs_root *root,
1471 				 struct btrfs_path *path,
1472 				 struct btrfs_extent_inline_ref **ref_ret,
1473 				 u64 bytenr, u64 num_bytes,
1474 				 u64 parent, u64 root_objectid,
1475 				 u64 owner, u64 offset, int insert)
1476 {
1477 	struct btrfs_key key;
1478 	struct extent_buffer *leaf;
1479 	struct btrfs_extent_item *ei;
1480 	struct btrfs_extent_inline_ref *iref;
1481 	u64 flags;
1482 	u64 item_size;
1483 	unsigned long ptr;
1484 	unsigned long end;
1485 	int extra_size;
1486 	int type;
1487 	int want;
1488 	int ret;
1489 	int err = 0;
1490 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1491 						 SKINNY_METADATA);
1492 
1493 	key.objectid = bytenr;
1494 	key.type = BTRFS_EXTENT_ITEM_KEY;
1495 	key.offset = num_bytes;
1496 
1497 	want = extent_ref_type(parent, owner);
1498 	if (insert) {
1499 		extra_size = btrfs_extent_inline_ref_size(want);
1500 		path->keep_locks = 1;
1501 	} else
1502 		extra_size = -1;
1503 
1504 	/*
1505 	 * Owner is our parent level, so we can just add one to get the level
1506 	 * for the block we are interested in.
1507 	 */
1508 	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1509 		key.type = BTRFS_METADATA_ITEM_KEY;
1510 		key.offset = owner;
1511 	}
1512 
1513 again:
1514 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1515 	if (ret < 0) {
1516 		err = ret;
1517 		goto out;
1518 	}
1519 
1520 	/*
1521 	 * We may be a newly converted file system which still has the old fat
1522 	 * extent entries for metadata, so try and see if we have one of those.
1523 	 */
1524 	if (ret > 0 && skinny_metadata) {
1525 		skinny_metadata = false;
1526 		if (path->slots[0]) {
1527 			path->slots[0]--;
1528 			btrfs_item_key_to_cpu(path->nodes[0], &key,
1529 					      path->slots[0]);
1530 			if (key.objectid == bytenr &&
1531 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
1532 			    key.offset == num_bytes)
1533 				ret = 0;
1534 		}
1535 		if (ret) {
1536 			key.objectid = bytenr;
1537 			key.type = BTRFS_EXTENT_ITEM_KEY;
1538 			key.offset = num_bytes;
1539 			btrfs_release_path(path);
1540 			goto again;
1541 		}
1542 	}
1543 
1544 	if (ret && !insert) {
1545 		err = -ENOENT;
1546 		goto out;
1547 	} else if (WARN_ON(ret)) {
1548 		err = -EIO;
1549 		goto out;
1550 	}
1551 
1552 	leaf = path->nodes[0];
1553 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1554 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1555 	if (item_size < sizeof(*ei)) {
1556 		if (!insert) {
1557 			err = -ENOENT;
1558 			goto out;
1559 		}
1560 		ret = convert_extent_item_v0(trans, root, path, owner,
1561 					     extra_size);
1562 		if (ret < 0) {
1563 			err = ret;
1564 			goto out;
1565 		}
1566 		leaf = path->nodes[0];
1567 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1568 	}
1569 #endif
1570 	BUG_ON(item_size < sizeof(*ei));
1571 
1572 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1573 	flags = btrfs_extent_flags(leaf, ei);
1574 
1575 	ptr = (unsigned long)(ei + 1);
1576 	end = (unsigned long)ei + item_size;
1577 
1578 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1579 		ptr += sizeof(struct btrfs_tree_block_info);
1580 		BUG_ON(ptr > end);
1581 	}
1582 
1583 	err = -ENOENT;
1584 	while (1) {
1585 		if (ptr >= end) {
1586 			WARN_ON(ptr > end);
1587 			break;
1588 		}
1589 		iref = (struct btrfs_extent_inline_ref *)ptr;
1590 		type = btrfs_extent_inline_ref_type(leaf, iref);
1591 		if (want < type)
1592 			break;
1593 		if (want > type) {
1594 			ptr += btrfs_extent_inline_ref_size(type);
1595 			continue;
1596 		}
1597 
1598 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1599 			struct btrfs_extent_data_ref *dref;
1600 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1601 			if (match_extent_data_ref(leaf, dref, root_objectid,
1602 						  owner, offset)) {
1603 				err = 0;
1604 				break;
1605 			}
1606 			if (hash_extent_data_ref_item(leaf, dref) <
1607 			    hash_extent_data_ref(root_objectid, owner, offset))
1608 				break;
1609 		} else {
1610 			u64 ref_offset;
1611 			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1612 			if (parent > 0) {
1613 				if (parent == ref_offset) {
1614 					err = 0;
1615 					break;
1616 				}
1617 				if (ref_offset < parent)
1618 					break;
1619 			} else {
1620 				if (root_objectid == ref_offset) {
1621 					err = 0;
1622 					break;
1623 				}
1624 				if (ref_offset < root_objectid)
1625 					break;
1626 			}
1627 		}
1628 		ptr += btrfs_extent_inline_ref_size(type);
1629 	}
1630 	if (err == -ENOENT && insert) {
1631 		if (item_size + extra_size >=
1632 		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1633 			err = -EAGAIN;
1634 			goto out;
1635 		}
1636 		/*
1637 		 * To add a new inline back ref, we have to make sure
1638 		 * there is no corresponding back ref item.
1639 		 * For simplicity, we just do not add a new inline back
1640 		 * ref if there is any kind of item for this block.
1641 		 */
1642 		if (find_next_key(path, 0, &key) == 0 &&
1643 		    key.objectid == bytenr &&
1644 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1645 			err = -EAGAIN;
1646 			goto out;
1647 		}
1648 	}
1649 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1650 out:
1651 	if (insert) {
1652 		path->keep_locks = 0;
1653 		btrfs_unlock_up_safe(path, 1);
1654 	}
1655 	return err;
1656 }
1657 
1658 /*
1659  * helper to add new inline back ref
1660  */
1661 static noinline_for_stack
1662 void setup_inline_extent_backref(struct btrfs_root *root,
1663 				 struct btrfs_path *path,
1664 				 struct btrfs_extent_inline_ref *iref,
1665 				 u64 parent, u64 root_objectid,
1666 				 u64 owner, u64 offset, int refs_to_add,
1667 				 struct btrfs_delayed_extent_op *extent_op)
1668 {
1669 	struct extent_buffer *leaf;
1670 	struct btrfs_extent_item *ei;
1671 	unsigned long ptr;
1672 	unsigned long end;
1673 	unsigned long item_offset;
1674 	u64 refs;
1675 	int size;
1676 	int type;
1677 
1678 	leaf = path->nodes[0];
1679 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1680 	item_offset = (unsigned long)iref - (unsigned long)ei;
1681 
1682 	type = extent_ref_type(parent, owner);
1683 	size = btrfs_extent_inline_ref_size(type);
1684 
1685 	btrfs_extend_item(root, path, size);
1686 
1687 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688 	refs = btrfs_extent_refs(leaf, ei);
1689 	refs += refs_to_add;
1690 	btrfs_set_extent_refs(leaf, ei, refs);
1691 	if (extent_op)
1692 		__run_delayed_extent_op(extent_op, leaf, ei);
1693 
1694 	ptr = (unsigned long)ei + item_offset;
1695 	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1696 	if (ptr < end - size)
1697 		memmove_extent_buffer(leaf, ptr + size, ptr,
1698 				      end - size - ptr);
1699 
1700 	iref = (struct btrfs_extent_inline_ref *)ptr;
1701 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1702 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1703 		struct btrfs_extent_data_ref *dref;
1704 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1705 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1706 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1707 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1708 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1709 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1710 		struct btrfs_shared_data_ref *sref;
1711 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1712 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1713 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1714 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1715 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1716 	} else {
1717 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1718 	}
1719 	btrfs_mark_buffer_dirty(leaf);
1720 }
1721 
1722 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1723 				 struct btrfs_root *root,
1724 				 struct btrfs_path *path,
1725 				 struct btrfs_extent_inline_ref **ref_ret,
1726 				 u64 bytenr, u64 num_bytes, u64 parent,
1727 				 u64 root_objectid, u64 owner, u64 offset)
1728 {
1729 	int ret;
1730 
1731 	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1732 					   bytenr, num_bytes, parent,
1733 					   root_objectid, owner, offset, 0);
1734 	if (ret != -ENOENT)
1735 		return ret;
1736 
1737 	btrfs_release_path(path);
1738 	*ref_ret = NULL;
1739 
1740 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1741 		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1742 					    root_objectid);
1743 	} else {
1744 		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1745 					     root_objectid, owner, offset);
1746 	}
1747 	return ret;
1748 }
1749 
1750 /*
1751  * helper to update/remove inline back ref
1752  */
1753 static noinline_for_stack
1754 void update_inline_extent_backref(struct btrfs_root *root,
1755 				  struct btrfs_path *path,
1756 				  struct btrfs_extent_inline_ref *iref,
1757 				  int refs_to_mod,
1758 				  struct btrfs_delayed_extent_op *extent_op,
1759 				  int *last_ref)
1760 {
1761 	struct extent_buffer *leaf;
1762 	struct btrfs_extent_item *ei;
1763 	struct btrfs_extent_data_ref *dref = NULL;
1764 	struct btrfs_shared_data_ref *sref = NULL;
1765 	unsigned long ptr;
1766 	unsigned long end;
1767 	u32 item_size;
1768 	int size;
1769 	int type;
1770 	u64 refs;
1771 
1772 	leaf = path->nodes[0];
1773 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1774 	refs = btrfs_extent_refs(leaf, ei);
1775 	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1776 	refs += refs_to_mod;
1777 	btrfs_set_extent_refs(leaf, ei, refs);
1778 	if (extent_op)
1779 		__run_delayed_extent_op(extent_op, leaf, ei);
1780 
1781 	type = btrfs_extent_inline_ref_type(leaf, iref);
1782 
1783 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1784 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1785 		refs = btrfs_extent_data_ref_count(leaf, dref);
1786 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1787 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1788 		refs = btrfs_shared_data_ref_count(leaf, sref);
1789 	} else {
1790 		refs = 1;
1791 		BUG_ON(refs_to_mod != -1);
1792 	}
1793 
1794 	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1795 	refs += refs_to_mod;
1796 
1797 	if (refs > 0) {
1798 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1799 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1800 		else
1801 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1802 	} else {
1803 		*last_ref = 1;
1804 		size =  btrfs_extent_inline_ref_size(type);
1805 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1806 		ptr = (unsigned long)iref;
1807 		end = (unsigned long)ei + item_size;
1808 		if (ptr + size < end)
1809 			memmove_extent_buffer(leaf, ptr, ptr + size,
1810 					      end - ptr - size);
1811 		item_size -= size;
1812 		btrfs_truncate_item(root, path, item_size, 1);
1813 	}
1814 	btrfs_mark_buffer_dirty(leaf);
1815 }
1816 
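/*
 * Insert or update an inline backref.  If the ref already exists its
 * count is bumped in place; if the lookup returns -ENOENT (and there is
 * room left in the extent item) a new inline ref is set up.  -EAGAIN
 * from the lookup is passed through so the caller can fall back to a
 * keyed backref.
 */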
1817 static noinline_for_stack
1818 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1819 				 struct btrfs_root *root,
1820 				 struct btrfs_path *path,
1821 				 u64 bytenr, u64 num_bytes, u64 parent,
1822 				 u64 root_objectid, u64 owner,
1823 				 u64 offset, int refs_to_add,
1824 				 struct btrfs_delayed_extent_op *extent_op)
1825 {
1826 	struct btrfs_extent_inline_ref *iref;
1827 	int ret;
1828 
1829 	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1830 					   bytenr, num_bytes, parent,
1831 					   root_objectid, owner, offset, 1);
1832 	if (ret == 0) {
1833 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1834 		update_inline_extent_backref(root, path, iref,
1835 					     refs_to_add, extent_op, NULL);
1836 	} else if (ret == -ENOENT) {
1837 		setup_inline_extent_backref(root, path, iref, parent,
1838 					    root_objectid, owner, offset,
1839 					    refs_to_add, extent_op);
1840 		ret = 0;
1841 	}
1842 	return ret;
1843 }
1844 
1845 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1846 				 struct btrfs_root *root,
1847 				 struct btrfs_path *path,
1848 				 u64 bytenr, u64 parent, u64 root_objectid,
1849 				 u64 owner, u64 offset, int refs_to_add)
1850 {
1851 	int ret;
1852 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1853 		BUG_ON(refs_to_add != 1);
1854 		ret = insert_tree_block_ref(trans, root, path, bytenr,
1855 					    parent, root_objectid);
1856 	} else {
1857 		ret = insert_extent_data_ref(trans, root, path, bytenr,
1858 					     parent, root_objectid,
1859 					     owner, offset, refs_to_add);
1860 	}
1861 	return ret;
1862 }
1863 
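/*
 * Drop refs_to_drop references from a backref.  Inline refs are updated
 * (or removed) in place, keyed data refs go through
 * remove_extent_data_ref(), and keyed tree block refs are deleted
 * outright.  *last_ref is set once the backref item itself goes away.
 */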
1864 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1865 				 struct btrfs_root *root,
1866 				 struct btrfs_path *path,
1867 				 struct btrfs_extent_inline_ref *iref,
1868 				 int refs_to_drop, int is_data, int *last_ref)
1869 {
1870 	int ret = 0;
1871 
1872 	BUG_ON(!is_data && refs_to_drop != 1);
1873 	if (iref) {
1874 		update_inline_extent_backref(root, path, iref,
1875 					     -refs_to_drop, NULL, last_ref);
1876 	} else if (is_data) {
1877 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1878 					     last_ref);
1879 	} else {
1880 		*last_ref = 1;
1881 		ret = btrfs_del_item(trans, root, path);
1882 	}
1883 	return ret;
1884 }
1885 
1886 static int btrfs_issue_discard(struct block_device *bdev,
1887 				u64 start, u64 len)
1888 {
1889 	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1890 }
1891 
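/*
 * Issue discards for a logical extent.  The range is mapped to its
 * physical stripes and a discard is sent to each device that supports
 * it; -EOPNOTSUPP from a device is ignored so callers aren't confused by
 * drives that silently drop the request.  The number of bytes actually
 * discarded is reported via *actual_bytes.
 */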
1892 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1893 				u64 num_bytes, u64 *actual_bytes)
1894 {
1895 	int ret;
1896 	u64 discarded_bytes = 0;
1897 	struct btrfs_bio *bbio = NULL;
1898 
1899 
1900 	/* Tell the block device(s) that the sectors can be discarded */
1901 	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1902 			      bytenr, &num_bytes, &bbio, 0);
1903 	/* Error condition is -ENOMEM */
1904 	if (!ret) {
1905 		struct btrfs_bio_stripe *stripe = bbio->stripes;
1906 		int i;
1907 
1908 
1909 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1910 			if (!stripe->dev->can_discard)
1911 				continue;
1912 
1913 			ret = btrfs_issue_discard(stripe->dev->bdev,
1914 						  stripe->physical,
1915 						  stripe->length);
1916 			if (!ret)
1917 				discarded_bytes += stripe->length;
1918 			else if (ret != -EOPNOTSUPP)
1919 				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1920 
1921 			/*
1922 			 * If we get back EOPNOTSUPP for some reason, just
1923 			 * ignore the return value so we don't screw up
1924 			 * callers of discard_extent.
1925 			 */
1926 			ret = 0;
1927 		}
1928 		kfree(bbio);
1929 	}
1930 
1931 	if (actual_bytes)
1932 		*actual_bytes = discarded_bytes;
1933 
1934 
1935 	if (ret == -EOPNOTSUPP)
1936 		ret = 0;
1937 	return ret;
1938 }
1939 
1940 /* Can return -ENOMEM */
1941 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1942 			 struct btrfs_root *root,
1943 			 u64 bytenr, u64 num_bytes, u64 parent,
1944 			 u64 root_objectid, u64 owner, u64 offset,
1945 			 int no_quota)
1946 {
1947 	int ret;
1948 	struct btrfs_fs_info *fs_info = root->fs_info;
1949 
1950 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1951 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
1952 
1953 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1954 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1955 					num_bytes,
1956 					parent, root_objectid, (int)owner,
1957 					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1958 	} else {
1959 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1960 					num_bytes,
1961 					parent, root_objectid, owner, offset,
1962 					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1963 	}
1964 	return ret;
1965 }
1966 
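/*
 * Add refs_to_add references to an existing extent.  First try to insert
 * (or bump) an inline backref; if that succeeds and quota is enabled,
 * record the new reference with the qgroup code.  -EAGAIN from the
 * inline path means the extent item is full, so the ref count on the
 * extent item is updated and a separate keyed backref is inserted
 * instead.
 */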
1967 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1968 				  struct btrfs_root *root,
1969 				  u64 bytenr, u64 num_bytes,
1970 				  u64 parent, u64 root_objectid,
1971 				  u64 owner, u64 offset, int refs_to_add,
1972 				  int no_quota,
1973 				  struct btrfs_delayed_extent_op *extent_op)
1974 {
1975 	struct btrfs_fs_info *fs_info = root->fs_info;
1976 	struct btrfs_path *path;
1977 	struct extent_buffer *leaf;
1978 	struct btrfs_extent_item *item;
1979 	struct btrfs_key key;
1980 	u64 refs;
1981 	int ret;
1982 	enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1983 
1984 	path = btrfs_alloc_path();
1985 	if (!path)
1986 		return -ENOMEM;
1987 
1988 	if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1989 		no_quota = 1;
1990 
1991 	path->reada = 1;
1992 	path->leave_spinning = 1;
1993 	/* this will setup the path even if it fails to insert the back ref */
1994 	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
1995 					   bytenr, num_bytes, parent,
1996 					   root_objectid, owner, offset,
1997 					   refs_to_add, extent_op);
1998 	if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
1999 		goto out;
2000 	/*
2001 	 * Ok we were able to insert an inline extent ref and it appears to be a
2002 	 * new reference, deal with the qgroup accounting.
2003 	 */
2004 	if (!ret && !no_quota) {
2005 		ASSERT(root->fs_info->quota_enabled);
2006 		leaf = path->nodes[0];
2007 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2008 		item = btrfs_item_ptr(leaf, path->slots[0],
2009 				      struct btrfs_extent_item);
2010 		if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2011 			type = BTRFS_QGROUP_OPER_ADD_SHARED;
2012 		btrfs_release_path(path);
2013 
2014 		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2015 					      bytenr, num_bytes, type, 0);
2016 		goto out;
2017 	}
2018 
2019 	/*
2020 	 * Ok we had -EAGAIN which means we didn't have space to insert an
2021 	 * inline extent ref, so just update the reference count and add a
2022 	 * normal backref.
2023 	 */
2024 	leaf = path->nodes[0];
2025 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2026 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2027 	refs = btrfs_extent_refs(leaf, item);
2028 	if (refs)
2029 		type = BTRFS_QGROUP_OPER_ADD_SHARED;
2030 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2031 	if (extent_op)
2032 		__run_delayed_extent_op(extent_op, leaf, item);
2033 
2034 	btrfs_mark_buffer_dirty(leaf);
2035 	btrfs_release_path(path);
2036 
2037 	if (!no_quota) {
2038 		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2039 					      bytenr, num_bytes, type, 0);
2040 		if (ret)
2041 			goto out;
2042 	}
2043 
2044 	path->reada = 1;
2045 	path->leave_spinning = 1;
2046 	/* now insert the actual backref */
2047 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
2048 				    path, bytenr, parent, root_objectid,
2049 				    owner, offset, refs_to_add);
2050 	if (ret)
2051 		btrfs_abort_transaction(trans, root, ret);
2052 out:
2053 	btrfs_free_path(path);
2054 	return ret;
2055 }
2056 
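/*
 * Process a single delayed data ref.  Depending on the queued action this
 * either inserts a freshly allocated file extent (when the head still
 * carries reserved space), adds a reference to an existing extent, or
 * drops one.
 */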
2057 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2058 				struct btrfs_root *root,
2059 				struct btrfs_delayed_ref_node *node,
2060 				struct btrfs_delayed_extent_op *extent_op,
2061 				int insert_reserved)
2062 {
2063 	int ret = 0;
2064 	struct btrfs_delayed_data_ref *ref;
2065 	struct btrfs_key ins;
2066 	u64 parent = 0;
2067 	u64 ref_root = 0;
2068 	u64 flags = 0;
2069 
2070 	ins.objectid = node->bytenr;
2071 	ins.offset = node->num_bytes;
2072 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2073 
2074 	ref = btrfs_delayed_node_to_data_ref(node);
2075 	trace_run_delayed_data_ref(node, ref, node->action);
2076 
2077 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2078 		parent = ref->parent;
2079 	ref_root = ref->root;
2080 
2081 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2082 		if (extent_op)
2083 			flags |= extent_op->flags_to_set;
2084 		ret = alloc_reserved_file_extent(trans, root,
2085 						 parent, ref_root, flags,
2086 						 ref->objectid, ref->offset,
2087 						 &ins, node->ref_mod);
2088 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2089 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2090 					     node->num_bytes, parent,
2091 					     ref_root, ref->objectid,
2092 					     ref->offset, node->ref_mod,
2093 					     node->no_quota, extent_op);
2094 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2095 		ret = __btrfs_free_extent(trans, root, node->bytenr,
2096 					  node->num_bytes, parent,
2097 					  ref_root, ref->objectid,
2098 					  ref->offset, node->ref_mod,
2099 					  extent_op, node->no_quota);
2100 	} else {
2101 		BUG();
2102 	}
2103 	return ret;
2104 }
2105 
2106 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2107 				    struct extent_buffer *leaf,
2108 				    struct btrfs_extent_item *ei)
2109 {
2110 	u64 flags = btrfs_extent_flags(leaf, ei);
2111 	if (extent_op->update_flags) {
2112 		flags |= extent_op->flags_to_set;
2113 		btrfs_set_extent_flags(leaf, ei, flags);
2114 	}
2115 
2116 	if (extent_op->update_key) {
2117 		struct btrfs_tree_block_info *bi;
2118 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2119 		bi = (struct btrfs_tree_block_info *)(ei + 1);
2120 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2121 	}
2122 }
2123 
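/*
 * Apply a delayed extent op (flag and/or key update) to the extent item
 * on disk.  With skinny metadata the METADATA_ITEM key is tried first,
 * falling back to the full EXTENT_ITEM key if it isn't found.
 */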
2124 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2125 				 struct btrfs_root *root,
2126 				 struct btrfs_delayed_ref_node *node,
2127 				 struct btrfs_delayed_extent_op *extent_op)
2128 {
2129 	struct btrfs_key key;
2130 	struct btrfs_path *path;
2131 	struct btrfs_extent_item *ei;
2132 	struct extent_buffer *leaf;
2133 	u32 item_size;
2134 	int ret;
2135 	int err = 0;
2136 	int metadata = !extent_op->is_data;
2137 
2138 	if (trans->aborted)
2139 		return 0;
2140 
2141 	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2142 		metadata = 0;
2143 
2144 	path = btrfs_alloc_path();
2145 	if (!path)
2146 		return -ENOMEM;
2147 
2148 	key.objectid = node->bytenr;
2149 
2150 	if (metadata) {
2151 		key.type = BTRFS_METADATA_ITEM_KEY;
2152 		key.offset = extent_op->level;
2153 	} else {
2154 		key.type = BTRFS_EXTENT_ITEM_KEY;
2155 		key.offset = node->num_bytes;
2156 	}
2157 
2158 again:
2159 	path->reada = 1;
2160 	path->leave_spinning = 1;
2161 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2162 				path, 0, 1);
2163 	if (ret < 0) {
2164 		err = ret;
2165 		goto out;
2166 	}
2167 	if (ret > 0) {
2168 		if (metadata) {
2169 			if (path->slots[0] > 0) {
2170 				path->slots[0]--;
2171 				btrfs_item_key_to_cpu(path->nodes[0], &key,
2172 						      path->slots[0]);
2173 				if (key.objectid == node->bytenr &&
2174 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
2175 				    key.offset == node->num_bytes)
2176 					ret = 0;
2177 			}
2178 			if (ret > 0) {
2179 				btrfs_release_path(path);
2180 				metadata = 0;
2181 
2182 				key.objectid = node->bytenr;
2183 				key.offset = node->num_bytes;
2184 				key.type = BTRFS_EXTENT_ITEM_KEY;
2185 				goto again;
2186 			}
2187 		} else {
2188 			err = -EIO;
2189 			goto out;
2190 		}
2191 	}
2192 
2193 	leaf = path->nodes[0];
2194 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2195 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2196 	if (item_size < sizeof(*ei)) {
2197 		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2198 					     path, (u64)-1, 0);
2199 		if (ret < 0) {
2200 			err = ret;
2201 			goto out;
2202 		}
2203 		leaf = path->nodes[0];
2204 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2205 	}
2206 #endif
2207 	BUG_ON(item_size < sizeof(*ei));
2208 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2209 	__run_delayed_extent_op(extent_op, leaf, ei);
2210 
2211 	btrfs_mark_buffer_dirty(leaf);
2212 out:
2213 	btrfs_free_path(path);
2214 	return err;
2215 }
2216 
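/*
 * Process a single delayed tree block ref: insert the reserved tree
 * block, add a reference to it, or drop one, mirroring
 * run_delayed_data_ref() for metadata.  Tree refs always carry a
 * ref_mod of 1.
 */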
2217 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2218 				struct btrfs_root *root,
2219 				struct btrfs_delayed_ref_node *node,
2220 				struct btrfs_delayed_extent_op *extent_op,
2221 				int insert_reserved)
2222 {
2223 	int ret = 0;
2224 	struct btrfs_delayed_tree_ref *ref;
2225 	struct btrfs_key ins;
2226 	u64 parent = 0;
2227 	u64 ref_root = 0;
2228 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2229 						 SKINNY_METADATA);
2230 
2231 	ref = btrfs_delayed_node_to_tree_ref(node);
2232 	trace_run_delayed_tree_ref(node, ref, node->action);
2233 
2234 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2235 		parent = ref->parent;
2236 	ref_root = ref->root;
2237 
2238 	ins.objectid = node->bytenr;
2239 	if (skinny_metadata) {
2240 		ins.offset = ref->level;
2241 		ins.type = BTRFS_METADATA_ITEM_KEY;
2242 	} else {
2243 		ins.offset = node->num_bytes;
2244 		ins.type = BTRFS_EXTENT_ITEM_KEY;
2245 	}
2246 
2247 	BUG_ON(node->ref_mod != 1);
2248 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2249 		BUG_ON(!extent_op || !extent_op->update_flags);
2250 		ret = alloc_reserved_tree_block(trans, root,
2251 						parent, ref_root,
2252 						extent_op->flags_to_set,
2253 						&extent_op->key,
2254 						ref->level, &ins,
2255 						node->no_quota);
2256 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2257 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2258 					     node->num_bytes, parent, ref_root,
2259 					     ref->level, 0, 1, node->no_quota,
2260 					     extent_op);
2261 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2262 		ret = __btrfs_free_extent(trans, root, node->bytenr,
2263 					  node->num_bytes, parent, ref_root,
2264 					  ref->level, 0, 1, extent_op,
2265 					  node->no_quota);
2266 	} else {
2267 		BUG();
2268 	}
2269 	return ret;
2270 }
2271 
2272 /* helper function to actually process a single delayed ref entry */
2273 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2274 			       struct btrfs_root *root,
2275 			       struct btrfs_delayed_ref_node *node,
2276 			       struct btrfs_delayed_extent_op *extent_op,
2277 			       int insert_reserved)
2278 {
2279 	int ret = 0;
2280 
2281 	if (trans->aborted) {
2282 		if (insert_reserved)
2283 			btrfs_pin_extent(root, node->bytenr,
2284 					 node->num_bytes, 1);
2285 		return 0;
2286 	}
2287 
2288 	if (btrfs_delayed_ref_is_head(node)) {
2289 		struct btrfs_delayed_ref_head *head;
2290 		/*
2291 		 * we've hit the end of the chain and we were supposed
2292 		 * to insert this extent into the tree.  But, it got
2293 		 * deleted before we ever needed to insert it, so all
2294 		 * we have to do is clean up the accounting
2295 		 */
2296 		BUG_ON(extent_op);
2297 		head = btrfs_delayed_node_to_head(node);
2298 		trace_run_delayed_ref_head(node, head, node->action);
2299 
2300 		if (insert_reserved) {
2301 			btrfs_pin_extent(root, node->bytenr,
2302 					 node->num_bytes, 1);
2303 			if (head->is_data) {
2304 				ret = btrfs_del_csums(trans, root,
2305 						      node->bytenr,
2306 						      node->num_bytes);
2307 			}
2308 		}
2309 		return ret;
2310 	}
2311 
2312 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2313 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2314 		ret = run_delayed_tree_ref(trans, root, node, extent_op,
2315 					   insert_reserved);
2316 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2317 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2318 		ret = run_delayed_data_ref(trans, root, node, extent_op,
2319 					   insert_reserved);
2320 	else
2321 		BUG();
2322 	return ret;
2323 }
2324 
2325 static noinline struct btrfs_delayed_ref_node *
2326 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2327 {
2328 	struct rb_node *node;
2329 	struct btrfs_delayed_ref_node *ref, *last = NULL;
2330 
2331 	/*
2332 	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2333 	 * this prevents ref count from going down to zero when
2334 	 * there are still pending delayed refs.
2335 	 */
2336 	node = rb_first(&head->ref_root);
2337 	while (node) {
2338 		ref = rb_entry(node, struct btrfs_delayed_ref_node,
2339 				rb_node);
2340 		if (ref->action == BTRFS_ADD_DELAYED_REF)
2341 			return ref;
2342 		else if (last == NULL)
2343 			last = ref;
2344 		node = rb_next(node);
2345 	}
2346 	return last;
2347 }
2348 
2349 /*
2350  * Returns 0 on success or if called with an already aborted transaction.
2351  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2352  */
2353 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2354 					     struct btrfs_root *root,
2355 					     unsigned long nr)
2356 {
2357 	struct btrfs_delayed_ref_root *delayed_refs;
2358 	struct btrfs_delayed_ref_node *ref;
2359 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2360 	struct btrfs_delayed_extent_op *extent_op;
2361 	struct btrfs_fs_info *fs_info = root->fs_info;
2362 	ktime_t start = ktime_get();
2363 	int ret;
2364 	unsigned long count = 0;
2365 	unsigned long actual_count = 0;
2366 	int must_insert_reserved = 0;
2367 
2368 	delayed_refs = &trans->transaction->delayed_refs;
2369 	while (1) {
2370 		if (!locked_ref) {
2371 			if (count >= nr)
2372 				break;
2373 
2374 			spin_lock(&delayed_refs->lock);
2375 			locked_ref = btrfs_select_ref_head(trans);
2376 			if (!locked_ref) {
2377 				spin_unlock(&delayed_refs->lock);
2378 				break;
2379 			}
2380 
2381 			/* grab the lock that says we are going to process
2382 			 * all the refs for this head */
2383 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2384 			spin_unlock(&delayed_refs->lock);
2385 			/*
2386 			 * we may have dropped the spin lock to get the head
2387 			 * mutex lock, and that might have given someone else
2388 			 * time to free the head.  If that's true, it has been
2389 			 * removed from our list and we can move on.
2390 			 */
2391 			if (ret == -EAGAIN) {
2392 				locked_ref = NULL;
2393 				count++;
2394 				continue;
2395 			}
2396 		}
2397 
2398 		/*
2399 		 * We need to try and merge add/drops of the same ref since we
2400 		 * can run into issues with relocate dropping the implicit ref
2401 		 * and then it being added back again before the drop can
2402 		 * finish.  If we merged anything we need to re-loop so we can
2403 		 * get a good ref.
2404 		 */
2405 		spin_lock(&locked_ref->lock);
2406 		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2407 					 locked_ref);
2408 
2409 		/*
2410 		 * locked_ref is the head node, so we have to go one
2411 		 * node back for any delayed ref updates
2412 		 */
2413 		ref = select_delayed_ref(locked_ref);
2414 
2415 		if (ref && ref->seq &&
2416 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2417 			spin_unlock(&locked_ref->lock);
2418 			btrfs_delayed_ref_unlock(locked_ref);
2419 			spin_lock(&delayed_refs->lock);
2420 			locked_ref->processing = 0;
2421 			delayed_refs->num_heads_ready++;
2422 			spin_unlock(&delayed_refs->lock);
2423 			locked_ref = NULL;
2424 			cond_resched();
2425 			count++;
2426 			continue;
2427 		}
2428 
2429 		/*
2430 		 * record the must insert reserved flag before we
2431 		 * drop the spin lock.
2432 		 */
2433 		must_insert_reserved = locked_ref->must_insert_reserved;
2434 		locked_ref->must_insert_reserved = 0;
2435 
2436 		extent_op = locked_ref->extent_op;
2437 		locked_ref->extent_op = NULL;
2438 
2439 		if (!ref) {
2440 
2441 
2442 			/* All delayed refs have been processed, go ahead
2443 			 * and send the head node to run_one_delayed_ref,
2444 			 * so that any accounting fixes can happen
2445 			 */
2446 			ref = &locked_ref->node;
2447 
2448 			if (extent_op && must_insert_reserved) {
2449 				btrfs_free_delayed_extent_op(extent_op);
2450 				extent_op = NULL;
2451 			}
2452 
2453 			if (extent_op) {
2454 				spin_unlock(&locked_ref->lock);
2455 				ret = run_delayed_extent_op(trans, root,
2456 							    ref, extent_op);
2457 				btrfs_free_delayed_extent_op(extent_op);
2458 
2459 				if (ret) {
2460 					/*
2461 					 * Need to reset must_insert_reserved if
2462 					 * there was an error so the abort stuff
2463 					 * can cleanup the reserved space
2464 					 * properly.
2465 					 */
2466 					if (must_insert_reserved)
2467 						locked_ref->must_insert_reserved = 1;
2468 					locked_ref->processing = 0;
2469 					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2470 					btrfs_delayed_ref_unlock(locked_ref);
2471 					return ret;
2472 				}
2473 				continue;
2474 			}
2475 
2476 			/*
2477 			 * Need to drop our head ref lock and re-acquire the
2478 			 * delayed ref lock and then re-check to make sure
2479 			 * nobody got added.
2480 			 */
2481 			spin_unlock(&locked_ref->lock);
2482 			spin_lock(&delayed_refs->lock);
2483 			spin_lock(&locked_ref->lock);
2484 			if (rb_first(&locked_ref->ref_root) ||
2485 			    locked_ref->extent_op) {
2486 				spin_unlock(&locked_ref->lock);
2487 				spin_unlock(&delayed_refs->lock);
2488 				continue;
2489 			}
2490 			ref->in_tree = 0;
2491 			delayed_refs->num_heads--;
2492 			rb_erase(&locked_ref->href_node,
2493 				 &delayed_refs->href_root);
2494 			spin_unlock(&delayed_refs->lock);
2495 		} else {
2496 			actual_count++;
2497 			ref->in_tree = 0;
2498 			rb_erase(&ref->rb_node, &locked_ref->ref_root);
2499 		}
2500 		atomic_dec(&delayed_refs->num_entries);
2501 
2502 		if (!btrfs_delayed_ref_is_head(ref)) {
2503 			/*
2504 			 * when we play the delayed ref, also correct the
2505 			 * ref_mod on head
2506 			 */
2507 			switch (ref->action) {
2508 			case BTRFS_ADD_DELAYED_REF:
2509 			case BTRFS_ADD_DELAYED_EXTENT:
2510 				locked_ref->node.ref_mod -= ref->ref_mod;
2511 				break;
2512 			case BTRFS_DROP_DELAYED_REF:
2513 				locked_ref->node.ref_mod += ref->ref_mod;
2514 				break;
2515 			default:
2516 				WARN_ON(1);
2517 			}
2518 		}
2519 		spin_unlock(&locked_ref->lock);
2520 
2521 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
2522 					  must_insert_reserved);
2523 
2524 		btrfs_free_delayed_extent_op(extent_op);
2525 		if (ret) {
2526 			locked_ref->processing = 0;
2527 			btrfs_delayed_ref_unlock(locked_ref);
2528 			btrfs_put_delayed_ref(ref);
2529 			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2530 			return ret;
2531 		}
2532 
2533 		/*
2534 		 * If this node is a head, that means all the refs in this head
2535 		 * have been dealt with, and we will pick the next head to deal
2536 		 * with, so we must unlock the head and drop it from the cluster
2537 		 * list before we release it.
2538 		 */
2539 		if (btrfs_delayed_ref_is_head(ref)) {
2540 			btrfs_delayed_ref_unlock(locked_ref);
2541 			locked_ref = NULL;
2542 		}
2543 		btrfs_put_delayed_ref(ref);
2544 		count++;
2545 		cond_resched();
2546 	}
2547 
2548 	/*
2549 	 * We don't want to include ref heads since we can have empty ref heads
2550 	 * and those will drastically skew our runtime down since we just do
2551 	 * accounting, no actual extent tree updates.
2552 	 */
2553 	if (actual_count > 0) {
2554 		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2555 		u64 avg;
2556 
2557 		/*
2558 		 * We weigh the current average higher than our current runtime
2559 		 * to avoid large swings in the average.
2560 		 */
2561 		spin_lock(&delayed_refs->lock);
2562 		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2563 		avg = div64_u64(avg, 4);
2564 		fs_info->avg_delayed_ref_runtime = avg;
2565 		spin_unlock(&delayed_refs->lock);
2566 	}
2567 	return 0;
2568 }
2569 
2570 #ifdef SCRAMBLE_DELAYED_REFS
2571 /*
2572  * Normally delayed refs get processed in ascending bytenr order. This
2573  * correlates in most cases to the order added. To expose dependencies on this
2574  * order, we start to process the tree in the middle instead of the beginning
2575  */
2576 static u64 find_middle(struct rb_root *root)
2577 {
2578 	struct rb_node *n = root->rb_node;
2579 	struct btrfs_delayed_ref_node *entry;
2580 	int alt = 1;
2581 	u64 middle;
2582 	u64 first = 0, last = 0;
2583 
2584 	n = rb_first(root);
2585 	if (n) {
2586 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2587 		first = entry->bytenr;
2588 	}
2589 	n = rb_last(root);
2590 	if (n) {
2591 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2592 		last = entry->bytenr;
2593 	}
2594 	n = root->rb_node;
2595 
2596 	while (n) {
2597 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2598 		WARN_ON(!entry->in_tree);
2599 
2600 		middle = entry->bytenr;
2601 
2602 		if (alt)
2603 			n = n->rb_left;
2604 		else
2605 			n = n->rb_right;
2606 
2607 		alt = 1 - alt;
2608 	}
2609 	return middle;
2610 }
2611 #endif
2612 
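/*
 * Rough estimate of how many extent tree leaves the given number of
 * delayed ref heads could touch, used when sizing reservations for
 * running delayed refs.
 */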
2613 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2614 {
2615 	u64 num_bytes;
2616 
2617 	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2618 			     sizeof(struct btrfs_extent_inline_ref));
2619 	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2620 		num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2621 
2622 	/*
2623 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
2624 	 * closer to what we're really going to want to use.
2625 	 */
2626 	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2627 }
2628 
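/*
 * Returns 1 if the global block reserve looks too small to safely run
 * the currently queued delayed ref heads, 0 otherwise.  The estimate is
 * doubled (twice, if no more chunks can be allocated) to leave wiggle
 * room, since running delayed refs can create more delayed refs.
 */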
2629 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2630 				       struct btrfs_root *root)
2631 {
2632 	struct btrfs_block_rsv *global_rsv;
2633 	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2634 	u64 num_bytes;
2635 	int ret = 0;
2636 
2637 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2638 	num_heads = heads_to_leaves(root, num_heads);
2639 	if (num_heads > 1)
2640 		num_bytes += (num_heads - 1) * root->nodesize;
2641 	num_bytes <<= 1;
2642 	global_rsv = &root->fs_info->global_block_rsv;
2643 
2644 	/*
2645 	 * If we can't allocate any more chunks let's make sure we have _lots_ of
2646 	 * wiggle room since running delayed refs can create more delayed refs.
2647 	 */
2648 	if (global_rsv->space_info->full)
2649 		num_bytes <<= 1;
2650 
2651 	spin_lock(&global_rsv->lock);
2652 	if (global_rsv->reserved <= num_bytes)
2653 		ret = 1;
2654 	spin_unlock(&global_rsv->lock);
2655 	return ret;
2656 }
2657 
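/*
 * Decide whether a transaction handle should throttle and help run
 * delayed refs: returns 1 when the estimated time to run them exceeds a
 * second, 2 when it exceeds half a second, and otherwise defers to the
 * global reserve check above.
 */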
2658 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2659 				       struct btrfs_root *root)
2660 {
2661 	struct btrfs_fs_info *fs_info = root->fs_info;
2662 	u64 num_entries =
2663 		atomic_read(&trans->transaction->delayed_refs.num_entries);
2664 	u64 avg_runtime;
2665 	u64 val;
2666 
2667 	smp_mb();
2668 	avg_runtime = fs_info->avg_delayed_ref_runtime;
2669 	val = num_entries * avg_runtime;
2670 	if (num_entries * avg_runtime >= NSEC_PER_SEC)
2671 		return 1;
2672 	if (val >= NSEC_PER_SEC / 2)
2673 		return 2;
2674 
2675 	return btrfs_check_space_for_delayed_refs(trans, root);
2676 }
2677 
2678 struct async_delayed_refs {
2679 	struct btrfs_root *root;
2680 	int count;
2681 	int error;
2682 	int sync;
2683 	struct completion wait;
2684 	struct btrfs_work work;
2685 };
2686 
2687 static void delayed_ref_async_start(struct btrfs_work *work)
2688 {
2689 	struct async_delayed_refs *async;
2690 	struct btrfs_trans_handle *trans;
2691 	int ret;
2692 
2693 	async = container_of(work, struct async_delayed_refs, work);
2694 
2695 	trans = btrfs_join_transaction(async->root);
2696 	if (IS_ERR(trans)) {
2697 		async->error = PTR_ERR(trans);
2698 		goto done;
2699 	}
2700 
2701 	/*
2702 	 * trans->sync means that when we call end_transaction, we won't
2703 	 * wait on delayed refs
2704 	 */
2705 	trans->sync = true;
2706 	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2707 	if (ret)
2708 		async->error = ret;
2709 
2710 	ret = btrfs_end_transaction(trans, async->root);
2711 	if (ret && !async->error)
2712 		async->error = ret;
2713 done:
2714 	if (async->sync)
2715 		complete(&async->wait);
2716 	else
2717 		kfree(async);
2718 }
2719 
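/*
 * Kick off delayed ref processing in a worker.  With wait set the caller
 * blocks until the worker finishes and gets its error code back;
 * otherwise the work item frees itself and 0 is returned.
 */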
2720 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2721 				 unsigned long count, int wait)
2722 {
2723 	struct async_delayed_refs *async;
2724 	int ret;
2725 
2726 	async = kmalloc(sizeof(*async), GFP_NOFS);
2727 	if (!async)
2728 		return -ENOMEM;
2729 
2730 	async->root = root->fs_info->tree_root;
2731 	async->count = count;
2732 	async->error = 0;
2733 	if (wait)
2734 		async->sync = 1;
2735 	else
2736 		async->sync = 0;
2737 	init_completion(&async->wait);
2738 
2739 	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2740 			delayed_ref_async_start, NULL, NULL);
2741 
2742 	btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2743 
2744 	if (wait) {
2745 		wait_for_completion(&async->wait);
2746 		ret = async->error;
2747 		kfree(async);
2748 		return ret;
2749 	}
2750 	return 0;
2751 }
2752 
2753 /*
2754  * this starts processing the delayed reference count updates and
2755  * extent insertions we have queued up so far.  count can be
2756  * 0, which means to process everything in the tree at the start
2757  * of the run (but not newly added entries), or it can be some target
2758  * number you'd like to process.
2759  *
2760  * Returns 0 on success or if called with an aborted transaction
2761  * Returns <0 on error and aborts the transaction
2762  */
2763 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2764 			   struct btrfs_root *root, unsigned long count)
2765 {
2766 	struct rb_node *node;
2767 	struct btrfs_delayed_ref_root *delayed_refs;
2768 	struct btrfs_delayed_ref_head *head;
2769 	int ret;
2770 	int run_all = count == (unsigned long)-1;
2771 	int run_most = 0;
2772 
2773 	/* We'll clean this up in btrfs_cleanup_transaction */
2774 	if (trans->aborted)
2775 		return 0;
2776 
2777 	if (root == root->fs_info->extent_root)
2778 		root = root->fs_info->tree_root;
2779 
2780 	delayed_refs = &trans->transaction->delayed_refs;
2781 	if (count == 0) {
2782 		count = atomic_read(&delayed_refs->num_entries) * 2;
2783 		run_most = 1;
2784 	}
2785 
2786 again:
2787 #ifdef SCRAMBLE_DELAYED_REFS
2788 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2789 #endif
2790 	ret = __btrfs_run_delayed_refs(trans, root, count);
2791 	if (ret < 0) {
2792 		btrfs_abort_transaction(trans, root, ret);
2793 		return ret;
2794 	}
2795 
2796 	if (run_all) {
2797 		if (!list_empty(&trans->new_bgs))
2798 			btrfs_create_pending_block_groups(trans, root);
2799 
2800 		spin_lock(&delayed_refs->lock);
2801 		node = rb_first(&delayed_refs->href_root);
2802 		if (!node) {
2803 			spin_unlock(&delayed_refs->lock);
2804 			goto out;
2805 		}
2806 		count = (unsigned long)-1;
2807 
2808 		while (node) {
2809 			head = rb_entry(node, struct btrfs_delayed_ref_head,
2810 					href_node);
2811 			if (btrfs_delayed_ref_is_head(&head->node)) {
2812 				struct btrfs_delayed_ref_node *ref;
2813 
2814 				ref = &head->node;
2815 				atomic_inc(&ref->refs);
2816 
2817 				spin_unlock(&delayed_refs->lock);
2818 				/*
2819 				 * Mutex was contended, block until it's
2820 				 * released and try again
2821 				 */
2822 				mutex_lock(&head->mutex);
2823 				mutex_unlock(&head->mutex);
2824 
2825 				btrfs_put_delayed_ref(ref);
2826 				cond_resched();
2827 				goto again;
2828 			} else {
2829 				WARN_ON(1);
2830 			}
2831 			node = rb_next(node);
2832 		}
2833 		spin_unlock(&delayed_refs->lock);
2834 		cond_resched();
2835 		goto again;
2836 	}
2837 out:
2838 	ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2839 	if (ret)
2840 		return ret;
2841 	assert_qgroups_uptodate(trans);
2842 	return 0;
2843 }
2844 
2845 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2846 				struct btrfs_root *root,
2847 				u64 bytenr, u64 num_bytes, u64 flags,
2848 				int level, int is_data)
2849 {
2850 	struct btrfs_delayed_extent_op *extent_op;
2851 	int ret;
2852 
2853 	extent_op = btrfs_alloc_delayed_extent_op();
2854 	if (!extent_op)
2855 		return -ENOMEM;
2856 
2857 	extent_op->flags_to_set = flags;
2858 	extent_op->update_flags = 1;
2859 	extent_op->update_key = 0;
2860 	extent_op->is_data = is_data ? 1 : 0;
2861 	extent_op->level = level;
2862 
2863 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2864 					  num_bytes, extent_op);
2865 	if (ret)
2866 		btrfs_free_delayed_extent_op(extent_op);
2867 	return ret;
2868 }
2869 
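/*
 * Scan the pending delayed refs for @bytenr to see whether anything
 * other than the given root/objectid/offset holds a reference.  Returns
 * 1 if a cross reference exists, 0 if not, and -EAGAIN if the head mutex
 * was contended and the caller should retry.
 */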
2870 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2871 				      struct btrfs_root *root,
2872 				      struct btrfs_path *path,
2873 				      u64 objectid, u64 offset, u64 bytenr)
2874 {
2875 	struct btrfs_delayed_ref_head *head;
2876 	struct btrfs_delayed_ref_node *ref;
2877 	struct btrfs_delayed_data_ref *data_ref;
2878 	struct btrfs_delayed_ref_root *delayed_refs;
2879 	struct rb_node *node;
2880 	int ret = 0;
2881 
2882 	delayed_refs = &trans->transaction->delayed_refs;
2883 	spin_lock(&delayed_refs->lock);
2884 	head = btrfs_find_delayed_ref_head(trans, bytenr);
2885 	if (!head) {
2886 		spin_unlock(&delayed_refs->lock);
2887 		return 0;
2888 	}
2889 
2890 	if (!mutex_trylock(&head->mutex)) {
2891 		atomic_inc(&head->node.refs);
2892 		spin_unlock(&delayed_refs->lock);
2893 
2894 		btrfs_release_path(path);
2895 
2896 		/*
2897 		 * Mutex was contended, block until it's released and let
2898 		 * caller try again
2899 		 */
2900 		mutex_lock(&head->mutex);
2901 		mutex_unlock(&head->mutex);
2902 		btrfs_put_delayed_ref(&head->node);
2903 		return -EAGAIN;
2904 	}
2905 	spin_unlock(&delayed_refs->lock);
2906 
2907 	spin_lock(&head->lock);
2908 	node = rb_first(&head->ref_root);
2909 	while (node) {
2910 		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2911 		node = rb_next(node);
2912 
2913 		/* If it's a shared ref we know a cross reference exists */
2914 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2915 			ret = 1;
2916 			break;
2917 		}
2918 
2919 		data_ref = btrfs_delayed_node_to_data_ref(ref);
2920 
2921 		/*
2922 		 * If our ref doesn't match the one we're currently looking at
2923 		 * then we have a cross reference.
2924 		 */
2925 		if (data_ref->root != root->root_key.objectid ||
2926 		    data_ref->objectid != objectid ||
2927 		    data_ref->offset != offset) {
2928 			ret = 1;
2929 			break;
2930 		}
2931 	}
2932 	spin_unlock(&head->lock);
2933 	mutex_unlock(&head->mutex);
2934 	return ret;
2935 }
2936 
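/*
 * Check the committed extent tree for cross references on a data extent.
 * Returns 0 only when the extent item has a single inline data ref that
 * matches the given root/objectid/offset and the extent is newer than
 * the root's last snapshot; otherwise returns 1 (possibly shared) or
 * -ENOENT if no matching extent item was found.
 */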
2937 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2938 					struct btrfs_root *root,
2939 					struct btrfs_path *path,
2940 					u64 objectid, u64 offset, u64 bytenr)
2941 {
2942 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2943 	struct extent_buffer *leaf;
2944 	struct btrfs_extent_data_ref *ref;
2945 	struct btrfs_extent_inline_ref *iref;
2946 	struct btrfs_extent_item *ei;
2947 	struct btrfs_key key;
2948 	u32 item_size;
2949 	int ret;
2950 
2951 	key.objectid = bytenr;
2952 	key.offset = (u64)-1;
2953 	key.type = BTRFS_EXTENT_ITEM_KEY;
2954 
2955 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2956 	if (ret < 0)
2957 		goto out;
2958 	BUG_ON(ret == 0); /* Corruption */
2959 
2960 	ret = -ENOENT;
2961 	if (path->slots[0] == 0)
2962 		goto out;
2963 
2964 	path->slots[0]--;
2965 	leaf = path->nodes[0];
2966 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2967 
2968 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2969 		goto out;
2970 
2971 	ret = 1;
2972 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2973 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2974 	if (item_size < sizeof(*ei)) {
2975 		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2976 		goto out;
2977 	}
2978 #endif
2979 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2980 
2981 	if (item_size != sizeof(*ei) +
2982 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2983 		goto out;
2984 
2985 	if (btrfs_extent_generation(leaf, ei) <=
2986 	    btrfs_root_last_snapshot(&root->root_item))
2987 		goto out;
2988 
2989 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2990 	if (btrfs_extent_inline_ref_type(leaf, iref) !=
2991 	    BTRFS_EXTENT_DATA_REF_KEY)
2992 		goto out;
2993 
2994 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2995 	if (btrfs_extent_refs(leaf, ei) !=
2996 	    btrfs_extent_data_ref_count(leaf, ref) ||
2997 	    btrfs_extent_data_ref_root(leaf, ref) !=
2998 	    root->root_key.objectid ||
2999 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3000 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
3001 		goto out;
3002 
3003 	ret = 0;
3004 out:
3005 	return ret;
3006 }
3007 
3008 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3009 			  struct btrfs_root *root,
3010 			  u64 objectid, u64 offset, u64 bytenr)
3011 {
3012 	struct btrfs_path *path;
3013 	int ret;
3014 	int ret2;
3015 
3016 	path = btrfs_alloc_path();
3017 	if (!path)
3018 		return -ENOENT;
3019 
3020 	do {
3021 		ret = check_committed_ref(trans, root, path, objectid,
3022 					  offset, bytenr);
3023 		if (ret && ret != -ENOENT)
3024 			goto out;
3025 
3026 		ret2 = check_delayed_ref(trans, root, path, objectid,
3027 					 offset, bytenr);
3028 	} while (ret2 == -EAGAIN);
3029 
3030 	if (ret2 && ret2 != -ENOENT) {
3031 		ret = ret2;
3032 		goto out;
3033 	}
3034 
3035 	if (ret != -ENOENT || ret2 != -ENOENT)
3036 		ret = 0;
3037 out:
3038 	btrfs_free_path(path);
3039 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3040 		WARN_ON(ret > 0);
3041 	return ret;
3042 }
3043 
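/*
 * Walk every pointer in @buf and add or drop one reference for each
 * extent it points to: file extent items in leaves, child block pointers
 * in nodes.  With full_backref set the refs are shared (parented by the
 * buffer itself) rather than keyed by the owning root.
 */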
3044 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3045 			   struct btrfs_root *root,
3046 			   struct extent_buffer *buf,
3047 			   int full_backref, int inc)
3048 {
3049 	u64 bytenr;
3050 	u64 num_bytes;
3051 	u64 parent;
3052 	u64 ref_root;
3053 	u32 nritems;
3054 	struct btrfs_key key;
3055 	struct btrfs_file_extent_item *fi;
3056 	int i;
3057 	int level;
3058 	int ret = 0;
3059 	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3060 			    u64, u64, u64, u64, u64, u64, int);
3061 
3062 
3063 	if (btrfs_test_is_dummy_root(root))
3064 		return 0;
3065 
3066 	ref_root = btrfs_header_owner(buf);
3067 	nritems = btrfs_header_nritems(buf);
3068 	level = btrfs_header_level(buf);
3069 
3070 	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3071 		return 0;
3072 
3073 	if (inc)
3074 		process_func = btrfs_inc_extent_ref;
3075 	else
3076 		process_func = btrfs_free_extent;
3077 
3078 	if (full_backref)
3079 		parent = buf->start;
3080 	else
3081 		parent = 0;
3082 
3083 	for (i = 0; i < nritems; i++) {
3084 		if (level == 0) {
3085 			btrfs_item_key_to_cpu(buf, &key, i);
3086 			if (key.type != BTRFS_EXTENT_DATA_KEY)
3087 				continue;
3088 			fi = btrfs_item_ptr(buf, i,
3089 					    struct btrfs_file_extent_item);
3090 			if (btrfs_file_extent_type(buf, fi) ==
3091 			    BTRFS_FILE_EXTENT_INLINE)
3092 				continue;
3093 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3094 			if (bytenr == 0)
3095 				continue;
3096 
3097 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3098 			key.offset -= btrfs_file_extent_offset(buf, fi);
3099 			ret = process_func(trans, root, bytenr, num_bytes,
3100 					   parent, ref_root, key.objectid,
3101 					   key.offset, 1);
3102 			if (ret)
3103 				goto fail;
3104 		} else {
3105 			bytenr = btrfs_node_blockptr(buf, i);
3106 			num_bytes = root->nodesize;
3107 			ret = process_func(trans, root, bytenr, num_bytes,
3108 					   parent, ref_root, level - 1, 0,
3109 					   1);
3110 			if (ret)
3111 				goto fail;
3112 		}
3113 	}
3114 	return 0;
3115 fail:
3116 	return ret;
3117 }
3118 
3119 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3120 		  struct extent_buffer *buf, int full_backref)
3121 {
3122 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3123 }
3124 
3125 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3126 		  struct extent_buffer *buf, int full_backref)
3127 {
3128 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3129 }
3130 
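/*
 * Copy the in-memory block group item for @cache back into its slot in
 * the extent tree.  A search error aborts the transaction; the item not
 * being present at all is treated as corruption.
 */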
3131 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3132 				 struct btrfs_root *root,
3133 				 struct btrfs_path *path,
3134 				 struct btrfs_block_group_cache *cache)
3135 {
3136 	int ret;
3137 	struct btrfs_root *extent_root = root->fs_info->extent_root;
3138 	unsigned long bi;
3139 	struct extent_buffer *leaf;
3140 
3141 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3142 	if (ret < 0)
3143 		goto fail;
3144 	BUG_ON(ret); /* Corruption */
3145 
3146 	leaf = path->nodes[0];
3147 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3148 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3149 	btrfs_mark_buffer_dirty(leaf);
3150 	btrfs_release_path(path);
3151 fail:
3152 	if (ret) {
3153 		btrfs_abort_transaction(trans, root, ret);
3154 		return ret;
3155 	}
3156 	return 0;
3157 
3158 }
3159 
3160 static struct btrfs_block_group_cache *
3161 next_block_group(struct btrfs_root *root,
3162 		 struct btrfs_block_group_cache *cache)
3163 {
3164 	struct rb_node *node;
3165 	spin_lock(&root->fs_info->block_group_cache_lock);
3166 	node = rb_next(&cache->cache_node);
3167 	btrfs_put_block_group(cache);
3168 	if (node) {
3169 		cache = rb_entry(node, struct btrfs_block_group_cache,
3170 				 cache_node);
3171 		btrfs_get_block_group(cache);
3172 	} else
3173 		cache = NULL;
3174 	spin_unlock(&root->fs_info->block_group_cache_lock);
3175 	return cache;
3176 }
3177 
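/*
 * Prepare the free space cache inode for a block group so the cache can
 * be written at commit time: look up (or create) the inode, truncate any
 * stale contents and preallocate room for the new cache, scaled to the
 * size of the block group.  Block groups under 100MB, and groups that
 * aren't fully cached yet, don't get an on-disk cache.
 */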
3178 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3179 			    struct btrfs_trans_handle *trans,
3180 			    struct btrfs_path *path)
3181 {
3182 	struct btrfs_root *root = block_group->fs_info->tree_root;
3183 	struct inode *inode = NULL;
3184 	u64 alloc_hint = 0;
3185 	int dcs = BTRFS_DC_ERROR;
3186 	int num_pages = 0;
3187 	int retries = 0;
3188 	int ret = 0;
3189 
3190 	/*
3191 	 * If this block group is smaller than 100 megs don't bother caching the
3192 	 * block group.
3193 	 */
3194 	if (block_group->key.offset < (100 * 1024 * 1024)) {
3195 		spin_lock(&block_group->lock);
3196 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3197 		spin_unlock(&block_group->lock);
3198 		return 0;
3199 	}
3200 
3201 again:
3202 	inode = lookup_free_space_inode(root, block_group, path);
3203 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3204 		ret = PTR_ERR(inode);
3205 		btrfs_release_path(path);
3206 		goto out;
3207 	}
3208 
3209 	if (IS_ERR(inode)) {
3210 		BUG_ON(retries);
3211 		retries++;
3212 
3213 		if (block_group->ro)
3214 			goto out_free;
3215 
3216 		ret = create_free_space_inode(root, trans, block_group, path);
3217 		if (ret)
3218 			goto out_free;
3219 		goto again;
3220 	}
3221 
3222 	/* We've already setup this transaction, go ahead and exit */
3223 	if (block_group->cache_generation == trans->transid &&
3224 	    i_size_read(inode)) {
3225 		dcs = BTRFS_DC_SETUP;
3226 		goto out_put;
3227 	}
3228 
3229 	/*
3230 	 * We want to set the generation to 0, that way if anything goes wrong
3231 	 * from here on out we know not to trust this cache when we load up next
3232 	 * time.
3233 	 */
3234 	BTRFS_I(inode)->generation = 0;
3235 	ret = btrfs_update_inode(trans, root, inode);
3236 	WARN_ON(ret);
3237 
3238 	if (i_size_read(inode) > 0) {
3239 		ret = btrfs_check_trunc_cache_free_space(root,
3240 					&root->fs_info->global_block_rsv);
3241 		if (ret)
3242 			goto out_put;
3243 
3244 		ret = btrfs_truncate_free_space_cache(root, trans, inode);
3245 		if (ret)
3246 			goto out_put;
3247 	}
3248 
3249 	spin_lock(&block_group->lock);
3250 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
3251 	    !btrfs_test_opt(root, SPACE_CACHE) ||
3252 	    block_group->delalloc_bytes) {
3253 		/*
3254 		 * don't bother trying to write stuff out _if_ a) we're not cached,
3255 		 * b) we're mounted with the nospace_cache option, or
3256 		 * c) the block group still has delalloc bytes in flight.
3257 		 */
3258 		dcs = BTRFS_DC_WRITTEN;
3259 		spin_unlock(&block_group->lock);
3260 		goto out_put;
3261 	}
3262 	spin_unlock(&block_group->lock);
3263 
3264 	/*
3265 	 * Try to preallocate enough space based on how big the block group is.
3266 	 * Keep in mind this has to include any pinned space which could end up
3267 	 * taking up quite a bit since it's not folded into the other space
3268 	 * cache.
3269 	 */
3270 	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3271 	if (!num_pages)
3272 		num_pages = 1;
3273 
3274 	num_pages *= 16;
3275 	num_pages *= PAGE_CACHE_SIZE;
3276 
3277 	ret = btrfs_check_data_free_space(inode, num_pages);
3278 	if (ret)
3279 		goto out_put;
3280 
3281 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3282 					      num_pages, num_pages,
3283 					      &alloc_hint);
3284 	if (!ret)
3285 		dcs = BTRFS_DC_SETUP;
3286 	btrfs_free_reserved_data_space(inode, num_pages);
3287 
3288 out_put:
3289 	iput(inode);
3290 out_free:
3291 	btrfs_release_path(path);
3292 out:
3293 	spin_lock(&block_group->lock);
3294 	if (!ret && dcs == BTRFS_DC_SETUP)
3295 		block_group->cache_generation = trans->transid;
3296 	block_group->disk_cache_state = dcs;
3297 	spin_unlock(&block_group->lock);
3298 
3299 	return ret;
3300 }
3301 
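/*
 * Write out every dirty block group at commit time.  Three passes: set
 * up free space cache inodes for groups still in the CLEAR state, write
 * the block group items themselves (re-running delayed refs as needed),
 * and finally write out the free space caches set up in the first pass.
 */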
3302 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3303 				   struct btrfs_root *root)
3304 {
3305 	struct btrfs_block_group_cache *cache;
3306 	int err = 0;
3307 	struct btrfs_path *path;
3308 	u64 last = 0;
3309 
3310 	path = btrfs_alloc_path();
3311 	if (!path)
3312 		return -ENOMEM;
3313 
3314 again:
3315 	while (1) {
3316 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3317 		while (cache) {
3318 			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3319 				break;
3320 			cache = next_block_group(root, cache);
3321 		}
3322 		if (!cache) {
3323 			if (last == 0)
3324 				break;
3325 			last = 0;
3326 			continue;
3327 		}
3328 		err = cache_save_setup(cache, trans, path);
3329 		last = cache->key.objectid + cache->key.offset;
3330 		btrfs_put_block_group(cache);
3331 	}
3332 
3333 	while (1) {
3334 		if (last == 0) {
3335 			err = btrfs_run_delayed_refs(trans, root,
3336 						     (unsigned long)-1);
3337 			if (err) /* File system offline */
3338 				goto out;
3339 		}
3340 
3341 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3342 		while (cache) {
3343 			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3344 				btrfs_put_block_group(cache);
3345 				goto again;
3346 			}
3347 
3348 			if (cache->dirty)
3349 				break;
3350 			cache = next_block_group(root, cache);
3351 		}
3352 		if (!cache) {
3353 			if (last == 0)
3354 				break;
3355 			last = 0;
3356 			continue;
3357 		}
3358 
3359 		if (cache->disk_cache_state == BTRFS_DC_SETUP)
3360 			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3361 		cache->dirty = 0;
3362 		last = cache->key.objectid + cache->key.offset;
3363 
3364 		err = write_one_cache_group(trans, root, path, cache);
3365 		btrfs_put_block_group(cache);
3366 		if (err) /* File system offline */
3367 			goto out;
3368 	}
3369 
3370 	while (1) {
3371 		/*
3372 		 * I don't think this is needed since we're just marking our
3373 		 * preallocated extent as written, but just in case it can't
3374 		 * hurt.
3375 		 */
3376 		if (last == 0) {
3377 			err = btrfs_run_delayed_refs(trans, root,
3378 						     (unsigned long)-1);
3379 			if (err) /* File system offline */
3380 				goto out;
3381 		}
3382 
3383 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3384 		while (cache) {
3385 			/*
3386 			 * Really this shouldn't happen, but it could if we
3387 			 * couldn't write the entire preallocated extent and
3388 			 * splitting the extent resulted in a new block.
3389 			 */
3390 			if (cache->dirty) {
3391 				btrfs_put_block_group(cache);
3392 				goto again;
3393 			}
3394 			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3395 				break;
3396 			cache = next_block_group(root, cache);
3397 		}
3398 		if (!cache) {
3399 			if (last == 0)
3400 				break;
3401 			last = 0;
3402 			continue;
3403 		}
3404 
3405 		err = btrfs_write_out_cache(root, trans, cache, path);
3406 
3407 		/*
3408 		 * If we didn't have an error then the cache state is still
3409 		 * NEED_WRITE, so we can set it to WRITTEN.
3410 		 */
3411 		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3412 			cache->disk_cache_state = BTRFS_DC_WRITTEN;
3413 		last = cache->key.objectid + cache->key.offset;
3414 		btrfs_put_block_group(cache);
3415 	}
3416 out:
3417 
3418 	btrfs_free_path(path);
3419 	return err;
3420 }
3421 
3422 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3423 {
3424 	struct btrfs_block_group_cache *block_group;
3425 	int readonly = 0;
3426 
3427 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3428 	if (!block_group || block_group->ro)
3429 		readonly = 1;
3430 	if (block_group)
3431 		btrfs_put_block_group(block_group);
3432 	return readonly;
3433 }
3434 
3435 static const char *alloc_name(u64 flags)
3436 {
3437 	switch (flags) {
3438 	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3439 		return "mixed";
3440 	case BTRFS_BLOCK_GROUP_METADATA:
3441 		return "metadata";
3442 	case BTRFS_BLOCK_GROUP_DATA:
3443 		return "data";
3444 	case BTRFS_BLOCK_GROUP_SYSTEM:
3445 		return "system";
3446 	default:
3447 		WARN_ON(1);
3448 		return "invalid-combination";
3449 	};
3450 }
3451 
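/*
 * Find or create the space_info for the given block group type.  An
 * existing entry just has its totals bumped; a new entry gets its
 * per-RAID lists, counters and sysfs kobject initialized before being
 * added to fs_info->space_info.
 */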
3452 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3453 			     u64 total_bytes, u64 bytes_used,
3454 			     struct btrfs_space_info **space_info)
3455 {
3456 	struct btrfs_space_info *found;
3457 	int i;
3458 	int factor;
3459 	int ret;
3460 
3461 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3462 		     BTRFS_BLOCK_GROUP_RAID10))
3463 		factor = 2;
3464 	else
3465 		factor = 1;
3466 
3467 	found = __find_space_info(info, flags);
3468 	if (found) {
3469 		spin_lock(&found->lock);
3470 		found->total_bytes += total_bytes;
3471 		found->disk_total += total_bytes * factor;
3472 		found->bytes_used += bytes_used;
3473 		found->disk_used += bytes_used * factor;
3474 		found->full = 0;
3475 		spin_unlock(&found->lock);
3476 		*space_info = found;
3477 		return 0;
3478 	}
3479 	found = kzalloc(sizeof(*found), GFP_NOFS);
3480 	if (!found)
3481 		return -ENOMEM;
3482 
3483 	ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3484 	if (ret) {
3485 		kfree(found);
3486 		return ret;
3487 	}
3488 
3489 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3490 		INIT_LIST_HEAD(&found->block_groups[i]);
3491 	init_rwsem(&found->groups_sem);
3492 	spin_lock_init(&found->lock);
3493 	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3494 	found->total_bytes = total_bytes;
3495 	found->disk_total = total_bytes * factor;
3496 	found->bytes_used = bytes_used;
3497 	found->disk_used = bytes_used * factor;
3498 	found->bytes_pinned = 0;
3499 	found->bytes_reserved = 0;
3500 	found->bytes_readonly = 0;
3501 	found->bytes_may_use = 0;
3502 	found->full = 0;
3503 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3504 	found->chunk_alloc = 0;
3505 	found->flush = 0;
3506 	init_waitqueue_head(&found->wait);
3507 
3508 	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3509 				    info->space_info_kobj, "%s",
3510 				    alloc_name(found->flags));
3511 	if (ret) {
3512 		kfree(found);
3513 		return ret;
3514 	}
3515 
3516 	*space_info = found;
3517 	list_add_rcu(&found->list, &info->space_info);
3518 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3519 		info->data_sinfo = found;
3520 
3521 	return ret;
3522 }
3523 
3524 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3525 {
3526 	u64 extra_flags = chunk_to_extended(flags) &
3527 				BTRFS_EXTENDED_PROFILE_MASK;
3528 
3529 	write_seqlock(&fs_info->profiles_lock);
3530 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3531 		fs_info->avail_data_alloc_bits |= extra_flags;
3532 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
3533 		fs_info->avail_metadata_alloc_bits |= extra_flags;
3534 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3535 		fs_info->avail_system_alloc_bits |= extra_flags;
3536 	write_sequnlock(&fs_info->profiles_lock);
3537 }
3538 
3539 /*
3540  * returns target flags in extended format or 0 if restripe for this
3541  * chunk_type is not in progress
3542  *
3543  * should be called with either volume_mutex or balance_lock held
3544  */
3545 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3546 {
3547 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3548 	u64 target = 0;
3549 
3550 	if (!bctl)
3551 		return 0;
3552 
3553 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
3554 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3555 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3556 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3557 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3558 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3559 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3560 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3561 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3562 	}
3563 
3564 	return target;
3565 }
3566 
3567 /*
3568  * @flags: available profiles in extended format (see ctree.h)
3569  *
3570  * Returns reduced profile in chunk format.  If profile changing is in
3571  * progress (either running or paused) picks the target profile (if it's
3572  * already available), otherwise falls back to plain reducing.
3573  */
3574 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3575 {
3576 	u64 num_devices = root->fs_info->fs_devices->rw_devices;
3577 	u64 target;
3578 	u64 tmp;
3579 
3580 	/*
3581 	 * see if restripe for this chunk_type is in progress, if so
3582 	 * try to reduce to the target profile
3583 	 */
3584 	spin_lock(&root->fs_info->balance_lock);
3585 	target = get_restripe_target(root->fs_info, flags);
3586 	if (target) {
3587 		/* pick target profile only if it's already available */
3588 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3589 			spin_unlock(&root->fs_info->balance_lock);
3590 			return extended_to_chunk(target);
3591 		}
3592 	}
3593 	spin_unlock(&root->fs_info->balance_lock);
3594 
3595 	/* First, mask out the RAID levels which aren't possible */
3596 	if (num_devices == 1)
3597 		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3598 			   BTRFS_BLOCK_GROUP_RAID5);
3599 	if (num_devices < 3)
3600 		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3601 	if (num_devices < 4)
3602 		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3603 
3604 	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3605 		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3606 		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3607 	flags &= ~tmp;
3608 
3609 	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3610 		tmp = BTRFS_BLOCK_GROUP_RAID6;
3611 	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3612 		tmp = BTRFS_BLOCK_GROUP_RAID5;
3613 	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3614 		tmp = BTRFS_BLOCK_GROUP_RAID10;
3615 	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3616 		tmp = BTRFS_BLOCK_GROUP_RAID1;
3617 	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3618 		tmp = BTRFS_BLOCK_GROUP_RAID0;
3619 
3620 	return extended_to_chunk(flags | tmp);
3621 }
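
/*
 * Worked example (illustrative, profile bits assumed): with two rw devices
 * and extended flags of DATA | RAID1 | RAID0, the per-device masking above
 * leaves RAID1 and RAID0 as candidates, the priority chain prefers RAID1,
 * and the function returns the chunk-format DATA | RAID1 profile.
 */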
3622 
3623 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3624 {
3625 	unsigned seq;
3626 	u64 flags;
3627 
3628 	do {
3629 		flags = orig_flags;
3630 		seq = read_seqbegin(&root->fs_info->profiles_lock);
3631 
3632 		if (flags & BTRFS_BLOCK_GROUP_DATA)
3633 			flags |= root->fs_info->avail_data_alloc_bits;
3634 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3635 			flags |= root->fs_info->avail_system_alloc_bits;
3636 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3637 			flags |= root->fs_info->avail_metadata_alloc_bits;
3638 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
3639 
3640 	return btrfs_reduce_alloc_profile(root, flags);
3641 }
3642 
3643 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3644 {
3645 	u64 flags;
3646 	u64 ret;
3647 
3648 	if (data)
3649 		flags = BTRFS_BLOCK_GROUP_DATA;
3650 	else if (root == root->fs_info->chunk_root)
3651 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
3652 	else
3653 		flags = BTRFS_BLOCK_GROUP_METADATA;
3654 
3655 	ret = get_alloc_profile(root, flags);
3656 	return ret;
3657 }
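
/*
 * For example, a data allocation calls btrfs_get_alloc_profile(root, 1) and
 * gets BTRFS_BLOCK_GROUP_DATA combined with whatever data RAID bits are
 * currently available on this filesystem, reduced to chunk format by the
 * helpers above.
 */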
3658 
3659 /*
3660  * This will check the space info that the inode allocates from to make sure
3661  * we have enough space for the requested number of bytes.
3662  */
3663 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3664 {
3665 	struct btrfs_space_info *data_sinfo;
3666 	struct btrfs_root *root = BTRFS_I(inode)->root;
3667 	struct btrfs_fs_info *fs_info = root->fs_info;
3668 	u64 used;
3669 	int ret = 0, committed = 0, alloc_chunk = 1;
3670 
3671 	/* make sure bytes are sectorsize aligned */
3672 	bytes = ALIGN(bytes, root->sectorsize);
3673 
3674 	if (btrfs_is_free_space_inode(inode)) {
3675 		committed = 1;
3676 		ASSERT(current->journal_info);
3677 	}
3678 
3679 	data_sinfo = fs_info->data_sinfo;
3680 	if (!data_sinfo)
3681 		goto alloc;
3682 
3683 again:
3684 	/* make sure we have enough space to handle the data first */
3685 	spin_lock(&data_sinfo->lock);
3686 	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3687 		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3688 		data_sinfo->bytes_may_use;
3689 
3690 	if (used + bytes > data_sinfo->total_bytes) {
3691 		struct btrfs_trans_handle *trans;
3692 
3693 		/*
3694 		 * if we don't have enough free bytes in this space then we need
3695 		 * to alloc a new chunk.
3696 		 */
3697 		if (!data_sinfo->full && alloc_chunk) {
3698 			u64 alloc_target;
3699 
3700 			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3701 			spin_unlock(&data_sinfo->lock);
3702 alloc:
3703 			alloc_target = btrfs_get_alloc_profile(root, 1);
3704 			/*
3705 			 * It is ugly that we don't use the nolock join
3706 			 * transaction for the free space inode case here.
3707 			 * But it is safe because we only do the data space
3708 			 * reservation for the free space cache in the
3709 			 * transaction context; the common join transaction
3710 			 * just increases the use count of the current
3711 			 * transaction handle and doesn't try to acquire the
3712 			 * trans_lock of the fs.
3713 			 */
3714 			trans = btrfs_join_transaction(root);
3715 			if (IS_ERR(trans))
3716 				return PTR_ERR(trans);
3717 
3718 			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3719 					     alloc_target,
3720 					     CHUNK_ALLOC_NO_FORCE);
3721 			btrfs_end_transaction(trans, root);
3722 			if (ret < 0) {
3723 				if (ret != -ENOSPC)
3724 					return ret;
3725 				else
3726 					goto commit_trans;
3727 			}
3728 
3729 			if (!data_sinfo)
3730 				data_sinfo = fs_info->data_sinfo;
3731 
3732 			goto again;
3733 		}
3734 
3735 		/*
3736 		 * If we don't have enough pinned space to deal with this
3737 		 * allocation don't bother committing the transaction.
3738 		 */
3739 		if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3740 					   bytes) < 0)
3741 			committed = 1;
3742 		spin_unlock(&data_sinfo->lock);
3743 
3744 		/* commit the current transaction and try again */
3745 commit_trans:
3746 		if (!committed &&
3747 		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
3748 			committed = 1;
3749 
3750 			trans = btrfs_join_transaction(root);
3751 			if (IS_ERR(trans))
3752 				return PTR_ERR(trans);
3753 			ret = btrfs_commit_transaction(trans, root);
3754 			if (ret)
3755 				return ret;
3756 			goto again;
3757 		}
3758 
3759 		trace_btrfs_space_reservation(root->fs_info,
3760 					      "space_info:enospc",
3761 					      data_sinfo->flags, bytes, 1);
3762 		return -ENOSPC;
3763 	}
3764 	data_sinfo->bytes_may_use += bytes;
3765 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3766 				      data_sinfo->flags, bytes, 1);
3767 	spin_unlock(&data_sinfo->lock);
3768 
3769 	return 0;
3770 }
3771 
3772 /*
3773  * Called if we need to clear a data reservation for this inode.
3774  */
3775 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3776 {
3777 	struct btrfs_root *root = BTRFS_I(inode)->root;
3778 	struct btrfs_space_info *data_sinfo;
3779 
3780 	/* make sure bytes are sectorsize aligned */
3781 	bytes = ALIGN(bytes, root->sectorsize);
3782 
3783 	data_sinfo = root->fs_info->data_sinfo;
3784 	spin_lock(&data_sinfo->lock);
3785 	WARN_ON(data_sinfo->bytes_may_use < bytes);
3786 	data_sinfo->bytes_may_use -= bytes;
3787 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3788 				      data_sinfo->flags, bytes, 0);
3789 	spin_unlock(&data_sinfo->lock);
3790 }
3791 
3792 static void force_metadata_allocation(struct btrfs_fs_info *info)
3793 {
3794 	struct list_head *head = &info->space_info;
3795 	struct btrfs_space_info *found;
3796 
3797 	rcu_read_lock();
3798 	list_for_each_entry_rcu(found, head, list) {
3799 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3800 			found->force_alloc = CHUNK_ALLOC_FORCE;
3801 	}
3802 	rcu_read_unlock();
3803 }
3804 
3805 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3806 {
3807 	return (global->size << 1);
3808 }
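
/*
 * The callers below treat twice the global reserve's size as space that is
 * effectively already in use when deciding whether to allocate a new chunk
 * or to allow an overcommit, which leaves extra headroom for the reserve to
 * grow.
 */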
3809 
3810 static int should_alloc_chunk(struct btrfs_root *root,
3811 			      struct btrfs_space_info *sinfo, int force)
3812 {
3813 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3814 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3815 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3816 	u64 thresh;
3817 
3818 	if (force == CHUNK_ALLOC_FORCE)
3819 		return 1;
3820 
3821 	/*
3822 	 * We need to take into account the global rsv because for all intents
3823 	 * and purposes it's used space.  Don't worry about locking the
3824 	 * global_rsv, it doesn't change except when the transaction commits.
3825 	 */
3826 	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3827 		num_allocated += calc_global_rsv_need_space(global_rsv);
3828 
3829 	/*
3830 	 * In limited mode, we want to keep some free space available,
3831 	 * up to about 1% of the FS size.
3832 	 */
3833 	if (force == CHUNK_ALLOC_LIMITED) {
3834 		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3835 		thresh = max_t(u64, 64 * 1024 * 1024,
3836 			       div_factor_fine(thresh, 1));
3837 
3838 		if (num_bytes - num_allocated < thresh)
3839 			return 1;
3840 	}
3841 
3842 	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3843 		return 0;
3844 	return 1;
3845 }
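
/*
 * Worked example (illustrative, numbers assumed): on a 1TiB filesystem in
 * CHUNK_ALLOC_LIMITED mode the threshold above is max(64MiB, 1% of 1TiB),
 * roughly 10GiB, so a new chunk is requested once the free room left in this
 * space info drops below that.  Otherwise a chunk is allocated only once the
 * allocated bytes (plus a 2MiB fudge) reach 80% of the writable bytes in the
 * space info.
 */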
3846 
3847 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3848 {
3849 	u64 num_dev;
3850 
3851 	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3852 		    BTRFS_BLOCK_GROUP_RAID0 |
3853 		    BTRFS_BLOCK_GROUP_RAID5 |
3854 		    BTRFS_BLOCK_GROUP_RAID6))
3855 		num_dev = root->fs_info->fs_devices->rw_devices;
3856 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
3857 		num_dev = 2;
3858 	else
3859 		num_dev = 1;	/* DUP or single */
3860 
3861 	/* metadata for updating devices and chunk tree */
3862 	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3863 }
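
/*
 * Worked example (illustrative): a RAID1 chunk gives num_dev = 2, so the
 * threshold is the metadata reservation for three items -- roughly one chunk
 * tree update plus a device item update per device.
 */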
3864 
3865 static void check_system_chunk(struct btrfs_trans_handle *trans,
3866 			       struct btrfs_root *root, u64 type)
3867 {
3868 	struct btrfs_space_info *info;
3869 	u64 left;
3870 	u64 thresh;
3871 
3872 	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3873 	spin_lock(&info->lock);
3874 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3875 		info->bytes_reserved - info->bytes_readonly;
3876 	spin_unlock(&info->lock);
3877 
3878 	thresh = get_system_chunk_thresh(root, type);
3879 	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3880 		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3881 			left, thresh, type);
3882 		dump_space_info(info, 0, 0);
3883 	}
3884 
3885 	if (left < thresh) {
3886 		u64 flags;
3887 
3888 		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3889 		btrfs_alloc_chunk(trans, root, flags);
3890 	}
3891 }
3892 
3893 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3894 			  struct btrfs_root *extent_root, u64 flags, int force)
3895 {
3896 	struct btrfs_space_info *space_info;
3897 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
3898 	int wait_for_alloc = 0;
3899 	int ret = 0;
3900 
3901 	/* Don't re-enter if we're already allocating a chunk */
3902 	if (trans->allocating_chunk)
3903 		return -ENOSPC;
3904 
3905 	space_info = __find_space_info(extent_root->fs_info, flags);
3906 	if (!space_info) {
3907 		ret = update_space_info(extent_root->fs_info, flags,
3908 					0, 0, &space_info);
3909 		BUG_ON(ret); /* -ENOMEM */
3910 	}
3911 	BUG_ON(!space_info); /* Logic error */
3912 
3913 again:
3914 	spin_lock(&space_info->lock);
3915 	if (force < space_info->force_alloc)
3916 		force = space_info->force_alloc;
3917 	if (space_info->full) {
3918 		if (should_alloc_chunk(extent_root, space_info, force))
3919 			ret = -ENOSPC;
3920 		else
3921 			ret = 0;
3922 		spin_unlock(&space_info->lock);
3923 		return ret;
3924 	}
3925 
3926 	if (!should_alloc_chunk(extent_root, space_info, force)) {
3927 		spin_unlock(&space_info->lock);
3928 		return 0;
3929 	} else if (space_info->chunk_alloc) {
3930 		wait_for_alloc = 1;
3931 	} else {
3932 		space_info->chunk_alloc = 1;
3933 	}
3934 
3935 	spin_unlock(&space_info->lock);
3936 
3937 	mutex_lock(&fs_info->chunk_mutex);
3938 
3939 	/*
3940 	 * The chunk_mutex is held throughout the entirety of a chunk
3941 	 * allocation, so once we've acquired the chunk_mutex we know that the
3942 	 * other guy is done and we need to recheck and see if we should
3943 	 * allocate.
3944 	 */
3945 	if (wait_for_alloc) {
3946 		mutex_unlock(&fs_info->chunk_mutex);
3947 		wait_for_alloc = 0;
3948 		goto again;
3949 	}
3950 
3951 	trans->allocating_chunk = true;
3952 
3953 	/*
3954 	 * If we have mixed data/metadata chunks we want to make sure we keep
3955 	 * allocating mixed chunks instead of individual chunks.
3956 	 */
3957 	if (btrfs_mixed_space_info(space_info))
3958 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3959 
3960 	/*
3961 	 * if we're doing a data chunk, go ahead and make sure that
3962 	 * we keep a reasonable number of metadata chunks allocated in the
3963 	 * FS as well.
3964 	 */
3965 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3966 		fs_info->data_chunk_allocations++;
3967 		if (!(fs_info->data_chunk_allocations %
3968 		      fs_info->metadata_ratio))
3969 			force_metadata_allocation(fs_info);
3970 	}
3971 
3972 	/*
3973 	 * Check if we have enough space in SYSTEM chunk because we may need
3974 	 * to update devices.
3975 	 */
3976 	check_system_chunk(trans, extent_root, flags);
3977 
3978 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
3979 	trans->allocating_chunk = false;
3980 
3981 	spin_lock(&space_info->lock);
3982 	if (ret < 0 && ret != -ENOSPC)
3983 		goto out;
3984 	if (ret)
3985 		space_info->full = 1;
3986 	else
3987 		ret = 1;
3988 
3989 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3990 out:
3991 	space_info->chunk_alloc = 0;
3992 	spin_unlock(&space_info->lock);
3993 	mutex_unlock(&fs_info->chunk_mutex);
3994 	return ret;
3995 }
3996 
3997 static int can_overcommit(struct btrfs_root *root,
3998 			  struct btrfs_space_info *space_info, u64 bytes,
3999 			  enum btrfs_reserve_flush_enum flush)
4000 {
4001 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4002 	u64 profile = btrfs_get_alloc_profile(root, 0);
4003 	u64 space_size;
4004 	u64 avail;
4005 	u64 used;
4006 
4007 	used = space_info->bytes_used + space_info->bytes_reserved +
4008 		space_info->bytes_pinned + space_info->bytes_readonly;
4009 
4010 	/*
4011 	 * We only want to allow overcommitting if we have lots of actual space
4012 	 * free, but if we don't have enough space to handle the global reserve
4013 	 * space then we could end up having a real enospc problem when trying
4014 	 * to allocate a chunk or some other such important allocation.
4015 	 */
4016 	spin_lock(&global_rsv->lock);
4017 	space_size = calc_global_rsv_need_space(global_rsv);
4018 	spin_unlock(&global_rsv->lock);
4019 	if (used + space_size >= space_info->total_bytes)
4020 		return 0;
4021 
4022 	used += space_info->bytes_may_use;
4023 
4024 	spin_lock(&root->fs_info->free_chunk_lock);
4025 	avail = root->fs_info->free_chunk_space;
4026 	spin_unlock(&root->fs_info->free_chunk_lock);
4027 
4028 	/*
4029 	 * If we have dup, raid1 or raid10 then only half of the free
4030 	 * space is actually usable.  For raid56, the space info used
4031 	 * doesn't include the parity drive, so we don't have to
4032 	 * change the math.
4033 	 */
4034 	if (profile & (BTRFS_BLOCK_GROUP_DUP |
4035 		       BTRFS_BLOCK_GROUP_RAID1 |
4036 		       BTRFS_BLOCK_GROUP_RAID10))
4037 		avail >>= 1;
4038 
4039 	/*
4040 	 * If we aren't flushing all things, allow overcommitting up to
4041 	 * 1/2 of the space.  If we can flush everything, don't let it
4042 	 * overcommit too much; only allow up to 1/8 of the space.
4043 	 */
4044 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
4045 		avail >>= 3;
4046 	else
4047 		avail >>= 1;
4048 
4049 	if (used + bytes < space_info->total_bytes + avail)
4050 		return 1;
4051 	return 0;
4052 }
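
/*
 * Worked example (illustrative, numbers assumed): with 100GiB of unallocated
 * device space, a single (non-duplicated) metadata profile and
 * BTRFS_RESERVE_FLUSH_ALL, up to 100GiB / 8 = 12.5GiB of overcommit beyond
 * the space info's total_bytes is tolerated; with a weaker flush mode the
 * allowance is 100GiB / 2 = 50GiB.  A DUP, RAID1 or RAID10 profile halves
 * the 100GiB before that division.
 */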
4053 
4054 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4055 					 unsigned long nr_pages, int nr_items)
4056 {
4057 	struct super_block *sb = root->fs_info->sb;
4058 
4059 	if (down_read_trylock(&sb->s_umount)) {
4060 		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4061 		up_read(&sb->s_umount);
4062 	} else {
4063 		/*
4064 		 * We needn't worry about the filesystem going from r/w to r/o
4065 		 * even though we don't acquire the ->s_umount mutex, because
4066 		 * the filesystem should guarantee that the delalloc inode list
4067 		 * is empty once the filesystem is read-only (all dirty pages
4068 		 * have been written to disk).
4069 		 */
4070 		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4071 		if (!current->journal_info)
4072 			btrfs_wait_ordered_roots(root->fs_info, nr_items);
4073 	}
4074 }
4075 
4076 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4077 {
4078 	u64 bytes;
4079 	int nr;
4080 
4081 	bytes = btrfs_calc_trans_metadata_size(root, 1);
4082 	nr = (int)div64_u64(to_reclaim, bytes);
4083 	if (!nr)
4084 		nr = 1;
4085 	return nr;
4086 }
4087 
4088 #define EXTENT_SIZE_PER_ITEM	(256 * 1024)
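
/*
 * Worked example (illustrative, numbers assumed): if one item's worth of
 * metadata reservation is, say, 256KiB on a given filesystem, a 1MiB reclaim
 * target maps to 4 items, and each item is in turn assumed to cover
 * EXTENT_SIZE_PER_ITEM (256KiB) of delalloc when shrink_delalloc() recomputes
 * to_reclaim below.
 */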
4089 
4090 /*
4091  * shrink metadata reservation for delalloc
4092  */
4093 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4094 			    bool wait_ordered)
4095 {
4096 	struct btrfs_block_rsv *block_rsv;
4097 	struct btrfs_space_info *space_info;
4098 	struct btrfs_trans_handle *trans;
4099 	u64 delalloc_bytes;
4100 	u64 max_reclaim;
4101 	long time_left;
4102 	unsigned long nr_pages;
4103 	int loops;
4104 	int items;
4105 	enum btrfs_reserve_flush_enum flush;
4106 
4107 	/* Calc the number of metadata items we need to flush for this reservation */
4108 	items = calc_reclaim_items_nr(root, to_reclaim);
4109 	to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4110 
4111 	trans = (struct btrfs_trans_handle *)current->journal_info;
4112 	block_rsv = &root->fs_info->delalloc_block_rsv;
4113 	space_info = block_rsv->space_info;
4114 
4115 	delalloc_bytes = percpu_counter_sum_positive(
4116 						&root->fs_info->delalloc_bytes);
4117 	if (delalloc_bytes == 0) {
4118 		if (trans)
4119 			return;
4120 		if (wait_ordered)
4121 			btrfs_wait_ordered_roots(root->fs_info, items);
4122 		return;
4123 	}
4124 
4125 	loops = 0;
4126 	while (delalloc_bytes && loops < 3) {
4127 		max_reclaim = min(delalloc_bytes, to_reclaim);
4128 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4129 		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4130 		/*
4131 		 * We need to wait for the async pages to actually start before
4132 		 * we do anything.
4133 		 */
4134 		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4135 		if (!max_reclaim)
4136 			goto skip_async;
4137 
4138 		if (max_reclaim <= nr_pages)
4139 			max_reclaim = 0;
4140 		else
4141 			max_reclaim -= nr_pages;
4142 
4143 		wait_event(root->fs_info->async_submit_wait,
4144 			   atomic_read(&root->fs_info->async_delalloc_pages) <=
4145 			   (int)max_reclaim);
4146 skip_async:
4147 		if (!trans)
4148 			flush = BTRFS_RESERVE_FLUSH_ALL;
4149 		else
4150 			flush = BTRFS_RESERVE_NO_FLUSH;
4151 		spin_lock(&space_info->lock);
4152 		if (can_overcommit(root, space_info, orig, flush)) {
4153 			spin_unlock(&space_info->lock);
4154 			break;
4155 		}
4156 		spin_unlock(&space_info->lock);
4157 
4158 		loops++;
4159 		if (wait_ordered && !trans) {
4160 			btrfs_wait_ordered_roots(root->fs_info, items);
4161 		} else {
4162 			time_left = schedule_timeout_killable(1);
4163 			if (time_left)
4164 				break;
4165 		}
4166 		delalloc_bytes = percpu_counter_sum_positive(
4167 						&root->fs_info->delalloc_bytes);
4168 	}
4169 }
4170 
4171 /**
4172  * may_commit_transaction - possibly commit the transaction if it's OK to
4173  * @root - the root we're allocating for
4174  * @bytes - the number of bytes we want to reserve
4175  * @force - force the commit
4176  *
4177  * This will check to make sure that committing the transaction will actually
4178  * get us somewhere and then commit the transaction if it does.  Otherwise it
4179  * will return -ENOSPC.
4180  */
4181 static int may_commit_transaction(struct btrfs_root *root,
4182 				  struct btrfs_space_info *space_info,
4183 				  u64 bytes, int force)
4184 {
4185 	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4186 	struct btrfs_trans_handle *trans;
4187 
4188 	trans = (struct btrfs_trans_handle *)current->journal_info;
4189 	if (trans)
4190 		return -EAGAIN;
4191 
4192 	if (force)
4193 		goto commit;
4194 
4195 	/* See if there is enough pinned space to make this reservation */
4196 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4197 				   bytes) >= 0)
4198 		goto commit;
4199 
4200 	/*
4201 	 * See if there is some space in the delayed insertion reservation for
4202 	 * this reservation.
4203 	 */
4204 	if (space_info != delayed_rsv->space_info)
4205 		return -ENOSPC;
4206 
4207 	spin_lock(&delayed_rsv->lock);
4208 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4209 				   bytes - delayed_rsv->size) >= 0) {
4210 		spin_unlock(&delayed_rsv->lock);
4211 		return -ENOSPC;
4212 	}
4213 	spin_unlock(&delayed_rsv->lock);
4214 
4215 commit:
4216 	trans = btrfs_join_transaction(root);
4217 	if (IS_ERR(trans))
4218 		return -ENOSPC;
4219 
4220 	return btrfs_commit_transaction(trans, root);
4221 }
4222 
4223 enum flush_state {
4224 	FLUSH_DELAYED_ITEMS_NR	=	1,
4225 	FLUSH_DELAYED_ITEMS	=	2,
4226 	FLUSH_DELALLOC		=	3,
4227 	FLUSH_DELALLOC_WAIT	=	4,
4228 	ALLOC_CHUNK		=	5,
4229 	COMMIT_TRANS		=	6,
4230 };
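
/*
 * The reservation paths below (reserve_metadata_bytes() and the async reclaim
 * worker) walk these states in increasing order, from the cheapest (running a
 * bounded number of delayed items) to the most expensive (committing the
 * transaction), retrying the reservation between steps.
 */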
4231 
4232 static int flush_space(struct btrfs_root *root,
4233 		       struct btrfs_space_info *space_info, u64 num_bytes,
4234 		       u64 orig_bytes, int state)
4235 {
4236 	struct btrfs_trans_handle *trans;
4237 	int nr;
4238 	int ret = 0;
4239 
4240 	switch (state) {
4241 	case FLUSH_DELAYED_ITEMS_NR:
4242 	case FLUSH_DELAYED_ITEMS:
4243 		if (state == FLUSH_DELAYED_ITEMS_NR)
4244 			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4245 		else
4246 			nr = -1;
4247 
4248 		trans = btrfs_join_transaction(root);
4249 		if (IS_ERR(trans)) {
4250 			ret = PTR_ERR(trans);
4251 			break;
4252 		}
4253 		ret = btrfs_run_delayed_items_nr(trans, root, nr);
4254 		btrfs_end_transaction(trans, root);
4255 		break;
4256 	case FLUSH_DELALLOC:
4257 	case FLUSH_DELALLOC_WAIT:
4258 		shrink_delalloc(root, num_bytes * 2, orig_bytes,
4259 				state == FLUSH_DELALLOC_WAIT);
4260 		break;
4261 	case ALLOC_CHUNK:
4262 		trans = btrfs_join_transaction(root);
4263 		if (IS_ERR(trans)) {
4264 			ret = PTR_ERR(trans);
4265 			break;
4266 		}
4267 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4268 				     btrfs_get_alloc_profile(root, 0),
4269 				     CHUNK_ALLOC_NO_FORCE);
4270 		btrfs_end_transaction(trans, root);
4271 		if (ret == -ENOSPC)
4272 			ret = 0;
4273 		break;
4274 	case COMMIT_TRANS:
4275 		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4276 		break;
4277 	default:
4278 		ret = -ENOSPC;
4279 		break;
4280 	}
4281 
4282 	return ret;
4283 }
4284 
4285 static inline u64
4286 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4287 				 struct btrfs_space_info *space_info)
4288 {
4289 	u64 used;
4290 	u64 expected;
4291 	u64 to_reclaim;
4292 
4293 	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4294 				16 * 1024 * 1024);
4295 	spin_lock(&space_info->lock);
4296 	if (can_overcommit(root, space_info, to_reclaim,
4297 			   BTRFS_RESERVE_FLUSH_ALL)) {
4298 		to_reclaim = 0;
4299 		goto out;
4300 	}
4301 
4302 	used = space_info->bytes_used + space_info->bytes_reserved +
4303 	       space_info->bytes_pinned + space_info->bytes_readonly +
4304 	       space_info->bytes_may_use;
4305 	if (can_overcommit(root, space_info, 1024 * 1024,
4306 			   BTRFS_RESERVE_FLUSH_ALL))
4307 		expected = div_factor_fine(space_info->total_bytes, 95);
4308 	else
4309 		expected = div_factor_fine(space_info->total_bytes, 90);
4310 
4311 	if (used > expected)
4312 		to_reclaim = used - expected;
4313 	else
4314 		to_reclaim = 0;
4315 	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4316 				     space_info->bytes_reserved);
4317 out:
4318 	spin_unlock(&space_info->lock);
4319 
4320 	return to_reclaim;
4321 }
4322 
4323 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4324 					struct btrfs_fs_info *fs_info, u64 used)
4325 {
4326 	return (used >= div_factor_fine(space_info->total_bytes, 98) &&
4327 		!btrfs_fs_closing(fs_info) &&
4328 		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4329 }
4330 
4331 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4332 				       struct btrfs_fs_info *fs_info,
4333 				       int flush_state)
4334 {
4335 	u64 used;
4336 
4337 	spin_lock(&space_info->lock);
4338 	/*
4339 	 * We've run out of space and haven't gotten any free space via
4340 	 * flush_space, so don't bother doing async reclaim.
4341 	 */
4342 	if (flush_state > COMMIT_TRANS && space_info->full) {
4343 		spin_unlock(&space_info->lock);
4344 		return 0;
4345 	}
4346 
4347 	used = space_info->bytes_used + space_info->bytes_reserved +
4348 	       space_info->bytes_pinned + space_info->bytes_readonly +
4349 	       space_info->bytes_may_use;
4350 	if (need_do_async_reclaim(space_info, fs_info, used)) {
4351 		spin_unlock(&space_info->lock);
4352 		return 1;
4353 	}
4354 	spin_unlock(&space_info->lock);
4355 
4356 	return 0;
4357 }
4358 
4359 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4360 {
4361 	struct btrfs_fs_info *fs_info;
4362 	struct btrfs_space_info *space_info;
4363 	u64 to_reclaim;
4364 	int flush_state;
4365 
4366 	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4367 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4368 
4369 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4370 						      space_info);
4371 	if (!to_reclaim)
4372 		return;
4373 
4374 	flush_state = FLUSH_DELAYED_ITEMS_NR;
4375 	do {
4376 		flush_space(fs_info->fs_root, space_info, to_reclaim,
4377 			    to_reclaim, flush_state);
4378 		flush_state++;
4379 		if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4380 						 flush_state))
4381 			return;
4382 	} while (flush_state <= COMMIT_TRANS);
4383 
4384 	if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
4385 		queue_work(system_unbound_wq, work);
4386 }
4387 
4388 void btrfs_init_async_reclaim_work(struct work_struct *work)
4389 {
4390 	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4391 }
4392 
4393 /**
4394  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4395  * @root - the root we're allocating for
4396  * @block_rsv - the block_rsv we're allocating for
4397  * @orig_bytes - the number of bytes we want
4398  * @flush - whether or not we can flush to make our reservation
4399  *
4400  * This will reserve orig_bytes number of bytes from the space info associated
4401  * with the block_rsv.  If there is not enough space it will make an attempt to
4402  * flush out space to make room.  It will do this by flushing delalloc if
4403  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4404  * then no attempts to regain reservations will be made and this will fail if
4405  * there is not enough space already.
4406  */
4407 static int reserve_metadata_bytes(struct btrfs_root *root,
4408 				  struct btrfs_block_rsv *block_rsv,
4409 				  u64 orig_bytes,
4410 				  enum btrfs_reserve_flush_enum flush)
4411 {
4412 	struct btrfs_space_info *space_info = block_rsv->space_info;
4413 	u64 used;
4414 	u64 num_bytes = orig_bytes;
4415 	int flush_state = FLUSH_DELAYED_ITEMS_NR;
4416 	int ret = 0;
4417 	bool flushing = false;
4418 
4419 again:
4420 	ret = 0;
4421 	spin_lock(&space_info->lock);
4422 	/*
4423 	 * We only want to wait if somebody other than us is flushing and we
4424 	 * are actually allowed to flush all things.
4425 	 */
4426 	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4427 	       space_info->flush) {
4428 		spin_unlock(&space_info->lock);
4429 		/*
4430 		 * If we have a trans handle we can't wait because the flusher
4431 		 * may have to commit the transaction, which would mean we would
4432 		 * deadlock since we are waiting for the flusher to finish, but
4433 		 * hold the current transaction open.
4434 		 */
4435 		if (current->journal_info)
4436 			return -EAGAIN;
4437 		ret = wait_event_killable(space_info->wait, !space_info->flush);
4438 		/* Must have been killed, return */
4439 		if (ret)
4440 			return -EINTR;
4441 
4442 		spin_lock(&space_info->lock);
4443 	}
4444 
4445 	ret = -ENOSPC;
4446 	used = space_info->bytes_used + space_info->bytes_reserved +
4447 		space_info->bytes_pinned + space_info->bytes_readonly +
4448 		space_info->bytes_may_use;
4449 
4450 	/*
4451 	 * The idea here is that if we haven't already over-reserved the block
4452 	 * group then we can go ahead and save our reservation first and then
4453 	 * start flushing if we need to.  Otherwise, if we've already
4454 	 * overcommitted, let's start flushing stuff first and then come back
4455 	 * and try to make our reservation.
4456 	 */
4457 	if (used <= space_info->total_bytes) {
4458 		if (used + orig_bytes <= space_info->total_bytes) {
4459 			space_info->bytes_may_use += orig_bytes;
4460 			trace_btrfs_space_reservation(root->fs_info,
4461 				"space_info", space_info->flags, orig_bytes, 1);
4462 			ret = 0;
4463 		} else {
4464 			/*
4465 			 * Ok, set num_bytes to orig_bytes since we aren't
4466 			 * overcommitted; this way we only try to reclaim what
4467 			 * we need.
4468 			 */
4469 			num_bytes = orig_bytes;
4470 		}
4471 	} else {
4472 		/*
4473 		 * Ok, we're overcommitted, so set num_bytes to the overcommitted
4474 		 * amount plus the number of bytes that we need for this
4475 		 * reservation.
4476 		 */
4477 		num_bytes = used - space_info->total_bytes +
4478 			(orig_bytes * 2);
4479 	}
4480 
4481 	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4482 		space_info->bytes_may_use += orig_bytes;
4483 		trace_btrfs_space_reservation(root->fs_info, "space_info",
4484 					      space_info->flags, orig_bytes,
4485 					      1);
4486 		ret = 0;
4487 	}
4488 
4489 	/*
4490 	 * Couldn't make our reservation, save our place so while we're trying
4491 	 * to reclaim space we can actually use it instead of somebody else
4492 	 * stealing it from us.
4493 	 *
4494 	 * We make the other tasks wait for the flush only when we can flush
4495 	 * all things.
4496 	 */
4497 	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4498 		flushing = true;
4499 		space_info->flush = 1;
4500 	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4501 		used += orig_bytes;
4502 		/*
4503 		 * We will do the space reservation dance during log replay,
4504 		 * which means we won't have fs_info->fs_root set, so don't do
4505 		 * the async reclaim as we will panic.
4506 		 */
4507 		if (!root->fs_info->log_root_recovering &&
4508 		    need_do_async_reclaim(space_info, root->fs_info, used) &&
4509 		    !work_busy(&root->fs_info->async_reclaim_work))
4510 			queue_work(system_unbound_wq,
4511 				   &root->fs_info->async_reclaim_work);
4512 	}
4513 	spin_unlock(&space_info->lock);
4514 
4515 	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4516 		goto out;
4517 
4518 	ret = flush_space(root, space_info, num_bytes, orig_bytes,
4519 			  flush_state);
4520 	flush_state++;
4521 
4522 	/*
4523 	 * If we are FLUSH_LIMIT, we can't flush delalloc, or a deadlock
4524 	 * would happen, so skip the delalloc flush states.
4525 	 */
4526 	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4527 	    (flush_state == FLUSH_DELALLOC ||
4528 	     flush_state == FLUSH_DELALLOC_WAIT))
4529 		flush_state = ALLOC_CHUNK;
4530 
4531 	if (!ret)
4532 		goto again;
4533 	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4534 		 flush_state < COMMIT_TRANS)
4535 		goto again;
4536 	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4537 		 flush_state <= COMMIT_TRANS)
4538 		goto again;
4539 
4540 out:
4541 	if (ret == -ENOSPC &&
4542 	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4543 		struct btrfs_block_rsv *global_rsv =
4544 			&root->fs_info->global_block_rsv;
4545 
4546 		if (block_rsv != global_rsv &&
4547 		    !block_rsv_use_bytes(global_rsv, orig_bytes))
4548 			ret = 0;
4549 	}
4550 	if (ret == -ENOSPC)
4551 		trace_btrfs_space_reservation(root->fs_info,
4552 					      "space_info:enospc",
4553 					      space_info->flags, orig_bytes, 1);
4554 	if (flushing) {
4555 		spin_lock(&space_info->lock);
4556 		space_info->flush = 0;
4557 		wake_up_all(&space_info->wait);
4558 		spin_unlock(&space_info->lock);
4559 	}
4560 	return ret;
4561 }
4562 
4563 static struct btrfs_block_rsv *get_block_rsv(
4564 					const struct btrfs_trans_handle *trans,
4565 					const struct btrfs_root *root)
4566 {
4567 	struct btrfs_block_rsv *block_rsv = NULL;
4568 
4569 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4570 		block_rsv = trans->block_rsv;
4571 
4572 	if (root == root->fs_info->csum_root && trans->adding_csums)
4573 		block_rsv = trans->block_rsv;
4574 
4575 	if (root == root->fs_info->uuid_root)
4576 		block_rsv = trans->block_rsv;
4577 
4578 	if (!block_rsv)
4579 		block_rsv = root->block_rsv;
4580 
4581 	if (!block_rsv)
4582 		block_rsv = &root->fs_info->empty_block_rsv;
4583 
4584 	return block_rsv;
4585 }
4586 
4587 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4588 			       u64 num_bytes)
4589 {
4590 	int ret = -ENOSPC;
4591 	spin_lock(&block_rsv->lock);
4592 	if (block_rsv->reserved >= num_bytes) {
4593 		block_rsv->reserved -= num_bytes;
4594 		if (block_rsv->reserved < block_rsv->size)
4595 			block_rsv->full = 0;
4596 		ret = 0;
4597 	}
4598 	spin_unlock(&block_rsv->lock);
4599 	return ret;
4600 }
4601 
4602 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4603 				u64 num_bytes, int update_size)
4604 {
4605 	spin_lock(&block_rsv->lock);
4606 	block_rsv->reserved += num_bytes;
4607 	if (update_size)
4608 		block_rsv->size += num_bytes;
4609 	else if (block_rsv->reserved >= block_rsv->size)
4610 		block_rsv->full = 1;
4611 	spin_unlock(&block_rsv->lock);
4612 }
4613 
4614 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4615 			     struct btrfs_block_rsv *dest, u64 num_bytes,
4616 			     int min_factor)
4617 {
4618 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4619 	u64 min_bytes;
4620 
4621 	if (global_rsv->space_info != dest->space_info)
4622 		return -ENOSPC;
4623 
4624 	spin_lock(&global_rsv->lock);
4625 	min_bytes = div_factor(global_rsv->size, min_factor);
4626 	if (global_rsv->reserved < min_bytes + num_bytes) {
4627 		spin_unlock(&global_rsv->lock);
4628 		return -ENOSPC;
4629 	}
4630 	global_rsv->reserved -= num_bytes;
4631 	if (global_rsv->reserved < global_rsv->size)
4632 		global_rsv->full = 0;
4633 	spin_unlock(&global_rsv->lock);
4634 
4635 	block_rsv_add_bytes(dest, num_bytes, 1);
4636 	return 0;
4637 }
4638 
4639 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4640 				    struct btrfs_block_rsv *block_rsv,
4641 				    struct btrfs_block_rsv *dest, u64 num_bytes)
4642 {
4643 	struct btrfs_space_info *space_info = block_rsv->space_info;
4644 
4645 	spin_lock(&block_rsv->lock);
4646 	if (num_bytes == (u64)-1)
4647 		num_bytes = block_rsv->size;
4648 	block_rsv->size -= num_bytes;
4649 	if (block_rsv->reserved >= block_rsv->size) {
4650 		num_bytes = block_rsv->reserved - block_rsv->size;
4651 		block_rsv->reserved = block_rsv->size;
4652 		block_rsv->full = 1;
4653 	} else {
4654 		num_bytes = 0;
4655 	}
4656 	spin_unlock(&block_rsv->lock);
4657 
4658 	if (num_bytes > 0) {
4659 		if (dest) {
4660 			spin_lock(&dest->lock);
4661 			if (!dest->full) {
4662 				u64 bytes_to_add;
4663 
4664 				bytes_to_add = dest->size - dest->reserved;
4665 				bytes_to_add = min(num_bytes, bytes_to_add);
4666 				dest->reserved += bytes_to_add;
4667 				if (dest->reserved >= dest->size)
4668 					dest->full = 1;
4669 				num_bytes -= bytes_to_add;
4670 			}
4671 			spin_unlock(&dest->lock);
4672 		}
4673 		if (num_bytes) {
4674 			spin_lock(&space_info->lock);
4675 			space_info->bytes_may_use -= num_bytes;
4676 			trace_btrfs_space_reservation(fs_info, "space_info",
4677 					space_info->flags, num_bytes, 0);
4678 			spin_unlock(&space_info->lock);
4679 		}
4680 	}
4681 }
4682 
4683 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4684 				   struct btrfs_block_rsv *dst, u64 num_bytes)
4685 {
4686 	int ret;
4687 
4688 	ret = block_rsv_use_bytes(src, num_bytes);
4689 	if (ret)
4690 		return ret;
4691 
4692 	block_rsv_add_bytes(dst, num_bytes, 1);
4693 	return 0;
4694 }
4695 
4696 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4697 {
4698 	memset(rsv, 0, sizeof(*rsv));
4699 	spin_lock_init(&rsv->lock);
4700 	rsv->type = type;
4701 }
4702 
4703 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4704 					      unsigned short type)
4705 {
4706 	struct btrfs_block_rsv *block_rsv;
4707 	struct btrfs_fs_info *fs_info = root->fs_info;
4708 
4709 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4710 	if (!block_rsv)
4711 		return NULL;
4712 
4713 	btrfs_init_block_rsv(block_rsv, type);
4714 	block_rsv->space_info = __find_space_info(fs_info,
4715 						  BTRFS_BLOCK_GROUP_METADATA);
4716 	return block_rsv;
4717 }
4718 
4719 void btrfs_free_block_rsv(struct btrfs_root *root,
4720 			  struct btrfs_block_rsv *rsv)
4721 {
4722 	if (!rsv)
4723 		return;
4724 	btrfs_block_rsv_release(root, rsv, (u64)-1);
4725 	kfree(rsv);
4726 }
4727 
4728 int btrfs_block_rsv_add(struct btrfs_root *root,
4729 			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4730 			enum btrfs_reserve_flush_enum flush)
4731 {
4732 	int ret;
4733 
4734 	if (num_bytes == 0)
4735 		return 0;
4736 
4737 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4738 	if (!ret) {
4739 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
4740 		return 0;
4741 	}
4742 
4743 	return ret;
4744 }
4745 
4746 int btrfs_block_rsv_check(struct btrfs_root *root,
4747 			  struct btrfs_block_rsv *block_rsv, int min_factor)
4748 {
4749 	u64 num_bytes = 0;
4750 	int ret = -ENOSPC;
4751 
4752 	if (!block_rsv)
4753 		return 0;
4754 
4755 	spin_lock(&block_rsv->lock);
4756 	num_bytes = div_factor(block_rsv->size, min_factor);
4757 	if (block_rsv->reserved >= num_bytes)
4758 		ret = 0;
4759 	spin_unlock(&block_rsv->lock);
4760 
4761 	return ret;
4762 }
4763 
4764 int btrfs_block_rsv_refill(struct btrfs_root *root,
4765 			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4766 			   enum btrfs_reserve_flush_enum flush)
4767 {
4768 	u64 num_bytes = 0;
4769 	int ret = -ENOSPC;
4770 
4771 	if (!block_rsv)
4772 		return 0;
4773 
4774 	spin_lock(&block_rsv->lock);
4775 	num_bytes = min_reserved;
4776 	if (block_rsv->reserved >= num_bytes)
4777 		ret = 0;
4778 	else
4779 		num_bytes -= block_rsv->reserved;
4780 	spin_unlock(&block_rsv->lock);
4781 
4782 	if (!ret)
4783 		return 0;
4784 
4785 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4786 	if (!ret) {
4787 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
4788 		return 0;
4789 	}
4790 
4791 	return ret;
4792 }
4793 
4794 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4795 			    struct btrfs_block_rsv *dst_rsv,
4796 			    u64 num_bytes)
4797 {
4798 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4799 }
4800 
4801 void btrfs_block_rsv_release(struct btrfs_root *root,
4802 			     struct btrfs_block_rsv *block_rsv,
4803 			     u64 num_bytes)
4804 {
4805 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4806 	if (global_rsv == block_rsv ||
4807 	    block_rsv->space_info != global_rsv->space_info)
4808 		global_rsv = NULL;
4809 	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4810 				num_bytes);
4811 }
4812 
4813 /*
4814  * Helper to calculate the size of the global block reservation.
4815  * The desired value is the sum of the space used by the extent tree,
4816  * the checksum tree and the root tree.
4817  */
4818 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4819 {
4820 	struct btrfs_space_info *sinfo;
4821 	u64 num_bytes;
4822 	u64 meta_used;
4823 	u64 data_used;
4824 	int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4825 
4826 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4827 	spin_lock(&sinfo->lock);
4828 	data_used = sinfo->bytes_used;
4829 	spin_unlock(&sinfo->lock);
4830 
4831 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4832 	spin_lock(&sinfo->lock);
4833 	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4834 		data_used = 0;
4835 	meta_used = sinfo->bytes_used;
4836 	spin_unlock(&sinfo->lock);
4837 
4838 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4839 		    csum_size * 2;
4840 	num_bytes += div64_u64(data_used + meta_used, 50);
4841 
4842 	if (num_bytes * 3 > meta_used)
4843 		num_bytes = div64_u64(meta_used, 3);
4844 
4845 	return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
4846 }
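
/*
 * Worked example (illustrative, numbers assumed): with 100GiB of data used,
 * 4KiB blocks and 4-byte crc32c checksums, the csum component above is
 * (100GiB / 4KiB) * 4 * 2 = 200MiB; adding 2% of (data + metadata) used and
 * then capping the result at one third of the metadata used gives the value
 * that is finally rounded up to a multiple of nodesize * 1024.
 */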
4847 
4848 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4849 {
4850 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4851 	struct btrfs_space_info *sinfo = block_rsv->space_info;
4852 	u64 num_bytes;
4853 
4854 	num_bytes = calc_global_metadata_size(fs_info);
4855 
4856 	spin_lock(&sinfo->lock);
4857 	spin_lock(&block_rsv->lock);
4858 
4859 	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4860 
4861 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4862 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
4863 		    sinfo->bytes_may_use;
4864 
4865 	if (sinfo->total_bytes > num_bytes) {
4866 		num_bytes = sinfo->total_bytes - num_bytes;
4867 		block_rsv->reserved += num_bytes;
4868 		sinfo->bytes_may_use += num_bytes;
4869 		trace_btrfs_space_reservation(fs_info, "space_info",
4870 				      sinfo->flags, num_bytes, 1);
4871 	}
4872 
4873 	if (block_rsv->reserved >= block_rsv->size) {
4874 		num_bytes = block_rsv->reserved - block_rsv->size;
4875 		sinfo->bytes_may_use -= num_bytes;
4876 		trace_btrfs_space_reservation(fs_info, "space_info",
4877 				      sinfo->flags, num_bytes, 0);
4878 		block_rsv->reserved = block_rsv->size;
4879 		block_rsv->full = 1;
4880 	}
4881 
4882 	spin_unlock(&block_rsv->lock);
4883 	spin_unlock(&sinfo->lock);
4884 }
4885 
4886 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4887 {
4888 	struct btrfs_space_info *space_info;
4889 
4890 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4891 	fs_info->chunk_block_rsv.space_info = space_info;
4892 
4893 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4894 	fs_info->global_block_rsv.space_info = space_info;
4895 	fs_info->delalloc_block_rsv.space_info = space_info;
4896 	fs_info->trans_block_rsv.space_info = space_info;
4897 	fs_info->empty_block_rsv.space_info = space_info;
4898 	fs_info->delayed_block_rsv.space_info = space_info;
4899 
4900 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4901 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4902 	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4903 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4904 	if (fs_info->quota_root)
4905 		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4906 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4907 
4908 	update_global_block_rsv(fs_info);
4909 }
4910 
4911 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4912 {
4913 	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4914 				(u64)-1);
4915 	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4916 	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4917 	WARN_ON(fs_info->trans_block_rsv.size > 0);
4918 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4919 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
4920 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4921 	WARN_ON(fs_info->delayed_block_rsv.size > 0);
4922 	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4923 }
4924 
4925 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4926 				  struct btrfs_root *root)
4927 {
4928 	if (!trans->block_rsv)
4929 		return;
4930 
4931 	if (!trans->bytes_reserved)
4932 		return;
4933 
4934 	trace_btrfs_space_reservation(root->fs_info, "transaction",
4935 				      trans->transid, trans->bytes_reserved, 0);
4936 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4937 	trans->bytes_reserved = 0;
4938 }
4939 
4940 /* Can only return 0 or -ENOSPC */
4941 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4942 				  struct inode *inode)
4943 {
4944 	struct btrfs_root *root = BTRFS_I(inode)->root;
4945 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4946 	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4947 
4948 	/*
4949 	 * We need to hold space in order to delete our orphan item once we've
4950 	 * added it, so this takes the reservation so we can release it later
4951 	 * when we are truly done with the orphan item.
4952 	 */
4953 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4954 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4955 				      btrfs_ino(inode), num_bytes, 1);
4956 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4957 }
4958 
4959 void btrfs_orphan_release_metadata(struct inode *inode)
4960 {
4961 	struct btrfs_root *root = BTRFS_I(inode)->root;
4962 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4963 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4964 				      btrfs_ino(inode), num_bytes, 0);
4965 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4966 }
4967 
4968 /*
4969  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4970  * root: the root of the parent directory
4971  * rsv: block reservation
4972  * items: the number of items that we need to reserve space for
4973  * qgroup_reserved: used to return the reserved size in qgroup
4974  *
4975  * This function is used to reserve the space for snapshot/subvolume
4976  * creation and deletion. Those operations are different from the
4977  * common file/directory operations: they change two fs/file trees
4978  * and the root tree, and the number of items that the qgroup reserves
4979  * is different from the free space reservation. So we can not use
4980  * the space reservation mechanism in start_transaction().
4981  */
4982 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4983 				     struct btrfs_block_rsv *rsv,
4984 				     int items,
4985 				     u64 *qgroup_reserved,
4986 				     bool use_global_rsv)
4987 {
4988 	u64 num_bytes;
4989 	int ret;
4990 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4991 
4992 	if (root->fs_info->quota_enabled) {
4993 		/* One for parent inode, two for dir entries */
4994 		num_bytes = 3 * root->nodesize;
4995 		ret = btrfs_qgroup_reserve(root, num_bytes);
4996 		if (ret)
4997 			return ret;
4998 	} else {
4999 		num_bytes = 0;
5000 	}
5001 
5002 	*qgroup_reserved = num_bytes;
5003 
5004 	num_bytes = btrfs_calc_trans_metadata_size(root, items);
5005 	rsv->space_info = __find_space_info(root->fs_info,
5006 					    BTRFS_BLOCK_GROUP_METADATA);
5007 	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5008 				  BTRFS_RESERVE_FLUSH_ALL);
5009 
5010 	if (ret == -ENOSPC && use_global_rsv)
5011 		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5012 
5013 	if (ret) {
5014 		if (*qgroup_reserved)
5015 			btrfs_qgroup_free(root, *qgroup_reserved);
5016 	}
5017 
5018 	return ret;
5019 }
5020 
5021 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5022 				      struct btrfs_block_rsv *rsv,
5023 				      u64 qgroup_reserved)
5024 {
5025 	btrfs_block_rsv_release(root, rsv, (u64)-1);
5026 	if (qgroup_reserved)
5027 		btrfs_qgroup_free(root, qgroup_reserved);
5028 }
5029 
5030 /**
5031  * drop_outstanding_extent - drop an outstanding extent
5032  * @inode: the inode we're dropping the extent for
5033  *
5034  * This is called when we are freeing up an outstanding extent, either called
5035  * This is called when we are freeing up an outstanding extent, either
5036  * after an error or after an extent is written.  This will return the number of
5037  * BTRFS_I(inode)->lock held.
5038  */
5039 static unsigned drop_outstanding_extent(struct inode *inode)
5040 {
5041 	unsigned drop_inode_space = 0;
5042 	unsigned dropped_extents = 0;
5043 
5044 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
5045 	BTRFS_I(inode)->outstanding_extents--;
5046 
5047 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
5048 	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5049 			       &BTRFS_I(inode)->runtime_flags))
5050 		drop_inode_space = 1;
5051 
5052 	/*
5053 	 * If we have as many or more outstanding extents than we have
5054 	 * reserved then we need to leave the reserved extents count alone.
5055 	 */
5056 	if (BTRFS_I(inode)->outstanding_extents >=
5057 	    BTRFS_I(inode)->reserved_extents)
5058 		return drop_inode_space;
5059 
5060 	dropped_extents = BTRFS_I(inode)->reserved_extents -
5061 		BTRFS_I(inode)->outstanding_extents;
5062 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
5063 	return dropped_extents + drop_inode_space;
5064 }
5065 
5066 /**
5067  * calc_csum_metadata_size - return the amount of metadata space that must be
5068  *	reserved/freed for the given bytes.
5069  * @inode: the inode we're manipulating
5070  * @num_bytes: the number of bytes in question
5071  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5072  *
5073  * This adjusts the number of csum_bytes in the inode and then returns the
5074  * correct amount of metadata that must either be reserved or freed.  We
5075  * calculate how many checksums we can fit into one leaf and then divide the
5076  * number of bytes that will need to be checksummed by this value to figure out
5077  * how many checksums will be required.  If we are adding bytes then the number
5078  * may go up and we will return the number of additional bytes that must be
5079  * reserved.  If it is going down we will return the number of bytes that must
5080  * be freed.
5081  *
5082  * This must be called with BTRFS_I(inode)->lock held.
5083  */
5084 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5085 				   int reserve)
5086 {
5087 	struct btrfs_root *root = BTRFS_I(inode)->root;
5088 	u64 csum_size;
5089 	int num_csums_per_leaf;
5090 	int num_csums;
5091 	int old_csums;
5092 
5093 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5094 	    BTRFS_I(inode)->csum_bytes == 0)
5095 		return 0;
5096 
5097 	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5098 	if (reserve)
5099 		BTRFS_I(inode)->csum_bytes += num_bytes;
5100 	else
5101 		BTRFS_I(inode)->csum_bytes -= num_bytes;
5102 	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
5103 	num_csums_per_leaf = (int)div64_u64(csum_size,
5104 					    sizeof(struct btrfs_csum_item) +
5105 					    sizeof(struct btrfs_disk_key));
5106 	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5107 	num_csums = num_csums + num_csums_per_leaf - 1;
5108 	num_csums = num_csums / num_csums_per_leaf;
5109 
5110 	old_csums = old_csums + num_csums_per_leaf - 1;
5111 	old_csums = old_csums / num_csums_per_leaf;
5112 
5113 	/* No change, no need to reserve more */
5114 	if (old_csums == num_csums)
5115 		return 0;
5116 
5117 	if (reserve)
5118 		return btrfs_calc_trans_metadata_size(root,
5119 						      num_csums - old_csums);
5120 
5121 	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5122 }
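
/*
 * Worked example (illustrative, numbers assumed): with 4KiB sectors the
 * computation above estimates several hundred checksums per leaf, so growing
 * csum_bytes from 0 to 1MiB means 256 checksums, which round up to a single
 * leaf; the function then returns the metadata reservation for that one
 * extra item (num_csums - old_csums = 1).
 */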
5123 
5124 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5125 {
5126 	struct btrfs_root *root = BTRFS_I(inode)->root;
5127 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5128 	u64 to_reserve = 0;
5129 	u64 csum_bytes;
5130 	unsigned nr_extents = 0;
5131 	int extra_reserve = 0;
5132 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5133 	int ret = 0;
5134 	bool delalloc_lock = true;
5135 	u64 to_free = 0;
5136 	unsigned dropped;
5137 
5138 	/* If we are a free space inode we need to not flush since we will be in
5139 	 * the middle of a transaction commit.  We also don't need the delalloc
5140 	 * mutex since we won't race with anybody.  We need this mostly to make
5141 	 * lockdep shut its filthy mouth.
5142 	 */
5143 	if (btrfs_is_free_space_inode(inode)) {
5144 		flush = BTRFS_RESERVE_NO_FLUSH;
5145 		delalloc_lock = false;
5146 	}
5147 
5148 	if (flush != BTRFS_RESERVE_NO_FLUSH &&
5149 	    btrfs_transaction_in_commit(root->fs_info))
5150 		schedule_timeout(1);
5151 
5152 	if (delalloc_lock)
5153 		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5154 
5155 	num_bytes = ALIGN(num_bytes, root->sectorsize);
5156 
5157 	spin_lock(&BTRFS_I(inode)->lock);
5158 	BTRFS_I(inode)->outstanding_extents++;
5159 
5160 	if (BTRFS_I(inode)->outstanding_extents >
5161 	    BTRFS_I(inode)->reserved_extents)
5162 		nr_extents = BTRFS_I(inode)->outstanding_extents -
5163 			BTRFS_I(inode)->reserved_extents;
5164 
5165 	/*
5166 	 * Add an item to reserve for updating the inode when we complete the
5167 	 * delalloc io.
5168 	 */
5169 	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5170 		      &BTRFS_I(inode)->runtime_flags)) {
5171 		nr_extents++;
5172 		extra_reserve = 1;
5173 	}
5174 
5175 	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5176 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5177 	csum_bytes = BTRFS_I(inode)->csum_bytes;
5178 	spin_unlock(&BTRFS_I(inode)->lock);
5179 
5180 	if (root->fs_info->quota_enabled) {
5181 		ret = btrfs_qgroup_reserve(root, num_bytes +
5182 					   nr_extents * root->nodesize);
5183 		if (ret)
5184 			goto out_fail;
5185 	}
5186 
5187 	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5188 	if (unlikely(ret)) {
5189 		if (root->fs_info->quota_enabled)
5190 			btrfs_qgroup_free(root, num_bytes +
5191 						nr_extents * root->nodesize);
5192 		goto out_fail;
5193 	}
5194 
5195 	spin_lock(&BTRFS_I(inode)->lock);
5196 	if (extra_reserve) {
5197 		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5198 			&BTRFS_I(inode)->runtime_flags);
5199 		nr_extents--;
5200 	}
5201 	BTRFS_I(inode)->reserved_extents += nr_extents;
5202 	spin_unlock(&BTRFS_I(inode)->lock);
5203 
5204 	if (delalloc_lock)
5205 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5206 
5207 	if (to_reserve)
5208 		trace_btrfs_space_reservation(root->fs_info, "delalloc",
5209 					      btrfs_ino(inode), to_reserve, 1);
5210 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
5211 
5212 	return 0;
5213 
5214 out_fail:
5215 	spin_lock(&BTRFS_I(inode)->lock);
5216 	dropped = drop_outstanding_extent(inode);
5217 	/*
5218 	 * If the inode's csum_bytes is the same as the original
5219 	 * csum_bytes then we know we haven't raced with any free()ers
5220 	 * so we can just reduce our inode's csum bytes and carry on.
5221 	 */
5222 	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5223 		calc_csum_metadata_size(inode, num_bytes, 0);
5224 	} else {
5225 		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5226 		u64 bytes;
5227 
5228 		/*
5229 		 * This is tricky, but first we need to figure out how much we
5230 		 * freed from any free-ers that occurred during this
5231 		 * reservation, so we reset ->csum_bytes to the csum_bytes
5232 		 * before we dropped our lock, and then call the free for the
5233 		 * number of bytes that were freed while we were trying our
5234 		 * reservation.
5235 		 */
5236 		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5237 		BTRFS_I(inode)->csum_bytes = csum_bytes;
5238 		to_free = calc_csum_metadata_size(inode, bytes, 0);
5239 
5240 
5241 		/*
5242 		 * Now we need to see how much we would have freed had we not
5243 		 * been making this reservation and our ->csum_bytes were not
5244 		 * artificially inflated.
5245 		 */
5246 		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5247 		bytes = csum_bytes - orig_csum_bytes;
5248 		bytes = calc_csum_metadata_size(inode, bytes, 0);
5249 
5250 		/*
5251 		 * Now reset ->csum_bytes to what it should be.  If bytes is
5252 		 * more than to_free then we would have freed more space had we
5253 		 * not had an artificially high ->csum_bytes, so we need to free
5254 		 * the remainder.  If bytes is the same or less then we don't
5255 		 * need to do anything, the other free-ers did the correct
5256 		 * thing.
5257 		 */
5258 		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5259 		if (bytes > to_free)
5260 			to_free = bytes - to_free;
5261 		else
5262 			to_free = 0;
5263 	}
5264 	spin_unlock(&BTRFS_I(inode)->lock);
5265 	if (dropped)
5266 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
5267 
5268 	if (to_free) {
5269 		btrfs_block_rsv_release(root, block_rsv, to_free);
5270 		trace_btrfs_space_reservation(root->fs_info, "delalloc",
5271 					      btrfs_ino(inode), to_free, 0);
5272 	}
5273 	if (delalloc_lock)
5274 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5275 	return ret;
5276 }
5277 
5278 /**
5279  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5280  * @inode: the inode to release the reservation for
5281  * @num_bytes: the number of bytes we're releasing
5282  *
5283  * This will release the metadata reservation for an inode.  This can be called
5284  * once we complete IO for a given set of bytes to release their metadata
5285  * reservations.
5286  */
5287 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5288 {
5289 	struct btrfs_root *root = BTRFS_I(inode)->root;
5290 	u64 to_free = 0;
5291 	unsigned dropped;
5292 
5293 	num_bytes = ALIGN(num_bytes, root->sectorsize);
5294 	spin_lock(&BTRFS_I(inode)->lock);
5295 	dropped = drop_outstanding_extent(inode);
5296 
5297 	if (num_bytes)
5298 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5299 	spin_unlock(&BTRFS_I(inode)->lock);
5300 	if (dropped > 0)
5301 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
5302 
5303 	trace_btrfs_space_reservation(root->fs_info, "delalloc",
5304 				      btrfs_ino(inode), to_free, 0);
5305 	if (root->fs_info->quota_enabled) {
5306 		btrfs_qgroup_free(root, num_bytes +
5307 					dropped * root->nodesize);
5308 	}
5309 
5310 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5311 				to_free);
5312 }
5313 
5314 /**
5315  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5316  * @inode: inode we're writing to
5317  * @num_bytes: the number of bytes we want to allocate
5318  *
5319  * This will do the following things
5320  *
5321  * o reserve space in the data space info for num_bytes
5322  * o reserve space in the metadata space info based on number of outstanding
5323  *   extents and how many csums will be needed
5324  * o add to the inode's ->delalloc_bytes
5325  * o add it to the fs_info's delalloc inodes list.
5326  *
5327  * This will return 0 for success and -ENOSPC if there is no space left.
5328  */
5329 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5330 {
5331 	int ret;
5332 
5333 	ret = btrfs_check_data_free_space(inode, num_bytes);
5334 	if (ret)
5335 		return ret;
5336 
5337 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5338 	if (ret) {
5339 		btrfs_free_reserved_data_space(inode, num_bytes);
5340 		return ret;
5341 	}
5342 
5343 	return 0;
5344 }
5345 
5346 /**
5347  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5348  * @inode: inode we're releasing space for
5349  * @num_bytes: the number of bytes we want to free up
5350  *
5351  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5352  * called in the case that we don't need the metadata AND data reservations
5353  * anymore, for example if there is an error or we insert an inline extent.
5354  *
5355  * This function will release the metadata space that was not used and will
5356  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5357  * list if there are no delalloc bytes left.
5358  */
5359 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5360 {
5361 	btrfs_delalloc_release_metadata(inode, num_bytes);
5362 	btrfs_free_reserved_data_space(inode, num_bytes);
5363 }
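
/*
 * Illustrative sketch (not part of the original file): how a write path
 * would typically pair btrfs_delalloc_reserve_space() with
 * btrfs_delalloc_release_space().  The function name below is hypothetical
 * and the "dirty the pages" step is only indicated by a comment; the
 * reserve/release calls themselves match the helpers defined above.
 */
static inline int example_buffered_write_reserve(struct inode *inode,
						 u64 pos, u64 count)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = round_down(pos, root->sectorsize);
	u64 len = round_up(pos + count, root->sectorsize) - start;
	int ret;

	/* reserve data space plus the matching metadata reservation */
	ret = btrfs_delalloc_reserve_space(inode, len);
	if (ret)
		return ret;

	/*
	 * ... dirty the pages covering [start, start + len) here; if that
	 * step fails, both reservations are dropped with a single call:
	 *
	 *	btrfs_delalloc_release_space(inode, len);
	 */
	return 0;
}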
5364 
5365 static int update_block_group(struct btrfs_root *root,
5366 			      u64 bytenr, u64 num_bytes, int alloc)
5367 {
5368 	struct btrfs_block_group_cache *cache = NULL;
5369 	struct btrfs_fs_info *info = root->fs_info;
5370 	u64 total = num_bytes;
5371 	u64 old_val;
5372 	u64 byte_in_group;
5373 	int factor;
5374 
5375 	/* block accounting for super block */
5376 	spin_lock(&info->delalloc_root_lock);
5377 	old_val = btrfs_super_bytes_used(info->super_copy);
5378 	if (alloc)
5379 		old_val += num_bytes;
5380 	else
5381 		old_val -= num_bytes;
5382 	btrfs_set_super_bytes_used(info->super_copy, old_val);
5383 	spin_unlock(&info->delalloc_root_lock);
5384 
5385 	while (total) {
5386 		cache = btrfs_lookup_block_group(info, bytenr);
5387 		if (!cache)
5388 			return -ENOENT;
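		/*
		 * DUP, RAID1 and RAID10 store two copies of every block, so
		 * the on-disk usage (disk_used) moves by twice the logical
		 * byte count.
		 */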
5389 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5390 				    BTRFS_BLOCK_GROUP_RAID1 |
5391 				    BTRFS_BLOCK_GROUP_RAID10))
5392 			factor = 2;
5393 		else
5394 			factor = 1;
5395 		/*
5396 		 * If this block group has free space cache written out, we
5397 		 * need to make sure to load it if we are removing space.  This
5398 		 * is because we need the unpinning stage to actually add the
5399 		 * space back to the block group, otherwise we will leak space.
5400 		 */
5401 		if (!alloc && cache->cached == BTRFS_CACHE_NO)
5402 			cache_block_group(cache, 1);
5403 
5404 		byte_in_group = bytenr - cache->key.objectid;
5405 		WARN_ON(byte_in_group > cache->key.offset);
5406 
5407 		spin_lock(&cache->space_info->lock);
5408 		spin_lock(&cache->lock);
5409 
5410 		if (btrfs_test_opt(root, SPACE_CACHE) &&
5411 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
5412 			cache->disk_cache_state = BTRFS_DC_CLEAR;
5413 
5414 		cache->dirty = 1;
5415 		old_val = btrfs_block_group_used(&cache->item);
5416 		num_bytes = min(total, cache->key.offset - byte_in_group);
5417 		if (alloc) {
5418 			old_val += num_bytes;
5419 			btrfs_set_block_group_used(&cache->item, old_val);
5420 			cache->reserved -= num_bytes;
5421 			cache->space_info->bytes_reserved -= num_bytes;
5422 			cache->space_info->bytes_used += num_bytes;
5423 			cache->space_info->disk_used += num_bytes * factor;
5424 			spin_unlock(&cache->lock);
5425 			spin_unlock(&cache->space_info->lock);
5426 		} else {
5427 			old_val -= num_bytes;
5428 
5429 			/*
5430 			 * No longer have used bytes in this block group, queue
5431 			 * it for deletion.
5432 			 */
5433 			if (old_val == 0) {
5434 				spin_lock(&info->unused_bgs_lock);
5435 				if (list_empty(&cache->bg_list)) {
5436 					btrfs_get_block_group(cache);
5437 					list_add_tail(&cache->bg_list,
5438 						      &info->unused_bgs);
5439 				}
5440 				spin_unlock(&info->unused_bgs_lock);
5441 			}
5442 			btrfs_set_block_group_used(&cache->item, old_val);
5443 			cache->pinned += num_bytes;
5444 			cache->space_info->bytes_pinned += num_bytes;
5445 			cache->space_info->bytes_used -= num_bytes;
5446 			cache->space_info->disk_used -= num_bytes * factor;
5447 			spin_unlock(&cache->lock);
5448 			spin_unlock(&cache->space_info->lock);
5449 
5450 			set_extent_dirty(info->pinned_extents,
5451 					 bytenr, bytenr + num_bytes - 1,
5452 					 GFP_NOFS | __GFP_NOFAIL);
5453 		}
5454 		btrfs_put_block_group(cache);
5455 		total -= num_bytes;
5456 		bytenr += num_bytes;
5457 	}
5458 	return 0;
5459 }
5460 
5461 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5462 {
5463 	struct btrfs_block_group_cache *cache;
5464 	u64 bytenr;
5465 
5466 	spin_lock(&root->fs_info->block_group_cache_lock);
5467 	bytenr = root->fs_info->first_logical_byte;
5468 	spin_unlock(&root->fs_info->block_group_cache_lock);
5469 
5470 	if (bytenr < (u64)-1)
5471 		return bytenr;
5472 
5473 	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5474 	if (!cache)
5475 		return 0;
5476 
5477 	bytenr = cache->key.objectid;
5478 	btrfs_put_block_group(cache);
5479 
5480 	return bytenr;
5481 }
5482 
5483 static int pin_down_extent(struct btrfs_root *root,
5484 			   struct btrfs_block_group_cache *cache,
5485 			   u64 bytenr, u64 num_bytes, int reserved)
5486 {
5487 	spin_lock(&cache->space_info->lock);
5488 	spin_lock(&cache->lock);
5489 	cache->pinned += num_bytes;
5490 	cache->space_info->bytes_pinned += num_bytes;
5491 	if (reserved) {
5492 		cache->reserved -= num_bytes;
5493 		cache->space_info->bytes_reserved -= num_bytes;
5494 	}
5495 	spin_unlock(&cache->lock);
5496 	spin_unlock(&cache->space_info->lock);
5497 
5498 	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5499 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5500 	if (reserved)
5501 		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5502 	return 0;
5503 }
5504 
5505 /*
5506  * this function must be called within a transaction
5507  */
5508 int btrfs_pin_extent(struct btrfs_root *root,
5509 		     u64 bytenr, u64 num_bytes, int reserved)
5510 {
5511 	struct btrfs_block_group_cache *cache;
5512 
5513 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5514 	BUG_ON(!cache); /* Logic error */
5515 
5516 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5517 
5518 	btrfs_put_block_group(cache);
5519 	return 0;
5520 }
5521 
5522 /*
5523  * this function must be called within a transaction
5524  */
5525 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5526 				    u64 bytenr, u64 num_bytes)
5527 {
5528 	struct btrfs_block_group_cache *cache;
5529 	int ret;
5530 
5531 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5532 	if (!cache)
5533 		return -EINVAL;
5534 
5535 	/*
5536 	 * pull in the free space cache (if any) so that our pin
5537 	 * removes the free space from the cache.  We have load_only set
5538 	 * to one because the slow code to read in the free extents does check
5539 	 * the pinned extents.
5540 	 */
5541 	cache_block_group(cache, 1);
5542 
5543 	pin_down_extent(root, cache, bytenr, num_bytes, 0);
5544 
5545 	/* remove us from the free space cache (if we're there at all) */
5546 	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5547 	btrfs_put_block_group(cache);
5548 	return ret;
5549 }
5550 
5551 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5552 {
5553 	int ret;
5554 	struct btrfs_block_group_cache *block_group;
5555 	struct btrfs_caching_control *caching_ctl;
5556 
5557 	block_group = btrfs_lookup_block_group(root->fs_info, start);
5558 	if (!block_group)
5559 		return -EINVAL;
5560 
5561 	cache_block_group(block_group, 0);
5562 	caching_ctl = get_caching_control(block_group);
5563 
5564 	if (!caching_ctl) {
5565 		/* Logic error */
5566 		BUG_ON(!block_group_cache_done(block_group));
5567 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
5568 	} else {
5569 		mutex_lock(&caching_ctl->mutex);
5570 
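		/*
		 * Exclude whatever part of [start, start + num_bytes) the
		 * caching thread has not reached yet; the part that is
		 * already cached is simply removed from the free space cache.
		 */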
5571 		if (start >= caching_ctl->progress) {
5572 			ret = add_excluded_extent(root, start, num_bytes);
5573 		} else if (start + num_bytes <= caching_ctl->progress) {
5574 			ret = btrfs_remove_free_space(block_group,
5575 						      start, num_bytes);
5576 		} else {
5577 			num_bytes = caching_ctl->progress - start;
5578 			ret = btrfs_remove_free_space(block_group,
5579 						      start, num_bytes);
5580 			if (ret)
5581 				goto out_lock;
5582 
5583 			num_bytes = (start + num_bytes) -
5584 				caching_ctl->progress;
5585 			start = caching_ctl->progress;
5586 			ret = add_excluded_extent(root, start, num_bytes);
5587 		}
5588 out_lock:
5589 		mutex_unlock(&caching_ctl->mutex);
5590 		put_caching_control(caching_ctl);
5591 	}
5592 	btrfs_put_block_group(block_group);
5593 	return ret;
5594 }
5595 
5596 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5597 				 struct extent_buffer *eb)
5598 {
5599 	struct btrfs_file_extent_item *item;
5600 	struct btrfs_key key;
5601 	int found_type;
5602 	int i;
5603 
5604 	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5605 		return 0;
5606 
5607 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
5608 		btrfs_item_key_to_cpu(eb, &key, i);
5609 		if (key.type != BTRFS_EXTENT_DATA_KEY)
5610 			continue;
5611 		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5612 		found_type = btrfs_file_extent_type(eb, item);
5613 		if (found_type == BTRFS_FILE_EXTENT_INLINE)
5614 			continue;
5615 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5616 			continue;
5617 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5618 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5619 		__exclude_logged_extent(log, key.objectid, key.offset);
5620 	}
5621 
5622 	return 0;
5623 }
5624 
5625 /**
5626  * btrfs_update_reserved_bytes - update the block_group and space info counters
5627  * @cache:	The cache we are manipulating
5628  * @num_bytes:	The number of bytes in question
5629  * @reserve:	One of the reservation enums
5630  * @delalloc:   Whether the blocks are allocated for a delalloc write
5631  *
5632  * This is called by the allocator when it reserves space, or by somebody who is
5633  * freeing space that was never actually used on disk.  For example if you
5634  * reserve some space for a new leaf in transaction A and before transaction A
5635  * commits you free that leaf, you call this with reserve set to 0 in order to
5636  * clear the reservation.
5637  *
5638  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5639  * ENOSPC accounting.  For data we handle the reservation through clearing the
5640  * delalloc bits in the io_tree.  We have to do this since we could end up
5641  * allocating less disk space for the amount of data we have reserved in the
5642  * case of compression.
5643  *
5644  * If this is a reservation and the block group has become read only we cannot
5645  * make the reservation and return -EAGAIN, otherwise this function always
5646  * succeeds.
5647  */
5648 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5649 				       u64 num_bytes, int reserve, int delalloc)
5650 {
5651 	struct btrfs_space_info *space_info = cache->space_info;
5652 	int ret = 0;
5653 
5654 	spin_lock(&space_info->lock);
5655 	spin_lock(&cache->lock);
5656 	if (reserve != RESERVE_FREE) {
5657 		if (cache->ro) {
5658 			ret = -EAGAIN;
5659 		} else {
5660 			cache->reserved += num_bytes;
5661 			space_info->bytes_reserved += num_bytes;
5662 			if (reserve == RESERVE_ALLOC) {
5663 				trace_btrfs_space_reservation(cache->fs_info,
5664 						"space_info", space_info->flags,
5665 						num_bytes, 0);
5666 				space_info->bytes_may_use -= num_bytes;
5667 			}
5668 
5669 			if (delalloc)
5670 				cache->delalloc_bytes += num_bytes;
5671 		}
5672 	} else {
5673 		if (cache->ro)
5674 			space_info->bytes_readonly += num_bytes;
5675 		cache->reserved -= num_bytes;
5676 		space_info->bytes_reserved -= num_bytes;
5677 
5678 		if (delalloc)
5679 			cache->delalloc_bytes -= num_bytes;
5680 	}
5681 	spin_unlock(&cache->lock);
5682 	spin_unlock(&space_info->lock);
5683 	return ret;
5684 }
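
/*
 * Illustrative sketch (not part of the original file): the alloc/free
 * pairing the comment above describes.  "cache" and "len" stand in for a
 * real block group and reservation size; the two calls mirror what the
 * allocator and btrfs_free_reserved_extent() end up doing.
 */
static inline void example_reserve_then_discard(struct btrfs_block_group_cache *cache,
						u64 len)
{
	/* reserve for a new leaf: moves bytes_may_use into bytes_reserved */
	if (btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC, 0))
		return;		/* the block group went read only: -EAGAIN */

	/* ... the leaf is freed again before the transaction commits ... */

	/* drop the reservation; the space never shows up in bytes_used */
	btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, 0);
}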
5685 
5686 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5687 				struct btrfs_root *root)
5688 {
5689 	struct btrfs_fs_info *fs_info = root->fs_info;
5690 	struct btrfs_caching_control *next;
5691 	struct btrfs_caching_control *caching_ctl;
5692 	struct btrfs_block_group_cache *cache;
5693 
5694 	down_write(&fs_info->commit_root_sem);
5695 
5696 	list_for_each_entry_safe(caching_ctl, next,
5697 				 &fs_info->caching_block_groups, list) {
5698 		cache = caching_ctl->block_group;
5699 		if (block_group_cache_done(cache)) {
5700 			cache->last_byte_to_unpin = (u64)-1;
5701 			list_del_init(&caching_ctl->list);
5702 			put_caching_control(caching_ctl);
5703 		} else {
5704 			cache->last_byte_to_unpin = caching_ctl->progress;
5705 		}
5706 	}
5707 
5708 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5709 		fs_info->pinned_extents = &fs_info->freed_extents[1];
5710 	else
5711 		fs_info->pinned_extents = &fs_info->freed_extents[0];
5712 
5713 	up_write(&fs_info->commit_root_sem);
5714 
5715 	update_global_block_rsv(fs_info);
5716 }
5717 
5718 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5719 {
5720 	struct btrfs_fs_info *fs_info = root->fs_info;
5721 	struct btrfs_block_group_cache *cache = NULL;
5722 	struct btrfs_space_info *space_info;
5723 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5724 	u64 len;
5725 	bool readonly;
5726 
5727 	while (start <= end) {
5728 		readonly = false;
5729 		if (!cache ||
5730 		    start >= cache->key.objectid + cache->key.offset) {
5731 			if (cache)
5732 				btrfs_put_block_group(cache);
5733 			cache = btrfs_lookup_block_group(fs_info, start);
5734 			BUG_ON(!cache); /* Logic error */
5735 		}
5736 
5737 		len = cache->key.objectid + cache->key.offset - start;
5738 		len = min(len, end + 1 - start);
5739 
5740 		if (start < cache->last_byte_to_unpin) {
5741 			len = min(len, cache->last_byte_to_unpin - start);
5742 			btrfs_add_free_space(cache, start, len);
5743 		}
5744 
5745 		start += len;
5746 		space_info = cache->space_info;
5747 
5748 		spin_lock(&space_info->lock);
5749 		spin_lock(&cache->lock);
5750 		cache->pinned -= len;
5751 		space_info->bytes_pinned -= len;
5752 		percpu_counter_add(&space_info->total_bytes_pinned, -len);
5753 		if (cache->ro) {
5754 			space_info->bytes_readonly += len;
5755 			readonly = true;
5756 		}
5757 		spin_unlock(&cache->lock);
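		/*
		 * Unpinned space is used first to top up the global block
		 * reserve; whatever goes in there is accounted as
		 * bytes_may_use again.
		 */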
5758 		if (!readonly && global_rsv->space_info == space_info) {
5759 			spin_lock(&global_rsv->lock);
5760 			if (!global_rsv->full) {
5761 				len = min(len, global_rsv->size -
5762 					  global_rsv->reserved);
5763 				global_rsv->reserved += len;
5764 				space_info->bytes_may_use += len;
5765 				if (global_rsv->reserved >= global_rsv->size)
5766 					global_rsv->full = 1;
5767 			}
5768 			spin_unlock(&global_rsv->lock);
5769 		}
5770 		spin_unlock(&space_info->lock);
5771 	}
5772 
5773 	if (cache)
5774 		btrfs_put_block_group(cache);
5775 	return 0;
5776 }
5777 
5778 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5779 			       struct btrfs_root *root)
5780 {
5781 	struct btrfs_fs_info *fs_info = root->fs_info;
5782 	struct extent_io_tree *unpin;
5783 	u64 start;
5784 	u64 end;
5785 	int ret;
5786 
5787 	if (trans->aborted)
5788 		return 0;
5789 
5790 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5791 		unpin = &fs_info->freed_extents[1];
5792 	else
5793 		unpin = &fs_info->freed_extents[0];
5794 
5795 	while (1) {
5796 		ret = find_first_extent_bit(unpin, 0, &start, &end,
5797 					    EXTENT_DIRTY, NULL);
5798 		if (ret)
5799 			break;
5800 
5801 		if (btrfs_test_opt(root, DISCARD))
5802 			ret = btrfs_discard_extent(root, start,
5803 						   end + 1 - start, NULL);
5804 
5805 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
5806 		unpin_extent_range(root, start, end);
5807 		cond_resched();
5808 	}
5809 
5810 	return 0;
5811 }
5812 
5813 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5814 			     u64 owner, u64 root_objectid)
5815 {
5816 	struct btrfs_space_info *space_info;
5817 	u64 flags;
5818 
5819 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5820 		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5821 			flags = BTRFS_BLOCK_GROUP_SYSTEM;
5822 		else
5823 			flags = BTRFS_BLOCK_GROUP_METADATA;
5824 	} else {
5825 		flags = BTRFS_BLOCK_GROUP_DATA;
5826 	}
5827 
5828 	space_info = __find_space_info(fs_info, flags);
5829 	BUG_ON(!space_info); /* Logic bug */
5830 	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5831 }
5832 
5833 
5834 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5835 				struct btrfs_root *root,
5836 				u64 bytenr, u64 num_bytes, u64 parent,
5837 				u64 root_objectid, u64 owner_objectid,
5838 				u64 owner_offset, int refs_to_drop,
5839 				struct btrfs_delayed_extent_op *extent_op,
5840 				int no_quota)
5841 {
5842 	struct btrfs_key key;
5843 	struct btrfs_path *path;
5844 	struct btrfs_fs_info *info = root->fs_info;
5845 	struct btrfs_root *extent_root = info->extent_root;
5846 	struct extent_buffer *leaf;
5847 	struct btrfs_extent_item *ei;
5848 	struct btrfs_extent_inline_ref *iref;
5849 	int ret;
5850 	int is_data;
5851 	int extent_slot = 0;
5852 	int found_extent = 0;
5853 	int num_to_del = 1;
5854 	u32 item_size;
5855 	u64 refs;
5856 	int last_ref = 0;
5857 	enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
5858 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5859 						 SKINNY_METADATA);
5860 
5861 	if (!info->quota_enabled || !is_fstree(root_objectid))
5862 		no_quota = 1;
5863 
5864 	path = btrfs_alloc_path();
5865 	if (!path)
5866 		return -ENOMEM;
5867 
5868 	path->reada = 1;
5869 	path->leave_spinning = 1;
5870 
5871 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5872 	BUG_ON(!is_data && refs_to_drop != 1);
5873 
5874 	if (is_data)
5875 		skinny_metadata = 0;
5876 
5877 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
5878 				    bytenr, num_bytes, parent,
5879 				    root_objectid, owner_objectid,
5880 				    owner_offset);
5881 	if (ret == 0) {
5882 		extent_slot = path->slots[0];
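		/*
		 * The extent item for bytenr normally sits a slot or two in
		 * front of the backref we just found, so walk backwards a
		 * few slots to locate it.
		 */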
5883 		while (extent_slot >= 0) {
5884 			btrfs_item_key_to_cpu(path->nodes[0], &key,
5885 					      extent_slot);
5886 			if (key.objectid != bytenr)
5887 				break;
5888 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5889 			    key.offset == num_bytes) {
5890 				found_extent = 1;
5891 				break;
5892 			}
5893 			if (key.type == BTRFS_METADATA_ITEM_KEY &&
5894 			    key.offset == owner_objectid) {
5895 				found_extent = 1;
5896 				break;
5897 			}
5898 			if (path->slots[0] - extent_slot > 5)
5899 				break;
5900 			extent_slot--;
5901 		}
5902 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5903 		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5904 		if (found_extent && item_size < sizeof(*ei))
5905 			found_extent = 0;
5906 #endif
5907 		if (!found_extent) {
5908 			BUG_ON(iref);
5909 			ret = remove_extent_backref(trans, extent_root, path,
5910 						    NULL, refs_to_drop,
5911 						    is_data, &last_ref);
5912 			if (ret) {
5913 				btrfs_abort_transaction(trans, extent_root, ret);
5914 				goto out;
5915 			}
5916 			btrfs_release_path(path);
5917 			path->leave_spinning = 1;
5918 
5919 			key.objectid = bytenr;
5920 			key.type = BTRFS_EXTENT_ITEM_KEY;
5921 			key.offset = num_bytes;
5922 
5923 			if (!is_data && skinny_metadata) {
5924 				key.type = BTRFS_METADATA_ITEM_KEY;
5925 				key.offset = owner_objectid;
5926 			}
5927 
5928 			ret = btrfs_search_slot(trans, extent_root,
5929 						&key, path, -1, 1);
5930 			if (ret > 0 && skinny_metadata && path->slots[0]) {
5931 				/*
5932 				 * Couldn't find our skinny metadata item,
5933 				 * see if we have ye olde extent item.
5934 				 */
5935 				path->slots[0]--;
5936 				btrfs_item_key_to_cpu(path->nodes[0], &key,
5937 						      path->slots[0]);
5938 				if (key.objectid == bytenr &&
5939 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
5940 				    key.offset == num_bytes)
5941 					ret = 0;
5942 			}
5943 
5944 			if (ret > 0 && skinny_metadata) {
5945 				skinny_metadata = false;
5946 				key.objectid = bytenr;
5947 				key.type = BTRFS_EXTENT_ITEM_KEY;
5948 				key.offset = num_bytes;
5949 				btrfs_release_path(path);
5950 				ret = btrfs_search_slot(trans, extent_root,
5951 							&key, path, -1, 1);
5952 			}
5953 
5954 			if (ret) {
5955 				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5956 					ret, bytenr);
5957 				if (ret > 0)
5958 					btrfs_print_leaf(extent_root,
5959 							 path->nodes[0]);
5960 			}
5961 			if (ret < 0) {
5962 				btrfs_abort_transaction(trans, extent_root, ret);
5963 				goto out;
5964 			}
5965 			extent_slot = path->slots[0];
5966 		}
5967 	} else if (WARN_ON(ret == -ENOENT)) {
5968 		btrfs_print_leaf(extent_root, path->nodes[0]);
5969 		btrfs_err(info,
5970 			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5971 			bytenr, parent, root_objectid, owner_objectid,
5972 			owner_offset);
5973 		btrfs_abort_transaction(trans, extent_root, ret);
5974 		goto out;
5975 	} else {
5976 		btrfs_abort_transaction(trans, extent_root, ret);
5977 		goto out;
5978 	}
5979 
5980 	leaf = path->nodes[0];
5981 	item_size = btrfs_item_size_nr(leaf, extent_slot);
5982 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5983 	if (item_size < sizeof(*ei)) {
5984 		BUG_ON(found_extent || extent_slot != path->slots[0]);
5985 		ret = convert_extent_item_v0(trans, extent_root, path,
5986 					     owner_objectid, 0);
5987 		if (ret < 0) {
5988 			btrfs_abort_transaction(trans, extent_root, ret);
5989 			goto out;
5990 		}
5991 
5992 		btrfs_release_path(path);
5993 		path->leave_spinning = 1;
5994 
5995 		key.objectid = bytenr;
5996 		key.type = BTRFS_EXTENT_ITEM_KEY;
5997 		key.offset = num_bytes;
5998 
5999 		ret = btrfs_search_slot(trans, extent_root, &key, path,
6000 					-1, 1);
6001 		if (ret) {
6002 			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6003 				ret, bytenr);
6004 			btrfs_print_leaf(extent_root, path->nodes[0]);
6005 		}
6006 		if (ret < 0) {
6007 			btrfs_abort_transaction(trans, extent_root, ret);
6008 			goto out;
6009 		}
6010 
6011 		extent_slot = path->slots[0];
6012 		leaf = path->nodes[0];
6013 		item_size = btrfs_item_size_nr(leaf, extent_slot);
6014 	}
6015 #endif
6016 	BUG_ON(item_size < sizeof(*ei));
6017 	ei = btrfs_item_ptr(leaf, extent_slot,
6018 			    struct btrfs_extent_item);
6019 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6020 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
6021 		struct btrfs_tree_block_info *bi;
6022 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6023 		bi = (struct btrfs_tree_block_info *)(ei + 1);
6024 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6025 	}
6026 
6027 	refs = btrfs_extent_refs(leaf, ei);
6028 	if (refs < refs_to_drop) {
6029 		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6030 			  "for bytenr %Lu", refs_to_drop, refs, bytenr);
6031 		ret = -EINVAL;
6032 		btrfs_abort_transaction(trans, extent_root, ret);
6033 		goto out;
6034 	}
6035 	refs -= refs_to_drop;
6036 
6037 	if (refs > 0) {
6038 		type = BTRFS_QGROUP_OPER_SUB_SHARED;
6039 		if (extent_op)
6040 			__run_delayed_extent_op(extent_op, leaf, ei);
6041 		/*
6042 		 * In the case of inline back ref, reference count will
6043 		 * be updated by remove_extent_backref
6044 		 */
6045 		if (iref) {
6046 			BUG_ON(!found_extent);
6047 		} else {
6048 			btrfs_set_extent_refs(leaf, ei, refs);
6049 			btrfs_mark_buffer_dirty(leaf);
6050 		}
6051 		if (found_extent) {
6052 			ret = remove_extent_backref(trans, extent_root, path,
6053 						    iref, refs_to_drop,
6054 						    is_data, &last_ref);
6055 			if (ret) {
6056 				btrfs_abort_transaction(trans, extent_root, ret);
6057 				goto out;
6058 			}
6059 		}
6060 		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6061 				 root_objectid);
6062 	} else {
6063 		if (found_extent) {
6064 			BUG_ON(is_data && refs_to_drop !=
6065 			       extent_data_ref_count(root, path, iref));
6066 			if (iref) {
6067 				BUG_ON(path->slots[0] != extent_slot);
6068 			} else {
6069 				BUG_ON(path->slots[0] != extent_slot + 1);
6070 				path->slots[0] = extent_slot;
6071 				num_to_del = 2;
6072 			}
6073 		}
6074 
6075 		last_ref = 1;
6076 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6077 				      num_to_del);
6078 		if (ret) {
6079 			btrfs_abort_transaction(trans, extent_root, ret);
6080 			goto out;
6081 		}
6082 		btrfs_release_path(path);
6083 
6084 		if (is_data) {
6085 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6086 			if (ret) {
6087 				btrfs_abort_transaction(trans, extent_root, ret);
6088 				goto out;
6089 			}
6090 		}
6091 
6092 		ret = update_block_group(root, bytenr, num_bytes, 0);
6093 		if (ret) {
6094 			btrfs_abort_transaction(trans, extent_root, ret);
6095 			goto out;
6096 		}
6097 	}
6098 	btrfs_release_path(path);
6099 
6100 	/* Deal with the quota accounting */
6101 	if (!ret && last_ref && !no_quota) {
6102 		int mod_seq = 0;
6103 
6104 		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6105 		    type == BTRFS_QGROUP_OPER_SUB_SHARED)
6106 			mod_seq = 1;
6107 
6108 		ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6109 					      bytenr, num_bytes, type,
6110 					      mod_seq);
6111 	}
6112 out:
6113 	btrfs_free_path(path);
6114 	return ret;
6115 }
6116 
6117 /*
6118  * when we free a block, it is possible (and likely) that we free the last
6119  * delayed ref for that extent as well.  This searches the delayed ref tree for
6120  * a given extent, and if there are no other delayed refs to be processed, it
6121  * removes it from the tree.
6122  */
6123 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6124 				      struct btrfs_root *root, u64 bytenr)
6125 {
6126 	struct btrfs_delayed_ref_head *head;
6127 	struct btrfs_delayed_ref_root *delayed_refs;
6128 	int ret = 0;
6129 
6130 	delayed_refs = &trans->transaction->delayed_refs;
6131 	spin_lock(&delayed_refs->lock);
6132 	head = btrfs_find_delayed_ref_head(trans, bytenr);
6133 	if (!head)
6134 		goto out_delayed_unlock;
6135 
6136 	spin_lock(&head->lock);
6137 	if (rb_first(&head->ref_root))
6138 		goto out;
6139 
6140 	if (head->extent_op) {
6141 		if (!head->must_insert_reserved)
6142 			goto out;
6143 		btrfs_free_delayed_extent_op(head->extent_op);
6144 		head->extent_op = NULL;
6145 	}
6146 
6147 	/*
6148 	 * waiting for the lock here would deadlock.  If someone else has it
6149 	 * locked, they are already in the process of dropping it anyway.
6150 	 */
6151 	if (!mutex_trylock(&head->mutex))
6152 		goto out;
6153 
6154 	/*
6155 	 * at this point we have a head with no other entries.  Go
6156 	 * ahead and process it.
6157 	 */
6158 	head->node.in_tree = 0;
6159 	rb_erase(&head->href_node, &delayed_refs->href_root);
6160 
6161 	atomic_dec(&delayed_refs->num_entries);
6162 
6163 	/*
6164 	 * we don't take a ref on the node because we're removing it from the
6165 	 * tree, so we just steal the ref the tree was holding.
6166 	 */
6167 	delayed_refs->num_heads--;
6168 	if (head->processing == 0)
6169 		delayed_refs->num_heads_ready--;
6170 	head->processing = 0;
6171 	spin_unlock(&head->lock);
6172 	spin_unlock(&delayed_refs->lock);
6173 
6174 	BUG_ON(head->extent_op);
6175 	if (head->must_insert_reserved)
6176 		ret = 1;
6177 
6178 	mutex_unlock(&head->mutex);
6179 	btrfs_put_delayed_ref(&head->node);
6180 	return ret;
6181 out:
6182 	spin_unlock(&head->lock);
6183 
6184 out_delayed_unlock:
6185 	spin_unlock(&delayed_refs->lock);
6186 	return 0;
6187 }
6188 
6189 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6190 			   struct btrfs_root *root,
6191 			   struct extent_buffer *buf,
6192 			   u64 parent, int last_ref)
6193 {
6194 	struct btrfs_block_group_cache *cache = NULL;
6195 	int pin = 1;
6196 	int ret;
6197 
6198 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6199 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6200 					buf->start, buf->len,
6201 					parent, root->root_key.objectid,
6202 					btrfs_header_level(buf),
6203 					BTRFS_DROP_DELAYED_REF, NULL, 0);
6204 		BUG_ON(ret); /* -ENOMEM */
6205 	}
6206 
6207 	if (!last_ref)
6208 		return;
6209 
6210 	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6211 
6212 	if (btrfs_header_generation(buf) == trans->transid) {
6213 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6214 			ret = check_ref_cleanup(trans, root, buf->start);
6215 			if (!ret)
6216 				goto out;
6217 		}
6218 
6219 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6220 			pin_down_extent(root, cache, buf->start, buf->len, 1);
6221 			goto out;
6222 		}
6223 
6224 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6225 
6226 		btrfs_add_free_space(cache, buf->start, buf->len);
6227 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6228 		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6229 		pin = 0;
6230 	}
6231 out:
6232 	if (pin)
6233 		add_pinned_bytes(root->fs_info, buf->len,
6234 				 btrfs_header_level(buf),
6235 				 root->root_key.objectid);
6236 
6237 	/*
6238 	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
6239 	 * anymore.
6240 	 */
6241 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6242 	btrfs_put_block_group(cache);
6243 }
6244 
6245 /* Can return -ENOMEM */
6246 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6247 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6248 		      u64 owner, u64 offset, int no_quota)
6249 {
6250 	int ret;
6251 	struct btrfs_fs_info *fs_info = root->fs_info;
6252 
6253 	if (btrfs_test_is_dummy_root(root))
6254 		return 0;
6255 
6256 	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6257 
6258 	/*
6259 	 * tree log blocks never actually go into the extent allocation
6260 	 * tree, just update pinning info and exit early.
6261 	 */
6262 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6263 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6264 		/* unlocks the pinned mutex */
6265 		btrfs_pin_extent(root, bytenr, num_bytes, 1);
6266 		ret = 0;
6267 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6268 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6269 					num_bytes,
6270 					parent, root_objectid, (int)owner,
6271 					BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6272 	} else {
6273 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6274 						num_bytes,
6275 						parent, root_objectid, owner,
6276 						offset, BTRFS_DROP_DELAYED_REF,
6277 						NULL, no_quota);
6278 	}
6279 	return ret;
6280 }
6281 
6282 /*
6283  * when we wait for progress in the block group caching, it's because
6284  * our allocation attempt failed at least once.  So, we must sleep
6285  * and let some progress happen before we try again.
6286  *
6287  * This function will sleep at least once waiting for new free space to
6288  * show up, and then it will check the block group free space numbers
6289  * for our min num_bytes.  Another option is to have it go ahead
6290  * and look in the rbtree for a free extent of a given size, but this
6291  * is a good start.
6292  *
6293  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6294  * any of the information in this block group.
6295  */
6296 static noinline void
6297 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6298 				u64 num_bytes)
6299 {
6300 	struct btrfs_caching_control *caching_ctl;
6301 
6302 	caching_ctl = get_caching_control(cache);
6303 	if (!caching_ctl)
6304 		return;
6305 
6306 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6307 		   (cache->free_space_ctl->free_space >= num_bytes));
6308 
6309 	put_caching_control(caching_ctl);
6310 }
6311 
6312 static noinline int
6313 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6314 {
6315 	struct btrfs_caching_control *caching_ctl;
6316 	int ret = 0;
6317 
6318 	caching_ctl = get_caching_control(cache);
6319 	if (!caching_ctl)
6320 		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6321 
6322 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
6323 	if (cache->cached == BTRFS_CACHE_ERROR)
6324 		ret = -EIO;
6325 	put_caching_control(caching_ctl);
6326 	return ret;
6327 }
6328 
6329 int __get_raid_index(u64 flags)
6330 {
6331 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
6332 		return BTRFS_RAID_RAID10;
6333 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6334 		return BTRFS_RAID_RAID1;
6335 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
6336 		return BTRFS_RAID_DUP;
6337 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6338 		return BTRFS_RAID_RAID0;
6339 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6340 		return BTRFS_RAID_RAID5;
6341 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6342 		return BTRFS_RAID_RAID6;
6343 
6344 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6345 }
6346 
6347 int get_block_group_index(struct btrfs_block_group_cache *cache)
6348 {
6349 	return __get_raid_index(cache->flags);
6350 }
6351 
6352 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6353 	[BTRFS_RAID_RAID10]	= "raid10",
6354 	[BTRFS_RAID_RAID1]	= "raid1",
6355 	[BTRFS_RAID_DUP]	= "dup",
6356 	[BTRFS_RAID_RAID0]	= "raid0",
6357 	[BTRFS_RAID_SINGLE]	= "single",
6358 	[BTRFS_RAID_RAID5]	= "raid5",
6359 	[BTRFS_RAID_RAID6]	= "raid6",
6360 };
6361 
6362 static const char *get_raid_name(enum btrfs_raid_types type)
6363 {
6364 	if (type >= BTRFS_NR_RAID_TYPES)
6365 		return NULL;
6366 
6367 	return btrfs_raid_type_names[type];
6368 }
6369 
6370 enum btrfs_loop_type {
6371 	LOOP_CACHING_NOWAIT = 0,
6372 	LOOP_CACHING_WAIT = 1,
6373 	LOOP_ALLOC_CHUNK = 2,
6374 	LOOP_NO_EMPTY_SIZE = 3,
6375 };
6376 
6377 static inline void
6378 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6379 		       int delalloc)
6380 {
6381 	if (delalloc)
6382 		down_read(&cache->data_rwsem);
6383 }
6384 
6385 static inline void
6386 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6387 		       int delalloc)
6388 {
6389 	btrfs_get_block_group(cache);
6390 	if (delalloc)
6391 		down_read(&cache->data_rwsem);
6392 }
6393 
6394 static struct btrfs_block_group_cache *
6395 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6396 		   struct btrfs_free_cluster *cluster,
6397 		   int delalloc)
6398 {
6399 	struct btrfs_block_group_cache *used_bg;
6400 	bool locked = false;
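	/*
	 * Taking data_rwsem may require dropping the cluster's refill_lock;
	 * after reacquiring it, check that the cluster still points at the
	 * same block group, otherwise unlock and retry.
	 */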
6401 again:
6402 	spin_lock(&cluster->refill_lock);
6403 	if (locked) {
6404 		if (used_bg == cluster->block_group)
6405 			return used_bg;
6406 
6407 		up_read(&used_bg->data_rwsem);
6408 		btrfs_put_block_group(used_bg);
6409 	}
6410 
6411 	used_bg = cluster->block_group;
6412 	if (!used_bg)
6413 		return NULL;
6414 
6415 	if (used_bg == block_group)
6416 		return used_bg;
6417 
6418 	btrfs_get_block_group(used_bg);
6419 
6420 	if (!delalloc)
6421 		return used_bg;
6422 
6423 	if (down_read_trylock(&used_bg->data_rwsem))
6424 		return used_bg;
6425 
6426 	spin_unlock(&cluster->refill_lock);
6427 	down_read(&used_bg->data_rwsem);
6428 	locked = true;
6429 	goto again;
6430 }
6431 
6432 static inline void
6433 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6434 			 int delalloc)
6435 {
6436 	if (delalloc)
6437 		up_read(&cache->data_rwsem);
6438 	btrfs_put_block_group(cache);
6439 }
6440 
6441 /*
6442  * walks the btree of allocated extents and finds a hole of a given size.
6443  * The key ins is changed to record the hole:
6444  * ins->objectid == start position
6445  * ins->type == BTRFS_EXTENT_ITEM_KEY
6446  * ins->offset == the size of the hole.
6447  * Any available blocks before search_start are skipped.
6448  *
6449  * If there is no suitable free space, we will record the max size of
6450  * the free space extent currently.
6451  */
6452 static noinline int find_free_extent(struct btrfs_root *orig_root,
6453 				     u64 num_bytes, u64 empty_size,
6454 				     u64 hint_byte, struct btrfs_key *ins,
6455 				     u64 flags, int delalloc)
6456 {
6457 	int ret = 0;
6458 	struct btrfs_root *root = orig_root->fs_info->extent_root;
6459 	struct btrfs_free_cluster *last_ptr = NULL;
6460 	struct btrfs_block_group_cache *block_group = NULL;
6461 	u64 search_start = 0;
6462 	u64 max_extent_size = 0;
6463 	int empty_cluster = 2 * 1024 * 1024;
6464 	struct btrfs_space_info *space_info;
6465 	int loop = 0;
6466 	int index = __get_raid_index(flags);
6467 	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6468 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6469 	bool failed_cluster_refill = false;
6470 	bool failed_alloc = false;
6471 	bool use_cluster = true;
6472 	bool have_caching_bg = false;
6473 
6474 	WARN_ON(num_bytes < root->sectorsize);
6475 	ins->type = BTRFS_EXTENT_ITEM_KEY;
6476 	ins->objectid = 0;
6477 	ins->offset = 0;
6478 
6479 	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6480 
6481 	space_info = __find_space_info(root->fs_info, flags);
6482 	if (!space_info) {
6483 		btrfs_err(root->fs_info, "No space info for %llu", flags);
6484 		return -ENOSPC;
6485 	}
6486 
6487 	/*
6488 	 * If the space info is for both data and metadata it means we have a
6489 	 * small filesystem and we can't use the clustering stuff.
6490 	 */
6491 	if (btrfs_mixed_space_info(space_info))
6492 		use_cluster = false;
6493 
6494 	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6495 		last_ptr = &root->fs_info->meta_alloc_cluster;
6496 		if (!btrfs_test_opt(root, SSD))
6497 			empty_cluster = 64 * 1024;
6498 	}
6499 
6500 	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6501 	    btrfs_test_opt(root, SSD)) {
6502 		last_ptr = &root->fs_info->data_alloc_cluster;
6503 	}
6504 
6505 	if (last_ptr) {
6506 		spin_lock(&last_ptr->lock);
6507 		if (last_ptr->block_group)
6508 			hint_byte = last_ptr->window_start;
6509 		spin_unlock(&last_ptr->lock);
6510 	}
6511 
6512 	search_start = max(search_start, first_logical_byte(root, 0));
6513 	search_start = max(search_start, hint_byte);
6514 
6515 	if (!last_ptr)
6516 		empty_cluster = 0;
6517 
6518 	if (search_start == hint_byte) {
6519 		block_group = btrfs_lookup_block_group(root->fs_info,
6520 						       search_start);
6521 		/*
6522 		 * we don't want to use the block group if it doesn't match our
6523 		 * allocation bits, or if it's not cached.
6524 		 *
6525 		 * However if we are re-searching with an ideal block group
6526 		 * picked out then we don't care that the block group is cached.
6527 		 */
6528 		if (block_group && block_group_bits(block_group, flags) &&
6529 		    block_group->cached != BTRFS_CACHE_NO) {
6530 			down_read(&space_info->groups_sem);
6531 			if (list_empty(&block_group->list) ||
6532 			    block_group->ro) {
6533 				/*
6534 				 * someone is removing this block group,
6535 				 * we can't jump into the have_block_group
6536 				 * target because our list pointers are not
6537 				 * valid
6538 				 */
6539 				btrfs_put_block_group(block_group);
6540 				up_read(&space_info->groups_sem);
6541 			} else {
6542 				index = get_block_group_index(block_group);
6543 				btrfs_lock_block_group(block_group, delalloc);
6544 				goto have_block_group;
6545 			}
6546 		} else if (block_group) {
6547 			btrfs_put_block_group(block_group);
6548 		}
6549 	}
6550 search:
6551 	have_caching_bg = false;
6552 	down_read(&space_info->groups_sem);
6553 	list_for_each_entry(block_group, &space_info->block_groups[index],
6554 			    list) {
6555 		u64 offset;
6556 		int cached;
6557 
6558 		btrfs_grab_block_group(block_group, delalloc);
6559 		search_start = block_group->key.objectid;
6560 
6561 		/*
6562 		 * this can happen if we end up cycling through all the
6563 		 * raid types, but we want to make sure we only allocate
6564 		 * for the proper type.
6565 		 */
6566 		if (!block_group_bits(block_group, flags)) {
6567 		    u64 extra = BTRFS_BLOCK_GROUP_DUP |
6568 				BTRFS_BLOCK_GROUP_RAID1 |
6569 				BTRFS_BLOCK_GROUP_RAID5 |
6570 				BTRFS_BLOCK_GROUP_RAID6 |
6571 				BTRFS_BLOCK_GROUP_RAID10;
6572 
6573 			/*
6574 			 * if they asked for extra copies and this block group
6575 			 * doesn't provide them, bail.  This does allow us to
6576 			 * fill raid0 from raid1.
6577 			 */
6578 			if ((flags & extra) && !(block_group->flags & extra))
6579 				goto loop;
6580 		}
6581 
6582 have_block_group:
6583 		cached = block_group_cache_done(block_group);
6584 		if (unlikely(!cached)) {
6585 			ret = cache_block_group(block_group, 0);
6586 			BUG_ON(ret < 0);
6587 			ret = 0;
6588 		}
6589 
6590 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6591 			goto loop;
6592 		if (unlikely(block_group->ro))
6593 			goto loop;
6594 
6595 		/*
6596 		 * Ok, we want to try to use the cluster allocator, so
6597 		 * let's look there
6598 		 */
6599 		if (last_ptr) {
6600 			struct btrfs_block_group_cache *used_block_group;
6601 			unsigned long aligned_cluster;
6602 			/*
6603 			 * the refill lock keeps out other
6604 			 * people trying to start a new cluster
6605 			 */
6606 			used_block_group = btrfs_lock_cluster(block_group,
6607 							      last_ptr,
6608 							      delalloc);
6609 			if (!used_block_group)
6610 				goto refill_cluster;
6611 
6612 			if (used_block_group != block_group &&
6613 			    (used_block_group->ro ||
6614 			     !block_group_bits(used_block_group, flags)))
6615 				goto release_cluster;
6616 
6617 			offset = btrfs_alloc_from_cluster(used_block_group,
6618 						last_ptr,
6619 						num_bytes,
6620 						used_block_group->key.objectid,
6621 						&max_extent_size);
6622 			if (offset) {
6623 				/* we have a block, we're done */
6624 				spin_unlock(&last_ptr->refill_lock);
6625 				trace_btrfs_reserve_extent_cluster(root,
6626 						used_block_group,
6627 						search_start, num_bytes);
6628 				if (used_block_group != block_group) {
6629 					btrfs_release_block_group(block_group,
6630 								  delalloc);
6631 					block_group = used_block_group;
6632 				}
6633 				goto checks;
6634 			}
6635 
6636 			WARN_ON(last_ptr->block_group != used_block_group);
6637 release_cluster:
6638 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
6639 			 * set up a new cluster, so let's just skip it
6640 			 * and let the allocator find whatever block
6641 			 * it can find.  If we reach this point, we
6642 			 * will have tried the cluster allocator
6643 			 * plenty of times and not have found
6644 			 * anything, so we are likely way too
6645 			 * fragmented for the clustering stuff to find
6646 			 * anything.
6647 			 *
6648 			 * However, if the cluster is taken from the
6649 			 * current block group, release the cluster
6650 			 * first, so that we stand a better chance of
6651 			 * succeeding in the unclustered
6652 			 * allocation.  */
6653 			if (loop >= LOOP_NO_EMPTY_SIZE &&
6654 			    used_block_group != block_group) {
6655 				spin_unlock(&last_ptr->refill_lock);
6656 				btrfs_release_block_group(used_block_group,
6657 							  delalloc);
6658 				goto unclustered_alloc;
6659 			}
6660 
6661 			/*
6662 			 * this cluster didn't work out, free it and
6663 			 * start over
6664 			 */
6665 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
6666 
6667 			if (used_block_group != block_group)
6668 				btrfs_release_block_group(used_block_group,
6669 							  delalloc);
6670 refill_cluster:
6671 			if (loop >= LOOP_NO_EMPTY_SIZE) {
6672 				spin_unlock(&last_ptr->refill_lock);
6673 				goto unclustered_alloc;
6674 			}
6675 
6676 			aligned_cluster = max_t(unsigned long,
6677 						empty_cluster + empty_size,
6678 					      block_group->full_stripe_len);
6679 
6680 			/* allocate a cluster in this block group */
6681 			ret = btrfs_find_space_cluster(root, block_group,
6682 						       last_ptr, search_start,
6683 						       num_bytes,
6684 						       aligned_cluster);
6685 			if (ret == 0) {
6686 				/*
6687 				 * now pull our allocation out of this
6688 				 * cluster
6689 				 */
6690 				offset = btrfs_alloc_from_cluster(block_group,
6691 							last_ptr,
6692 							num_bytes,
6693 							search_start,
6694 							&max_extent_size);
6695 				if (offset) {
6696 					/* we found one, proceed */
6697 					spin_unlock(&last_ptr->refill_lock);
6698 					trace_btrfs_reserve_extent_cluster(root,
6699 						block_group, search_start,
6700 						num_bytes);
6701 					goto checks;
6702 				}
6703 			} else if (!cached && loop > LOOP_CACHING_NOWAIT
6704 				   && !failed_cluster_refill) {
6705 				spin_unlock(&last_ptr->refill_lock);
6706 
6707 				failed_cluster_refill = true;
6708 				wait_block_group_cache_progress(block_group,
6709 				       num_bytes + empty_cluster + empty_size);
6710 				goto have_block_group;
6711 			}
6712 
6713 			/*
6714 			 * at this point we either didn't find a cluster
6715 			 * or we weren't able to allocate a block from our
6716 			 * cluster.  Free the cluster we've been trying
6717 			 * to use, and go to the next block group
6718 			 */
6719 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
6720 			spin_unlock(&last_ptr->refill_lock);
6721 			goto loop;
6722 		}
6723 
6724 unclustered_alloc:
6725 		spin_lock(&block_group->free_space_ctl->tree_lock);
6726 		if (cached &&
6727 		    block_group->free_space_ctl->free_space <
6728 		    num_bytes + empty_cluster + empty_size) {
6729 			if (block_group->free_space_ctl->free_space >
6730 			    max_extent_size)
6731 				max_extent_size =
6732 					block_group->free_space_ctl->free_space;
6733 			spin_unlock(&block_group->free_space_ctl->tree_lock);
6734 			goto loop;
6735 		}
6736 		spin_unlock(&block_group->free_space_ctl->tree_lock);
6737 
6738 		offset = btrfs_find_space_for_alloc(block_group, search_start,
6739 						    num_bytes, empty_size,
6740 						    &max_extent_size);
6741 		/*
6742 		 * If we didn't find a chunk, and we haven't failed on this
6743 		 * block group before, and this block group is in the middle of
6744 		 * caching and we are ok with waiting, then go ahead and wait
6745 		 * for progress to be made, and set failed_alloc to true.
6746 		 *
6747 		 * If failed_alloc is true then we've already waited on this
6748 		 * block group once and should move on to the next block group.
6749 		 */
6750 		if (!offset && !failed_alloc && !cached &&
6751 		    loop > LOOP_CACHING_NOWAIT) {
6752 			wait_block_group_cache_progress(block_group,
6753 						num_bytes + empty_size);
6754 			failed_alloc = true;
6755 			goto have_block_group;
6756 		} else if (!offset) {
6757 			if (!cached)
6758 				have_caching_bg = true;
6759 			goto loop;
6760 		}
6761 checks:
6762 		search_start = ALIGN(offset, root->stripesize);
6763 
6764 		/* move on to the next group */
6765 		if (search_start + num_bytes >
6766 		    block_group->key.objectid + block_group->key.offset) {
6767 			btrfs_add_free_space(block_group, offset, num_bytes);
6768 			goto loop;
6769 		}
6770 
6771 		if (offset < search_start)
6772 			btrfs_add_free_space(block_group, offset,
6773 					     search_start - offset);
6774 		BUG_ON(offset > search_start);
6775 
6776 		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6777 						  alloc_type, delalloc);
6778 		if (ret == -EAGAIN) {
6779 			btrfs_add_free_space(block_group, offset, num_bytes);
6780 			goto loop;
6781 		}
6782 
6783 		/* we are all good, let's return */
6784 		ins->objectid = search_start;
6785 		ins->offset = num_bytes;
6786 
6787 		trace_btrfs_reserve_extent(orig_root, block_group,
6788 					   search_start, num_bytes);
6789 		btrfs_release_block_group(block_group, delalloc);
6790 		break;
6791 loop:
6792 		failed_cluster_refill = false;
6793 		failed_alloc = false;
6794 		BUG_ON(index != get_block_group_index(block_group));
6795 		btrfs_release_block_group(block_group, delalloc);
6796 	}
6797 	up_read(&space_info->groups_sem);
6798 
6799 	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6800 		goto search;
6801 
6802 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6803 		goto search;
6804 
6805 	/*
6806 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6807 	 *			caching kthreads as we move along
6808 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6809 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6810 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6811 	 *			again
6812 	 */
6813 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6814 		index = 0;
6815 		loop++;
6816 		if (loop == LOOP_ALLOC_CHUNK) {
6817 			struct btrfs_trans_handle *trans;
6818 			int exist = 0;
6819 
6820 			trans = current->journal_info;
6821 			if (trans)
6822 				exist = 1;
6823 			else
6824 				trans = btrfs_join_transaction(root);
6825 
6826 			if (IS_ERR(trans)) {
6827 				ret = PTR_ERR(trans);
6828 				goto out;
6829 			}
6830 
6831 			ret = do_chunk_alloc(trans, root, flags,
6832 					     CHUNK_ALLOC_FORCE);
6833 			/*
6834 			 * Do not bail out on ENOSPC since we
6835 			 * can do more things.
6836 			 */
6837 			if (ret < 0 && ret != -ENOSPC)
6838 				btrfs_abort_transaction(trans,
6839 							root, ret);
6840 			else
6841 				ret = 0;
6842 			if (!exist)
6843 				btrfs_end_transaction(trans, root);
6844 			if (ret)
6845 				goto out;
6846 		}
6847 
6848 		if (loop == LOOP_NO_EMPTY_SIZE) {
6849 			empty_size = 0;
6850 			empty_cluster = 0;
6851 		}
6852 
6853 		goto search;
6854 	} else if (!ins->objectid) {
6855 		ret = -ENOSPC;
6856 	} else if (ins->objectid) {
6857 		ret = 0;
6858 	}
6859 out:
6860 	if (ret == -ENOSPC)
6861 		ins->offset = max_extent_size;
6862 	return ret;
6863 }
6864 
6865 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6866 			    int dump_block_groups)
6867 {
6868 	struct btrfs_block_group_cache *cache;
6869 	int index = 0;
6870 
6871 	spin_lock(&info->lock);
6872 	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6873 	       info->flags,
6874 	       info->total_bytes - info->bytes_used - info->bytes_pinned -
6875 	       info->bytes_reserved - info->bytes_readonly,
6876 	       (info->full) ? "" : "not ");
6877 	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6878 	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
6879 	       info->total_bytes, info->bytes_used, info->bytes_pinned,
6880 	       info->bytes_reserved, info->bytes_may_use,
6881 	       info->bytes_readonly);
6882 	spin_unlock(&info->lock);
6883 
6884 	if (!dump_block_groups)
6885 		return;
6886 
6887 	down_read(&info->groups_sem);
6888 again:
6889 	list_for_each_entry(cache, &info->block_groups[index], list) {
6890 		spin_lock(&cache->lock);
6891 		printk(KERN_INFO "BTRFS: "
6892 			   "block group %llu has %llu bytes, "
6893 			   "%llu used %llu pinned %llu reserved %s\n",
6894 		       cache->key.objectid, cache->key.offset,
6895 		       btrfs_block_group_used(&cache->item), cache->pinned,
6896 		       cache->reserved, cache->ro ? "[readonly]" : "");
6897 		btrfs_dump_free_space(cache, bytes);
6898 		spin_unlock(&cache->lock);
6899 	}
6900 	if (++index < BTRFS_NR_RAID_TYPES)
6901 		goto again;
6902 	up_read(&info->groups_sem);
6903 }
6904 
6905 int btrfs_reserve_extent(struct btrfs_root *root,
6906 			 u64 num_bytes, u64 min_alloc_size,
6907 			 u64 empty_size, u64 hint_byte,
6908 			 struct btrfs_key *ins, int is_data, int delalloc)
6909 {
6910 	bool final_tried = false;
6911 	u64 flags;
6912 	int ret;
6913 
6914 	flags = btrfs_get_alloc_profile(root, is_data);
6915 again:
6916 	WARN_ON(num_bytes < root->sectorsize);
6917 	ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6918 			       flags, delalloc);
6919 
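	/*
	 * On ENOSPC, retry with half the size (never below min_alloc_size
	 * and never above the largest free extent the search reported in
	 * ins->offset) before giving up for good.
	 */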
6920 	if (ret == -ENOSPC) {
6921 		if (!final_tried && ins->offset) {
6922 			num_bytes = min(num_bytes >> 1, ins->offset);
6923 			num_bytes = round_down(num_bytes, root->sectorsize);
6924 			num_bytes = max(num_bytes, min_alloc_size);
6925 			if (num_bytes == min_alloc_size)
6926 				final_tried = true;
6927 			goto again;
6928 		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6929 			struct btrfs_space_info *sinfo;
6930 
6931 			sinfo = __find_space_info(root->fs_info, flags);
6932 			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6933 				flags, num_bytes);
6934 			if (sinfo)
6935 				dump_space_info(sinfo, num_bytes, 1);
6936 		}
6937 	}
6938 
6939 	return ret;
6940 }
6941 
6942 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6943 					u64 start, u64 len,
6944 					int pin, int delalloc)
6945 {
6946 	struct btrfs_block_group_cache *cache;
6947 	int ret = 0;
6948 
6949 	cache = btrfs_lookup_block_group(root->fs_info, start);
6950 	if (!cache) {
6951 		btrfs_err(root->fs_info, "Unable to find block group for %llu",
6952 			start);
6953 		return -ENOSPC;
6954 	}
6955 
6956 	if (btrfs_test_opt(root, DISCARD))
6957 		ret = btrfs_discard_extent(root, start, len, NULL);
6958 
6959 	if (pin)
6960 		pin_down_extent(root, cache, start, len, 1);
6961 	else {
6962 		btrfs_add_free_space(cache, start, len);
6963 		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6964 	}
6965 	btrfs_put_block_group(cache);
6966 
6967 	trace_btrfs_reserved_extent_free(root, start, len);
6968 
6969 	return ret;
6970 }
6971 
6972 int btrfs_free_reserved_extent(struct btrfs_root *root,
6973 			       u64 start, u64 len, int delalloc)
6974 {
6975 	return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
6976 }
6977 
6978 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6979 				       u64 start, u64 len)
6980 {
6981 	return __btrfs_free_reserved_extent(root, start, len, 1, 0);
6982 }
6983 
6984 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6985 				      struct btrfs_root *root,
6986 				      u64 parent, u64 root_objectid,
6987 				      u64 flags, u64 owner, u64 offset,
6988 				      struct btrfs_key *ins, int ref_mod)
6989 {
6990 	int ret;
6991 	struct btrfs_fs_info *fs_info = root->fs_info;
6992 	struct btrfs_extent_item *extent_item;
6993 	struct btrfs_extent_inline_ref *iref;
6994 	struct btrfs_path *path;
6995 	struct extent_buffer *leaf;
6996 	int type;
6997 	u32 size;
6998 
6999 	if (parent > 0)
7000 		type = BTRFS_SHARED_DATA_REF_KEY;
7001 	else
7002 		type = BTRFS_EXTENT_DATA_REF_KEY;
7003 
7004 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7005 
7006 	path = btrfs_alloc_path();
7007 	if (!path)
7008 		return -ENOMEM;
7009 
7010 	path->leave_spinning = 1;
7011 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7012 				      ins, size);
7013 	if (ret) {
7014 		btrfs_free_path(path);
7015 		return ret;
7016 	}
7017 
7018 	leaf = path->nodes[0];
7019 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
7020 				     struct btrfs_extent_item);
7021 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7022 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7023 	btrfs_set_extent_flags(leaf, extent_item,
7024 			       flags | BTRFS_EXTENT_FLAG_DATA);
7025 
7026 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7027 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
7028 	if (parent > 0) {
7029 		struct btrfs_shared_data_ref *ref;
7030 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
7031 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7032 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7033 	} else {
7034 		struct btrfs_extent_data_ref *ref;
7035 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7036 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7037 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7038 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7039 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7040 	}
7041 
7042 	btrfs_mark_buffer_dirty(path->nodes[0]);
7043 	btrfs_free_path(path);
7044 
7045 	/* Always set parent to 0 here since it's exclusive anyway. */
7046 	ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7047 				      ins->objectid, ins->offset,
7048 				      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7049 	if (ret)
7050 		return ret;
7051 
7052 	ret = update_block_group(root, ins->objectid, ins->offset, 1);
7053 	if (ret) { /* -ENOENT, logic error */
7054 		btrfs_err(fs_info, "update block group failed for %llu %llu",
7055 			ins->objectid, ins->offset);
7056 		BUG();
7057 	}
7058 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7059 	return ret;
7060 }
7061 
7062 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7063 				     struct btrfs_root *root,
7064 				     u64 parent, u64 root_objectid,
7065 				     u64 flags, struct btrfs_disk_key *key,
7066 				     int level, struct btrfs_key *ins,
7067 				     int no_quota)
7068 {
7069 	int ret;
7070 	struct btrfs_fs_info *fs_info = root->fs_info;
7071 	struct btrfs_extent_item *extent_item;
7072 	struct btrfs_tree_block_info *block_info;
7073 	struct btrfs_extent_inline_ref *iref;
7074 	struct btrfs_path *path;
7075 	struct extent_buffer *leaf;
7076 	u32 size = sizeof(*extent_item) + sizeof(*iref);
7077 	u64 num_bytes = ins->offset;
7078 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7079 						 SKINNY_METADATA);
7080 
7081 	if (!skinny_metadata)
7082 		size += sizeof(*block_info);
7083 
7084 	path = btrfs_alloc_path();
7085 	if (!path) {
7086 		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7087 						   root->nodesize);
7088 		return -ENOMEM;
7089 	}
7090 
7091 	path->leave_spinning = 1;
7092 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7093 				      ins, size);
7094 	if (ret) {
7095 		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7096 						   root->nodesize);
7097 		btrfs_free_path(path);
7098 		return ret;
7099 	}
7100 
7101 	leaf = path->nodes[0];
7102 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
7103 				     struct btrfs_extent_item);
7104 	btrfs_set_extent_refs(leaf, extent_item, 1);
7105 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7106 	btrfs_set_extent_flags(leaf, extent_item,
7107 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7108 
7109 	if (skinny_metadata) {
7110 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7111 		num_bytes = root->nodesize;
7112 	} else {
7113 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7114 		btrfs_set_tree_block_key(leaf, block_info, key);
7115 		btrfs_set_tree_block_level(leaf, block_info, level);
7116 		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7117 	}
7118 
7119 	if (parent > 0) {
7120 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7121 		btrfs_set_extent_inline_ref_type(leaf, iref,
7122 						 BTRFS_SHARED_BLOCK_REF_KEY);
7123 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7124 	} else {
7125 		btrfs_set_extent_inline_ref_type(leaf, iref,
7126 						 BTRFS_TREE_BLOCK_REF_KEY);
7127 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7128 	}
7129 
7130 	btrfs_mark_buffer_dirty(leaf);
7131 	btrfs_free_path(path);
7132 
7133 	if (!no_quota) {
7134 		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7135 					      ins->objectid, num_bytes,
7136 					      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7137 		if (ret)
7138 			return ret;
7139 	}
7140 
7141 	ret = update_block_group(root, ins->objectid, root->nodesize, 1);
7142 	if (ret) { /* -ENOENT, logic error */
7143 		btrfs_err(fs_info, "update block group failed for %llu %llu",
7144 			ins->objectid, ins->offset);
7145 		BUG();
7146 	}
7147 
7148 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7149 	return ret;
7150 }
7151 
7152 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7153 				     struct btrfs_root *root,
7154 				     u64 root_objectid, u64 owner,
7155 				     u64 offset, struct btrfs_key *ins)
7156 {
7157 	int ret;
7158 
7159 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7160 
7161 	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7162 					 ins->offset, 0,
7163 					 root_objectid, owner, offset,
7164 					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7165 	return ret;
7166 }
7167 
7168 /*
7169  * this is used by the tree logging recovery code.  It records that
7170  * an extent has been allocated and makes sure to clear the free
7171  * space cache bits as well
7172  */
7173 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7174 				   struct btrfs_root *root,
7175 				   u64 root_objectid, u64 owner, u64 offset,
7176 				   struct btrfs_key *ins)
7177 {
7178 	int ret;
7179 	struct btrfs_block_group_cache *block_group;
7180 
7181 	/*
7182 	 * Mixed block groups will exclude before processing the log so we only
7183 	 * need to do the exclude dance if this fs isn't mixed.
7184 	 */
7185 	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7186 		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7187 		if (ret)
7188 			return ret;
7189 	}
7190 
7191 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7192 	if (!block_group)
7193 		return -EINVAL;
7194 
7195 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7196 					  RESERVE_ALLOC_NO_ACCOUNT, 0);
7197 	BUG_ON(ret); /* logic error */
7198 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7199 					 0, owner, offset, ins, 1);
7200 	btrfs_put_block_group(block_group);
7201 	return ret;
7202 }
7203 
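/*
 * Set up a freshly allocated tree block: stamp the current transaction's
 * generation on it, lock it, and mark it dirty in the appropriate extent io
 * tree (dirty_log_pages for log tree blocks, the transaction's dirty_pages
 * otherwise).  The buffer is returned locked.
 */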
7204 static struct extent_buffer *
7205 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7206 		      u64 bytenr, u32 blocksize, int level)
7207 {
7208 	struct extent_buffer *buf;
7209 
7210 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
7211 	if (!buf)
7212 		return ERR_PTR(-ENOMEM);
7213 	btrfs_set_header_generation(buf, trans->transid);
7214 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7215 	btrfs_tree_lock(buf);
7216 	clean_tree_block(trans, root, buf);
7217 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7218 
7219 	btrfs_set_lock_blocking(buf);
7220 	btrfs_set_buffer_uptodate(buf);
7221 
7222 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7223 		buf->log_index = root->log_transid % 2;
7224 		/*
7225 		 * we allow two log transactions at a time, use different
7226 		 * EXTENT bits to differentiate dirty pages.
7227 		 */
7228 		if (buf->log_index == 0)
7229 			set_extent_dirty(&root->dirty_log_pages, buf->start,
7230 					buf->start + buf->len - 1, GFP_NOFS);
7231 		else
7232 			set_extent_new(&root->dirty_log_pages, buf->start,
7233 					buf->start + buf->len - 1, GFP_NOFS);
7234 	} else {
7235 		buf->log_index = -1;
7236 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7237 			 buf->start + buf->len - 1, GFP_NOFS);
7238 	}
7239 	trans->blocks_used++;
7240 	/* this returns a buffer locked for blocking */
7241 	return buf;
7242 }
7243 
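/*
 * Pick a block reserve to pay for a new tree block.  Try the reserve
 * returned by get_block_rsv() first; if that is the global reserve, refresh
 * it once and retry.  Then fall back to a BTRFS_RESERVE_NO_FLUSH metadata
 * reservation and finally to stealing from the global reserve when both
 * reserves share the same space_info.
 */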
7244 static struct btrfs_block_rsv *
7245 use_block_rsv(struct btrfs_trans_handle *trans,
7246 	      struct btrfs_root *root, u32 blocksize)
7247 {
7248 	struct btrfs_block_rsv *block_rsv;
7249 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7250 	int ret;
7251 	bool global_updated = false;
7252 
7253 	block_rsv = get_block_rsv(trans, root);
7254 
7255 	if (unlikely(block_rsv->size == 0))
7256 		goto try_reserve;
7257 again:
7258 	ret = block_rsv_use_bytes(block_rsv, blocksize);
7259 	if (!ret)
7260 		return block_rsv;
7261 
7262 	if (block_rsv->failfast)
7263 		return ERR_PTR(ret);
7264 
7265 	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7266 		global_updated = true;
7267 		update_global_block_rsv(root->fs_info);
7268 		goto again;
7269 	}
7270 
7271 	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7272 		static DEFINE_RATELIMIT_STATE(_rs,
7273 				DEFAULT_RATELIMIT_INTERVAL * 10,
7274 				/*DEFAULT_RATELIMIT_BURST*/ 1);
7275 		if (__ratelimit(&_rs))
7276 			WARN(1, KERN_DEBUG
7277 				"BTRFS: block rsv returned %d\n", ret);
7278 	}
7279 try_reserve:
7280 	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7281 				     BTRFS_RESERVE_NO_FLUSH);
7282 	if (!ret)
7283 		return block_rsv;
7284 	/*
7285 	 * If we couldn't reserve metadata bytes, try to use some from
7286 	 * the global reserve, provided our reserve's space_info matches
7287 	 * that of the global reservation.
7288 	 */
7289 	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7290 	    block_rsv->space_info == global_rsv->space_info) {
7291 		ret = block_rsv_use_bytes(global_rsv, blocksize);
7292 		if (!ret)
7293 			return global_rsv;
7294 	}
7295 	return ERR_PTR(ret);
7296 }
7297 
7298 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7299 			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
7300 {
7301 	block_rsv_add_bytes(block_rsv, blocksize, 0);
7302 	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7303 }
7304 
7305 /*
7306  * finds a free extent and does all the dirty work required for
7307  * allocation.  on success this returns a locked tree buffer for the
7308  * first block of the new extent.
7309  *
7310  * on failure this returns an ERR_PTR, never NULL.
7311  */
7312 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7313 					struct btrfs_root *root,
7314 					u64 parent, u64 root_objectid,
7315 					struct btrfs_disk_key *key, int level,
7316 					u64 hint, u64 empty_size)
7317 {
7318 	struct btrfs_key ins;
7319 	struct btrfs_block_rsv *block_rsv;
7320 	struct extent_buffer *buf;
7321 	u64 flags = 0;
7322 	int ret;
7323 	u32 blocksize = root->nodesize;
7324 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7325 						 SKINNY_METADATA);
7326 
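	/*
	 * Dummy roots are used by the sanity tests and have no real extent
	 * tree to allocate from; just hand out consecutive bytenrs from
	 * alloc_bytenr.
	 */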
7327 	if (btrfs_test_is_dummy_root(root)) {
7328 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7329 					    blocksize, level);
7330 		if (!IS_ERR(buf))
7331 			root->alloc_bytenr += blocksize;
7332 		return buf;
7333 	}
7334 
7335 	block_rsv = use_block_rsv(trans, root, blocksize);
7336 	if (IS_ERR(block_rsv))
7337 		return ERR_CAST(block_rsv);
7338 
7339 	ret = btrfs_reserve_extent(root, blocksize, blocksize,
7340 				   empty_size, hint, &ins, 0, 0);
7341 	if (ret) {
7342 		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7343 		return ERR_PTR(ret);
7344 	}
7345 
7346 	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7347 				    blocksize, level);
7348 	BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7349 
7350 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7351 		if (parent == 0)
7352 			parent = ins.objectid;
7353 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7354 	} else
7355 		BUG_ON(parent > 0);
7356 
7357 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7358 		struct btrfs_delayed_extent_op *extent_op;
7359 		extent_op = btrfs_alloc_delayed_extent_op();
7360 		BUG_ON(!extent_op); /* -ENOMEM */
7361 		if (key)
7362 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
7363 		else
7364 			memset(&extent_op->key, 0, sizeof(extent_op->key));
7365 		extent_op->flags_to_set = flags;
7366 		if (skinny_metadata)
7367 			extent_op->update_key = 0;
7368 		else
7369 			extent_op->update_key = 1;
7370 		extent_op->update_flags = 1;
7371 		extent_op->is_data = 0;
7372 		extent_op->level = level;
7373 
7374 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7375 					ins.objectid,
7376 					ins.offset, parent, root_objectid,
7377 					level, BTRFS_ADD_DELAYED_EXTENT,
7378 					extent_op, 0);
7379 		BUG_ON(ret); /* -ENOMEM */
7380 	}
7381 	return buf;
7382 }
7383 
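/*
 * State for the tree walks below.  refs[] and flags[] cache the reference
 * count and extent flags of the block held at each level of the path,
 * stage switches between DROP_REFERENCE and UPDATE_BACKREF, and
 * update_progress holds the resume key loaded from the root item's
 * drop_progress so already-processed blocks can be skipped.
 */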
7384 struct walk_control {
7385 	u64 refs[BTRFS_MAX_LEVEL];
7386 	u64 flags[BTRFS_MAX_LEVEL];
7387 	struct btrfs_key update_progress;
7388 	int stage;
7389 	int level;
7390 	int shared_level;
7391 	int update_ref;
7392 	int keep_locks;
7393 	int reada_slot;
7394 	int reada_count;
7395 	int for_reloc;
7396 };
7397 
7398 #define DROP_REFERENCE	1
7399 #define UPDATE_BACKREF	2
7400 
7401 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7402 				     struct btrfs_root *root,
7403 				     struct walk_control *wc,
7404 				     struct btrfs_path *path)
7405 {
7406 	u64 bytenr;
7407 	u64 generation;
7408 	u64 refs;
7409 	u64 flags;
7410 	u32 nritems;
7411 	u32 blocksize;
7412 	struct btrfs_key key;
7413 	struct extent_buffer *eb;
7414 	int ret;
7415 	int slot;
7416 	int nread = 0;
7417 
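	/*
	 * Shrink the readahead window if we are called again while still
	 * inside the window we issued last time, otherwise grow it, capped
	 * at the number of pointers a node can hold.
	 */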
7418 	if (path->slots[wc->level] < wc->reada_slot) {
7419 		wc->reada_count = wc->reada_count * 2 / 3;
7420 		wc->reada_count = max(wc->reada_count, 2);
7421 	} else {
7422 		wc->reada_count = wc->reada_count * 3 / 2;
7423 		wc->reada_count = min_t(int, wc->reada_count,
7424 					BTRFS_NODEPTRS_PER_BLOCK(root));
7425 	}
7426 
7427 	eb = path->nodes[wc->level];
7428 	nritems = btrfs_header_nritems(eb);
7429 	blocksize = root->nodesize;
7430 
7431 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7432 		if (nread >= wc->reada_count)
7433 			break;
7434 
7435 		cond_resched();
7436 		bytenr = btrfs_node_blockptr(eb, slot);
7437 		generation = btrfs_node_ptr_generation(eb, slot);
7438 
7439 		if (slot == path->slots[wc->level])
7440 			goto reada;
7441 
7442 		if (wc->stage == UPDATE_BACKREF &&
7443 		    generation <= root->root_key.offset)
7444 			continue;
7445 
7446 		/* We don't lock the tree block, it's OK to be racy here */
7447 		ret = btrfs_lookup_extent_info(trans, root, bytenr,
7448 					       wc->level - 1, 1, &refs,
7449 					       &flags);
7450 		/* We don't care about errors in readahead. */
7451 		if (ret < 0)
7452 			continue;
7453 		BUG_ON(refs == 0);
7454 
7455 		if (wc->stage == DROP_REFERENCE) {
7456 			if (refs == 1)
7457 				goto reada;
7458 
7459 			if (wc->level == 1 &&
7460 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7461 				continue;
7462 			if (!wc->update_ref ||
7463 			    generation <= root->root_key.offset)
7464 				continue;
7465 			btrfs_node_key_to_cpu(eb, &key, slot);
7466 			ret = btrfs_comp_cpu_keys(&key,
7467 						  &wc->update_progress);
7468 			if (ret < 0)
7469 				continue;
7470 		} else {
7471 			if (wc->level == 1 &&
7472 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7473 				continue;
7474 		}
7475 reada:
7476 		readahead_tree_block(root, bytenr, blocksize);
7477 		nread++;
7478 	}
7479 	wc->reada_slot = slot;
7480 }
7481 
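/*
 * Record a qgroup operation for every on-disk file extent referenced by
 * this leaf.  Inline extents and holes carry no extent tree reference, so
 * they are skipped.
 */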
7482 static int account_leaf_items(struct btrfs_trans_handle *trans,
7483 			      struct btrfs_root *root,
7484 			      struct extent_buffer *eb)
7485 {
7486 	int nr = btrfs_header_nritems(eb);
7487 	int i, extent_type, ret;
7488 	struct btrfs_key key;
7489 	struct btrfs_file_extent_item *fi;
7490 	u64 bytenr, num_bytes;
7491 
7492 	for (i = 0; i < nr; i++) {
7493 		btrfs_item_key_to_cpu(eb, &key, i);
7494 
7495 		if (key.type != BTRFS_EXTENT_DATA_KEY)
7496 			continue;
7497 
7498 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7499 		/* filter out non-qgroup-accountable extents */
7500 		extent_type = btrfs_file_extent_type(eb, fi);
7501 
7502 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7503 			continue;
7504 
7505 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7506 		if (!bytenr)
7507 			continue;
7508 
7509 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7510 
7511 		ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7512 					      root->objectid,
7513 					      bytenr, num_bytes,
7514 					      BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7515 		if (ret)
7516 			return ret;
7517 	}
7518 	return 0;
7519 }
7520 
7521 /*
7522  * Walk up the tree from the bottom, freeing leaves and any interior
7523  * nodes which have had all slots visited. If a node (leaf or
7524  * interior) is freed, the node above it will have its slot
7525  * incremented. The root node will never be freed.
7526  *
7527  * At the end of this function, we should have a path which has all
7528  * slots incremented to the next position for a search. If we need to
7529  * read a new node it will be NULL and the node above it will have the
7530  * correct slot selected for a later read.
7531  *
7532  * If we increment the root node's slot counter past the number of
7533  * elements, 1 is returned to signal completion of the search.
7534  */
7535 static int adjust_slots_upwards(struct btrfs_root *root,
7536 				struct btrfs_path *path, int root_level)
7537 {
7538 	int level = 0;
7539 	int nr, slot;
7540 	struct extent_buffer *eb;
7541 
7542 	if (root_level == 0)
7543 		return 1;
7544 
7545 	while (level <= root_level) {
7546 		eb = path->nodes[level];
7547 		nr = btrfs_header_nritems(eb);
7548 		path->slots[level]++;
7549 		slot = path->slots[level];
7550 		if (slot >= nr || level == 0) {
7551 			/*
7552 			 * Don't free the root - we will detect this
7553 			 * condition after our loop and return a
7554 			 * positive value for the caller to stop walking the tree.
7555 			 */
7556 			if (level != root_level) {
7557 				btrfs_tree_unlock_rw(eb, path->locks[level]);
7558 				path->locks[level] = 0;
7559 
7560 				free_extent_buffer(eb);
7561 				path->nodes[level] = NULL;
7562 				path->slots[level] = 0;
7563 			}
7564 		} else {
7565 			/*
7566 			 * We have a valid slot to walk back down
7567 			 * from. Stop here so caller can process these
7568 			 * new nodes.
7569 			 */
7570 			break;
7571 		}
7572 
7573 		level++;
7574 	}
7575 
7576 	eb = path->nodes[root_level];
7577 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
7578 		return 1;
7579 
7580 	return 0;
7581 }
7582 
7583 /*
7584  * root_eb is the subtree root and is locked before this function is called.
7585  */
7586 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7587 				  struct btrfs_root *root,
7588 				  struct extent_buffer *root_eb,
7589 				  u64 root_gen,
7590 				  int root_level)
7591 {
7592 	int ret = 0;
7593 	int level;
7594 	struct extent_buffer *eb = root_eb;
7595 	struct btrfs_path *path = NULL;
7596 
7597 	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7598 	BUG_ON(root_eb == NULL);
7599 
7600 	if (!root->fs_info->quota_enabled)
7601 		return 0;
7602 
7603 	if (!extent_buffer_uptodate(root_eb)) {
7604 		ret = btrfs_read_buffer(root_eb, root_gen);
7605 		if (ret)
7606 			goto out;
7607 	}
7608 
7609 	if (root_level == 0) {
7610 		ret = account_leaf_items(trans, root, root_eb);
7611 		goto out;
7612 	}
7613 
7614 	path = btrfs_alloc_path();
7615 	if (!path)
7616 		return -ENOMEM;
7617 
7618 	/*
7619 	 * Walk down the tree.  Missing extent blocks are filled in as
7620 	 * we go. Metadata is accounted every time we read a new
7621 	 * extent block.
7622 	 *
7623 	 * When we reach a leaf, we account for file extent items in it,
7624 	 * walk back up the tree (adjusting slot pointers as we go)
7625 	 * and restart the search process.
7626 	 */
7627 	extent_buffer_get(root_eb); /* For path */
7628 	path->nodes[root_level] = root_eb;
7629 	path->slots[root_level] = 0;
7630 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7631 walk_down:
7632 	level = root_level;
7633 	while (level >= 0) {
7634 		if (path->nodes[level] == NULL) {
7635 			int parent_slot;
7636 			u64 child_gen;
7637 			u64 child_bytenr;
7638 
7639 			/* We need to get child blockptr/gen from
7640 			 * parent before we can read it. */
7641 			eb = path->nodes[level + 1];
7642 			parent_slot = path->slots[level + 1];
7643 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7644 			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7645 
7646 			eb = read_tree_block(root, child_bytenr, child_gen);
7647 			if (!eb || !extent_buffer_uptodate(eb)) {
7648 				ret = -EIO;
7649 				goto out;
7650 			}
7651 
7652 			path->nodes[level] = eb;
7653 			path->slots[level] = 0;
7654 
7655 			btrfs_tree_read_lock(eb);
7656 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7657 			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7658 
7659 			ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7660 						root->objectid,
7661 						child_bytenr,
7662 						root->nodesize,
7663 						BTRFS_QGROUP_OPER_SUB_SUBTREE,
7664 						0);
7665 			if (ret)
7666 				goto out;
7667 
7668 		}
7669 
7670 		if (level == 0) {
7671 			ret = account_leaf_items(trans, root, path->nodes[level]);
7672 			if (ret)
7673 				goto out;
7674 
7675 			/* Nonzero return here means we completed our search */
7676 			ret = adjust_slots_upwards(root, path, root_level);
7677 			if (ret)
7678 				break;
7679 
7680 			/* Restart search with new slots */
7681 			goto walk_down;
7682 		}
7683 
7684 		level--;
7685 	}
7686 
7687 	ret = 0;
7688 out:
7689 	btrfs_free_path(path);
7690 
7691 	return ret;
7692 }
7693 
7694 /*
7695  * helper to process tree block while walking down the tree.
7696  *
7697  * when wc->stage == UPDATE_BACKREF, this function updates
7698  * back refs for pointers in the block.
7699  *
7700  * NOTE: return value 1 means we should stop walking down.
7701  */
7702 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7703 				   struct btrfs_root *root,
7704 				   struct btrfs_path *path,
7705 				   struct walk_control *wc, int lookup_info)
7706 {
7707 	int level = wc->level;
7708 	struct extent_buffer *eb = path->nodes[level];
7709 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7710 	int ret;
7711 
7712 	if (wc->stage == UPDATE_BACKREF &&
7713 	    btrfs_header_owner(eb) != root->root_key.objectid)
7714 		return 1;
7715 
7716 	/*
7717 	 * when the reference count of a tree block is 1, it won't increase
7718 	 * again. once the full backref flag is set, we never clear it.
7719 	 */
7720 	if (lookup_info &&
7721 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7722 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7723 		BUG_ON(!path->locks[level]);
7724 		ret = btrfs_lookup_extent_info(trans, root,
7725 					       eb->start, level, 1,
7726 					       &wc->refs[level],
7727 					       &wc->flags[level]);
7728 		BUG_ON(ret == -ENOMEM);
7729 		if (ret)
7730 			return ret;
7731 		BUG_ON(wc->refs[level] == 0);
7732 	}
7733 
7734 	if (wc->stage == DROP_REFERENCE) {
7735 		if (wc->refs[level] > 1)
7736 			return 1;
7737 
7738 		if (path->locks[level] && !wc->keep_locks) {
7739 			btrfs_tree_unlock_rw(eb, path->locks[level]);
7740 			path->locks[level] = 0;
7741 		}
7742 		return 0;
7743 	}
7744 
7745 	/* wc->stage == UPDATE_BACKREF */
7746 	if (!(wc->flags[level] & flag)) {
7747 		BUG_ON(!path->locks[level]);
7748 		ret = btrfs_inc_ref(trans, root, eb, 1);
7749 		BUG_ON(ret); /* -ENOMEM */
7750 		ret = btrfs_dec_ref(trans, root, eb, 0);
7751 		BUG_ON(ret); /* -ENOMEM */
7752 		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7753 						  eb->len, flag,
7754 						  btrfs_header_level(eb), 0);
7755 		BUG_ON(ret); /* -ENOMEM */
7756 		wc->flags[level] |= flag;
7757 	}
7758 
7759 	/*
7760 	 * the block is shared by multiple trees, so it's not good to
7761 	 * keep the tree lock
7762 	 */
7763 	if (path->locks[level] && level > 0) {
7764 		btrfs_tree_unlock_rw(eb, path->locks[level]);
7765 		path->locks[level] = 0;
7766 	}
7767 	return 0;
7768 }
7769 
7770 /*
7771  * helper to process tree block pointer.
7772  *
7773  * when wc->stage == DROP_REFERENCE, this function checks the
7774  * reference count of the block pointed to. if the block
7775  * is shared and we need to update back refs for the subtree
7776  * rooted at the block, this function changes wc->stage to
7777  * UPDATE_BACKREF. if the block is shared and there is no
7778  * need to update back refs, this function drops the reference
7779  * to the block.
7780  *
7781  * NOTE: return value 1 means we should stop walking down.
7782  */
7783 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7784 				 struct btrfs_root *root,
7785 				 struct btrfs_path *path,
7786 				 struct walk_control *wc, int *lookup_info)
7787 {
7788 	u64 bytenr;
7789 	u64 generation;
7790 	u64 parent;
7791 	u32 blocksize;
7792 	struct btrfs_key key;
7793 	struct extent_buffer *next;
7794 	int level = wc->level;
7795 	int reada = 0;
7796 	int ret = 0;
7797 	bool need_account = false;
7798 
7799 	generation = btrfs_node_ptr_generation(path->nodes[level],
7800 					       path->slots[level]);
7801 	/*
7802 	 * if the lower level block was created before the snapshot
7803 	 * was created, we know there is no need to update back refs
7804 	 * for the subtree
7805 	 */
7806 	if (wc->stage == UPDATE_BACKREF &&
7807 	    generation <= root->root_key.offset) {
7808 		*lookup_info = 1;
7809 		return 1;
7810 	}
7811 
7812 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7813 	blocksize = root->nodesize;
7814 
7815 	next = btrfs_find_tree_block(root, bytenr);
7816 	if (!next) {
7817 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7818 		if (!next)
7819 			return -ENOMEM;
7820 		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7821 					       level - 1);
7822 		reada = 1;
7823 	}
7824 	btrfs_tree_lock(next);
7825 	btrfs_set_lock_blocking(next);
7826 
7827 	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7828 				       &wc->refs[level - 1],
7829 				       &wc->flags[level - 1]);
7830 	if (ret < 0) {
7831 		btrfs_tree_unlock(next);
7832 		return ret;
7833 	}
7834 
7835 	if (unlikely(wc->refs[level - 1] == 0)) {
7836 		btrfs_err(root->fs_info, "Missing references.");
7837 		BUG();
7838 	}
7839 	*lookup_info = 0;
7840 
7841 	if (wc->stage == DROP_REFERENCE) {
7842 		if (wc->refs[level - 1] > 1) {
7843 			need_account = true;
7844 			if (level == 1 &&
7845 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7846 				goto skip;
7847 
7848 			if (!wc->update_ref ||
7849 			    generation <= root->root_key.offset)
7850 				goto skip;
7851 
7852 			btrfs_node_key_to_cpu(path->nodes[level], &key,
7853 					      path->slots[level]);
7854 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7855 			if (ret < 0)
7856 				goto skip;
7857 
7858 			wc->stage = UPDATE_BACKREF;
7859 			wc->shared_level = level - 1;
7860 		}
7861 	} else {
7862 		if (level == 1 &&
7863 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7864 			goto skip;
7865 	}
7866 
7867 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
7868 		btrfs_tree_unlock(next);
7869 		free_extent_buffer(next);
7870 		next = NULL;
7871 		*lookup_info = 1;
7872 	}
7873 
7874 	if (!next) {
7875 		if (reada && level == 1)
7876 			reada_walk_down(trans, root, wc, path);
7877 		next = read_tree_block(root, bytenr, generation);
7878 		if (!next || !extent_buffer_uptodate(next)) {
7879 			free_extent_buffer(next);
7880 			return -EIO;
7881 		}
7882 		btrfs_tree_lock(next);
7883 		btrfs_set_lock_blocking(next);
7884 	}
7885 
7886 	level--;
7887 	BUG_ON(level != btrfs_header_level(next));
7888 	path->nodes[level] = next;
7889 	path->slots[level] = 0;
7890 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7891 	wc->level = level;
7892 	if (wc->level == 1)
7893 		wc->reada_slot = 0;
7894 	return 0;
7895 skip:
7896 	wc->refs[level - 1] = 0;
7897 	wc->flags[level - 1] = 0;
7898 	if (wc->stage == DROP_REFERENCE) {
7899 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7900 			parent = path->nodes[level]->start;
7901 		} else {
7902 			BUG_ON(root->root_key.objectid !=
7903 			       btrfs_header_owner(path->nodes[level]));
7904 			parent = 0;
7905 		}
7906 
7907 		if (need_account) {
7908 			ret = account_shared_subtree(trans, root, next,
7909 						     generation, level - 1);
7910 			if (ret) {
7911 				printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7912 					"%d accounting shared subtree. Quota "
7913 					"is out of sync, rescan required.\n",
7914 					root->fs_info->sb->s_id, ret);
7915 			}
7916 		}
7917 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7918 				root->root_key.objectid, level - 1, 0, 0);
7919 		BUG_ON(ret); /* -ENOMEM */
7920 	}
7921 	btrfs_tree_unlock(next);
7922 	free_extent_buffer(next);
7923 	*lookup_info = 1;
7924 	return 1;
7925 }
7926 
7927 /*
7928  * helper to process tree block while walking up the tree.
7929  *
7930  * when wc->stage == DROP_REFERENCE, this function drops
7931  * reference count on the block.
7932  *
7933  * when wc->stage == UPDATE_BACKREF, this function changes
7934  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7935  * to UPDATE_BACKREF previously while processing the block.
7936  *
7937  * NOTE: return value 1 means we should stop walking up.
7938  */
7939 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7940 				 struct btrfs_root *root,
7941 				 struct btrfs_path *path,
7942 				 struct walk_control *wc)
7943 {
7944 	int ret;
7945 	int level = wc->level;
7946 	struct extent_buffer *eb = path->nodes[level];
7947 	u64 parent = 0;
7948 
7949 	if (wc->stage == UPDATE_BACKREF) {
7950 		BUG_ON(wc->shared_level < level);
7951 		if (level < wc->shared_level)
7952 			goto out;
7953 
7954 		ret = find_next_key(path, level + 1, &wc->update_progress);
7955 		if (ret > 0)
7956 			wc->update_ref = 0;
7957 
7958 		wc->stage = DROP_REFERENCE;
7959 		wc->shared_level = -1;
7960 		path->slots[level] = 0;
7961 
7962 		/*
7963 		 * check reference count again if the block isn't locked.
7964 		 * we should start walking down the tree again if reference
7965 		 * count is one.
7966 		 */
7967 		if (!path->locks[level]) {
7968 			BUG_ON(level == 0);
7969 			btrfs_tree_lock(eb);
7970 			btrfs_set_lock_blocking(eb);
7971 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7972 
7973 			ret = btrfs_lookup_extent_info(trans, root,
7974 						       eb->start, level, 1,
7975 						       &wc->refs[level],
7976 						       &wc->flags[level]);
7977 			if (ret < 0) {
7978 				btrfs_tree_unlock_rw(eb, path->locks[level]);
7979 				path->locks[level] = 0;
7980 				return ret;
7981 			}
7982 			BUG_ON(wc->refs[level] == 0);
7983 			if (wc->refs[level] == 1) {
7984 				btrfs_tree_unlock_rw(eb, path->locks[level]);
7985 				path->locks[level] = 0;
7986 				return 1;
7987 			}
7988 		}
7989 	}
7990 
7991 	/* wc->stage == DROP_REFERENCE */
7992 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7993 
7994 	if (wc->refs[level] == 1) {
7995 		if (level == 0) {
7996 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7997 				ret = btrfs_dec_ref(trans, root, eb, 1);
7998 			else
7999 				ret = btrfs_dec_ref(trans, root, eb, 0);
8000 			BUG_ON(ret); /* -ENOMEM */
8001 			ret = account_leaf_items(trans, root, eb);
8002 			if (ret) {
8003 				printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8004 					"%d accounting leaf items. Quota "
8005 					"is out of sync, rescan required.\n",
8006 					root->fs_info->sb->s_id, ret);
8007 			}
8008 		}
8009 		/* make block locked assertion in clean_tree_block happy */
8010 		if (!path->locks[level] &&
8011 		    btrfs_header_generation(eb) == trans->transid) {
8012 			btrfs_tree_lock(eb);
8013 			btrfs_set_lock_blocking(eb);
8014 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8015 		}
8016 		clean_tree_block(trans, root, eb);
8017 	}
8018 
8019 	if (eb == root->node) {
8020 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8021 			parent = eb->start;
8022 		else
8023 			BUG_ON(root->root_key.objectid !=
8024 			       btrfs_header_owner(eb));
8025 	} else {
8026 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8027 			parent = path->nodes[level + 1]->start;
8028 		else
8029 			BUG_ON(root->root_key.objectid !=
8030 			       btrfs_header_owner(path->nodes[level + 1]));
8031 	}
8032 
8033 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8034 out:
8035 	wc->refs[level] = 0;
8036 	wc->flags[level] = 0;
8037 	return 0;
8038 }
8039 
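/*
 * Walk down from the current position as far as we are allowed to,
 * processing each block with walk_down_proc and descending into children
 * with do_walk_down.  We stop at a leaf, when walk_down_proc tells us to
 * stop (e.g. the block is shared), or when the current node has no more
 * slots to visit.
 */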
8040 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8041 				   struct btrfs_root *root,
8042 				   struct btrfs_path *path,
8043 				   struct walk_control *wc)
8044 {
8045 	int level = wc->level;
8046 	int lookup_info = 1;
8047 	int ret;
8048 
8049 	while (level >= 0) {
8050 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
8051 		if (ret > 0)
8052 			break;
8053 
8054 		if (level == 0)
8055 			break;
8056 
8057 		if (path->slots[level] >=
8058 		    btrfs_header_nritems(path->nodes[level]))
8059 			break;
8060 
8061 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
8062 		if (ret > 0) {
8063 			path->slots[level]++;
8064 			continue;
8065 		} else if (ret < 0)
8066 			return ret;
8067 		level = wc->level;
8068 	}
8069 	return 0;
8070 }
8071 
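/*
 * Walk back up the path, finishing off each block with walk_up_proc as its
 * last slot is visited.  Returns 0 when there is more to walk down, 1 once
 * every node up to max_level has been finished.
 */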
8072 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8073 				 struct btrfs_root *root,
8074 				 struct btrfs_path *path,
8075 				 struct walk_control *wc, int max_level)
8076 {
8077 	int level = wc->level;
8078 	int ret;
8079 
8080 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8081 	while (level < max_level && path->nodes[level]) {
8082 		wc->level = level;
8083 		if (path->slots[level] + 1 <
8084 		    btrfs_header_nritems(path->nodes[level])) {
8085 			path->slots[level]++;
8086 			return 0;
8087 		} else {
8088 			ret = walk_up_proc(trans, root, path, wc);
8089 			if (ret > 0)
8090 				return 0;
8091 
8092 			if (path->locks[level]) {
8093 				btrfs_tree_unlock_rw(path->nodes[level],
8094 						     path->locks[level]);
8095 				path->locks[level] = 0;
8096 			}
8097 			free_extent_buffer(path->nodes[level]);
8098 			path->nodes[level] = NULL;
8099 			level++;
8100 		}
8101 	}
8102 	return 1;
8103 }
8104 
8105 /*
8106  * drop a subvolume tree.
8107  *
8108  * this function traverses the tree freeing any blocks that are only
8109  * referenced by the tree.
8110  *
8111  * when a shared tree block is found, this function decreases its
8112  * reference count by one. if update_ref is true, this function
8113  * also makes sure backrefs for the shared block and all lower level
8114  * blocks are properly updated.
8115  *
8116  * If called with for_reloc == 0, may exit early with -EAGAIN
8117  */
8118 int btrfs_drop_snapshot(struct btrfs_root *root,
8119 			 struct btrfs_block_rsv *block_rsv, int update_ref,
8120 			 int for_reloc)
8121 {
8122 	struct btrfs_path *path;
8123 	struct btrfs_trans_handle *trans;
8124 	struct btrfs_root *tree_root = root->fs_info->tree_root;
8125 	struct btrfs_root_item *root_item = &root->root_item;
8126 	struct walk_control *wc;
8127 	struct btrfs_key key;
8128 	int err = 0;
8129 	int ret;
8130 	int level;
8131 	bool root_dropped = false;
8132 
8133 	btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8134 
8135 	path = btrfs_alloc_path();
8136 	if (!path) {
8137 		err = -ENOMEM;
8138 		goto out;
8139 	}
8140 
8141 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
8142 	if (!wc) {
8143 		btrfs_free_path(path);
8144 		err = -ENOMEM;
8145 		goto out;
8146 	}
8147 
8148 	trans = btrfs_start_transaction(tree_root, 0);
8149 	if (IS_ERR(trans)) {
8150 		err = PTR_ERR(trans);
8151 		goto out_free;
8152 	}
8153 
8154 	if (block_rsv)
8155 		trans->block_rsv = block_rsv;
8156 
8157 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8158 		level = btrfs_header_level(root->node);
8159 		path->nodes[level] = btrfs_lock_root_node(root);
8160 		btrfs_set_lock_blocking(path->nodes[level]);
8161 		path->slots[level] = 0;
8162 		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8163 		memset(&wc->update_progress, 0,
8164 		       sizeof(wc->update_progress));
8165 	} else {
8166 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8167 		memcpy(&wc->update_progress, &key,
8168 		       sizeof(wc->update_progress));
8169 
8170 		level = root_item->drop_level;
8171 		BUG_ON(level == 0);
8172 		path->lowest_level = level;
8173 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8174 		path->lowest_level = 0;
8175 		if (ret < 0) {
8176 			err = ret;
8177 			goto out_end_trans;
8178 		}
8179 		WARN_ON(ret > 0);
8180 
8181 		/*
8182 		 * unlock our path; this is safe because only this
8183 		 * function is allowed to delete this snapshot
8184 		 */
8185 		btrfs_unlock_up_safe(path, 0);
8186 
8187 		level = btrfs_header_level(root->node);
8188 		while (1) {
8189 			btrfs_tree_lock(path->nodes[level]);
8190 			btrfs_set_lock_blocking(path->nodes[level]);
8191 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8192 
8193 			ret = btrfs_lookup_extent_info(trans, root,
8194 						path->nodes[level]->start,
8195 						level, 1, &wc->refs[level],
8196 						&wc->flags[level]);
8197 			if (ret < 0) {
8198 				err = ret;
8199 				goto out_end_trans;
8200 			}
8201 			BUG_ON(wc->refs[level] == 0);
8202 
8203 			if (level == root_item->drop_level)
8204 				break;
8205 
8206 			btrfs_tree_unlock(path->nodes[level]);
8207 			path->locks[level] = 0;
8208 			WARN_ON(wc->refs[level] != 1);
8209 			level--;
8210 		}
8211 	}
8212 
8213 	wc->level = level;
8214 	wc->shared_level = -1;
8215 	wc->stage = DROP_REFERENCE;
8216 	wc->update_ref = update_ref;
8217 	wc->keep_locks = 0;
8218 	wc->for_reloc = for_reloc;
8219 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8220 
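	/*
	 * The main drop loop: alternate between walking down (freeing
	 * blocks) and walking back up.  After each pass we record our
	 * position in the root item's drop_progress so the drop can be
	 * resumed, and periodically end the transaction (or bail out with
	 * -EAGAIN when the cleaner needs to sleep and we're not relocating).
	 */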
8221 	while (1) {
8222 
8223 		ret = walk_down_tree(trans, root, path, wc);
8224 		if (ret < 0) {
8225 			err = ret;
8226 			break;
8227 		}
8228 
8229 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8230 		if (ret < 0) {
8231 			err = ret;
8232 			break;
8233 		}
8234 
8235 		if (ret > 0) {
8236 			BUG_ON(wc->stage != DROP_REFERENCE);
8237 			break;
8238 		}
8239 
8240 		if (wc->stage == DROP_REFERENCE) {
8241 			level = wc->level;
8242 			btrfs_node_key(path->nodes[level],
8243 				       &root_item->drop_progress,
8244 				       path->slots[level]);
8245 			root_item->drop_level = level;
8246 		}
8247 
8248 		BUG_ON(wc->level == 0);
8249 		if (btrfs_should_end_transaction(trans, tree_root) ||
8250 		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8251 			ret = btrfs_update_root(trans, tree_root,
8252 						&root->root_key,
8253 						root_item);
8254 			if (ret) {
8255 				btrfs_abort_transaction(trans, tree_root, ret);
8256 				err = ret;
8257 				goto out_end_trans;
8258 			}
8259 
8260 			/*
8261 			 * Qgroup update accounting is run from
8262 			 * delayed ref handling. This usually works
8263 			 * out because delayed refs are normally the
8264 			 * only way qgroup updates are added. However,
8265 			 * we may have added updates during our tree
8266 			 * walk so run qgroups here to make sure we
8267 			 * don't lose any updates.
8268 			 */
8269 			ret = btrfs_delayed_qgroup_accounting(trans,
8270 							      root->fs_info);
8271 			if (ret)
8272 				printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8273 						   "running qgroup updates "
8274 						   "during snapshot delete. "
8275 						   "Quota is out of sync, "
8276 						   "rescan required.\n", ret);
8277 
8278 			btrfs_end_transaction_throttle(trans, tree_root);
8279 			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8280 				pr_debug("BTRFS: drop snapshot early exit\n");
8281 				err = -EAGAIN;
8282 				goto out_free;
8283 			}
8284 
8285 			trans = btrfs_start_transaction(tree_root, 0);
8286 			if (IS_ERR(trans)) {
8287 				err = PTR_ERR(trans);
8288 				goto out_free;
8289 			}
8290 			if (block_rsv)
8291 				trans->block_rsv = block_rsv;
8292 		}
8293 	}
8294 	btrfs_release_path(path);
8295 	if (err)
8296 		goto out_end_trans;
8297 
8298 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
8299 	if (ret) {
8300 		btrfs_abort_transaction(trans, tree_root, ret);
8301 		goto out_end_trans;
8302 	}
8303 
8304 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8305 		ret = btrfs_find_root(tree_root, &root->root_key, path,
8306 				      NULL, NULL);
8307 		if (ret < 0) {
8308 			btrfs_abort_transaction(trans, tree_root, ret);
8309 			err = ret;
8310 			goto out_end_trans;
8311 		} else if (ret > 0) {
8312 			/* if we fail to delete the orphan item this time
8313 			 * around, it'll get picked up the next time.
8314 			 *
8315 			 * The most common failure here is just -ENOENT.
8316 			 */
8317 			btrfs_del_orphan_item(trans, tree_root,
8318 					      root->root_key.objectid);
8319 		}
8320 	}
8321 
8322 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8323 		btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8324 	} else {
8325 		free_extent_buffer(root->node);
8326 		free_extent_buffer(root->commit_root);
8327 		btrfs_put_fs_root(root);
8328 	}
8329 	root_dropped = true;
8330 out_end_trans:
8331 	ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8332 	if (ret)
8333 		printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8334 				   "running qgroup updates "
8335 				   "during snapshot delete. "
8336 				   "Quota is out of sync, "
8337 				   "rescan required.\n", ret);
8338 
8339 	btrfs_end_transaction_throttle(trans, tree_root);
8340 out_free:
8341 	kfree(wc);
8342 	btrfs_free_path(path);
8343 out:
8344 	/*
8345 	 * So if we need to stop dropping the snapshot for whatever reason we
8346 	 * need to make sure to add it back to the dead root list so that we
8347 	 * keep trying to do the work later.  This also cleans up roots if we
8348 	 * don't have them in the radix (like when we recover after a power fail
8349 	 * or unmount) so we don't leak memory.
8350 	 */
8351 	if (!for_reloc && root_dropped == false)
8352 		btrfs_add_dead_root(root);
8353 	if (err && err != -EAGAIN)
8354 		btrfs_std_error(root->fs_info, err);
8355 	return err;
8356 }
8357 
8358 /*
8359  * drop subtree rooted at tree block 'node'.
8360  *
8361  * NOTE: this function will unlock and release tree block 'node'.
8362  * it is only used by the relocation code.
8363  */
8364 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8365 			struct btrfs_root *root,
8366 			struct extent_buffer *node,
8367 			struct extent_buffer *parent)
8368 {
8369 	struct btrfs_path *path;
8370 	struct walk_control *wc;
8371 	int level;
8372 	int parent_level;
8373 	int ret = 0;
8374 	int wret;
8375 
8376 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8377 
8378 	path = btrfs_alloc_path();
8379 	if (!path)
8380 		return -ENOMEM;
8381 
8382 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
8383 	if (!wc) {
8384 		btrfs_free_path(path);
8385 		return -ENOMEM;
8386 	}
8387 
8388 	btrfs_assert_tree_locked(parent);
8389 	parent_level = btrfs_header_level(parent);
8390 	extent_buffer_get(parent);
8391 	path->nodes[parent_level] = parent;
8392 	path->slots[parent_level] = btrfs_header_nritems(parent);
8393 
8394 	btrfs_assert_tree_locked(node);
8395 	level = btrfs_header_level(node);
8396 	path->nodes[level] = node;
8397 	path->slots[level] = 0;
8398 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8399 
8400 	wc->refs[parent_level] = 1;
8401 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8402 	wc->level = level;
8403 	wc->shared_level = -1;
8404 	wc->stage = DROP_REFERENCE;
8405 	wc->update_ref = 0;
8406 	wc->keep_locks = 1;
8407 	wc->for_reloc = 1;
8408 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8409 
8410 	while (1) {
8411 		wret = walk_down_tree(trans, root, path, wc);
8412 		if (wret < 0) {
8413 			ret = wret;
8414 			break;
8415 		}
8416 
8417 		wret = walk_up_tree(trans, root, path, wc, parent_level);
8418 		if (wret < 0)
8419 			ret = wret;
8420 		if (wret != 0)
8421 			break;
8422 	}
8423 
8424 	kfree(wc);
8425 	btrfs_free_path(path);
8426 	return ret;
8427 }
8428 
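/*
 * Adjust block group flags to a redundancy profile that the current number
 * of writable devices can actually support, honouring an in-progress
 * restripe target for this chunk type if one is set.
 */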
8429 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8430 {
8431 	u64 num_devices;
8432 	u64 stripped;
8433 
8434 	/*
8435 	 * if restripe for this chunk_type is on, pick the target profile and
8436 	 * return it, otherwise do the usual balance
8437 	 */
8438 	stripped = get_restripe_target(root->fs_info, flags);
8439 	if (stripped)
8440 		return extended_to_chunk(stripped);
8441 
8442 	num_devices = root->fs_info->fs_devices->rw_devices;
8443 
8444 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
8445 		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8446 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8447 
8448 	if (num_devices == 1) {
8449 		stripped |= BTRFS_BLOCK_GROUP_DUP;
8450 		stripped = flags & ~stripped;
8451 
8452 		/* turn raid0 into single device chunks */
8453 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
8454 			return stripped;
8455 
8456 		/* turn mirroring into duplication */
8457 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8458 			     BTRFS_BLOCK_GROUP_RAID10))
8459 			return stripped | BTRFS_BLOCK_GROUP_DUP;
8460 	} else {
8461 		/* they already had raid on here, just return */
8462 		if (flags & stripped)
8463 			return flags;
8464 
8465 		stripped |= BTRFS_BLOCK_GROUP_DUP;
8466 		stripped = flags & ~stripped;
8467 
8468 		/* switch duplicated blocks with raid1 */
8469 		if (flags & BTRFS_BLOCK_GROUP_DUP)
8470 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
8471 
8472 		/* this is drive concat, leave it alone */
8473 	}
8474 
8475 	return flags;
8476 }
8477 
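/*
 * Try to mark a block group read-only.  This only succeeds if the owning
 * space_info can absorb the group's unused bytes, i.e. all current usage,
 * reservations and a small cushion still fit in total_bytes once this
 * group's free space is counted as read-only.
 */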
8478 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8479 {
8480 	struct btrfs_space_info *sinfo = cache->space_info;
8481 	u64 num_bytes;
8482 	u64 min_allocable_bytes;
8483 	int ret = -ENOSPC;
8484 
8485 
8486 	/*
8487 	 * We need some metadata space and system metadata space for
8488 	 * allocating chunks in some corner cases, so keep a minimum
8489 	 * reserve unless we are forced to set the group read-only.
8490 	 */
8491 	if ((sinfo->flags &
8492 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8493 	    !force)
8494 		min_allocable_bytes = 1 * 1024 * 1024;
8495 	else
8496 		min_allocable_bytes = 0;
8497 
8498 	spin_lock(&sinfo->lock);
8499 	spin_lock(&cache->lock);
8500 
8501 	if (cache->ro) {
8502 		ret = 0;
8503 		goto out;
8504 	}
8505 
8506 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8507 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
8508 
8509 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8510 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8511 	    min_allocable_bytes <= sinfo->total_bytes) {
8512 		sinfo->bytes_readonly += num_bytes;
8513 		cache->ro = 1;
8514 		ret = 0;
8515 	}
8516 out:
8517 	spin_unlock(&cache->lock);
8518 	spin_unlock(&sinfo->lock);
8519 	return ret;
8520 }
8521 
8522 int btrfs_set_block_group_ro(struct btrfs_root *root,
8523 			     struct btrfs_block_group_cache *cache)
8524 
8525 {
8526 	struct btrfs_trans_handle *trans;
8527 	u64 alloc_flags;
8528 	int ret;
8529 
8530 	BUG_ON(cache->ro);
8531 
8532 	trans = btrfs_join_transaction(root);
8533 	if (IS_ERR(trans))
8534 		return PTR_ERR(trans);
8535 
8536 	alloc_flags = update_block_group_flags(root, cache->flags);
8537 	if (alloc_flags != cache->flags) {
8538 		ret = do_chunk_alloc(trans, root, alloc_flags,
8539 				     CHUNK_ALLOC_FORCE);
8540 		if (ret < 0)
8541 			goto out;
8542 	}
8543 
8544 	ret = set_block_group_ro(cache, 0);
8545 	if (!ret)
8546 		goto out;
8547 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8548 	ret = do_chunk_alloc(trans, root, alloc_flags,
8549 			     CHUNK_ALLOC_FORCE);
8550 	if (ret < 0)
8551 		goto out;
8552 	ret = set_block_group_ro(cache, 0);
8553 out:
8554 	btrfs_end_transaction(trans, root);
8555 	return ret;
8556 }
8557 
8558 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8559 			    struct btrfs_root *root, u64 type)
8560 {
8561 	u64 alloc_flags = get_alloc_profile(root, type);
8562 	return do_chunk_alloc(trans, root, alloc_flags,
8563 			      CHUNK_ALLOC_FORCE);
8564 }
8565 
8566 /*
8567  * helper to account the unused space of all the read-only block groups in
8568  * the list. takes mirrors into account.
8569  */
8570 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8571 {
8572 	struct btrfs_block_group_cache *block_group;
8573 	u64 free_bytes = 0;
8574 	int factor;
8575 
8576 	list_for_each_entry(block_group, groups_list, list) {
8577 		spin_lock(&block_group->lock);
8578 
8579 		if (!block_group->ro) {
8580 			spin_unlock(&block_group->lock);
8581 			continue;
8582 		}
8583 
8584 		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8585 					  BTRFS_BLOCK_GROUP_RAID10 |
8586 					  BTRFS_BLOCK_GROUP_DUP))
8587 			factor = 2;
8588 		else
8589 			factor = 1;
8590 
8591 		free_bytes += (block_group->key.offset -
8592 			       btrfs_block_group_used(&block_group->item)) *
8593 			       factor;
8594 
8595 		spin_unlock(&block_group->lock);
8596 	}
8597 
8598 	return free_bytes;
8599 }
8600 
8601 /*
8602  * helper to account the unused space of all the read-only block groups in
8603  * the space_info. takes mirrors into account.
8604  */
8605 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8606 {
8607 	int i;
8608 	u64 free_bytes = 0;
8609 
8610 	spin_lock(&sinfo->lock);
8611 
8612 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8613 		if (!list_empty(&sinfo->block_groups[i]))
8614 			free_bytes += __btrfs_get_ro_block_group_free_space(
8615 						&sinfo->block_groups[i]);
8616 
8617 	spin_unlock(&sinfo->lock);
8618 
8619 	return free_bytes;
8620 }
8621 
8622 void btrfs_set_block_group_rw(struct btrfs_root *root,
8623 			      struct btrfs_block_group_cache *cache)
8624 {
8625 	struct btrfs_space_info *sinfo = cache->space_info;
8626 	u64 num_bytes;
8627 
8628 	BUG_ON(!cache->ro);
8629 
8630 	spin_lock(&sinfo->lock);
8631 	spin_lock(&cache->lock);
8632 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8633 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
8634 	sinfo->bytes_readonly -= num_bytes;
8635 	cache->ro = 0;
8636 	spin_unlock(&cache->lock);
8637 	spin_unlock(&sinfo->lock);
8638 }
8639 
8640 /*
8641  * checks to see if it's even possible to relocate this block group.
8642  *
8643  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8644  * it's ok to go ahead and try.
8645  */
8646 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8647 {
8648 	struct btrfs_block_group_cache *block_group;
8649 	struct btrfs_space_info *space_info;
8650 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8651 	struct btrfs_device *device;
8652 	struct btrfs_trans_handle *trans;
8653 	u64 min_free;
8654 	u64 dev_min = 1;
8655 	u64 dev_nr = 0;
8656 	u64 target;
8657 	int index;
8658 	int full = 0;
8659 	int ret = 0;
8660 
8661 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8662 
8663 	/* odd, couldn't find the block group, leave it alone */
8664 	if (!block_group)
8665 		return -1;
8666 
8667 	min_free = btrfs_block_group_used(&block_group->item);
8668 
8669 	/* no bytes used, we're good */
8670 	if (!min_free)
8671 		goto out;
8672 
8673 	space_info = block_group->space_info;
8674 	spin_lock(&space_info->lock);
8675 
8676 	full = space_info->full;
8677 
8678 	/*
8679 	 * if this is the last block group we have in this space, we can't
8680 	 * relocate it unless we're able to allocate a new chunk below.
8681 	 *
8682 	 * Otherwise, we need to make sure we have room in the space to handle
8683 	 * all of the extents from this block group.  If we can, we're good
8684 	 */
8685 	if ((space_info->total_bytes != block_group->key.offset) &&
8686 	    (space_info->bytes_used + space_info->bytes_reserved +
8687 	     space_info->bytes_pinned + space_info->bytes_readonly +
8688 	     min_free < space_info->total_bytes)) {
8689 		spin_unlock(&space_info->lock);
8690 		goto out;
8691 	}
8692 	spin_unlock(&space_info->lock);
8693 
8694 	/*
8695 	 * ok we don't have enough space, but maybe we have free space on our
8696 	 * devices to allocate new chunks for relocation, so loop through our
8697 	 * alloc devices and guess if we have enough space.  if this block
8698 	 * group is going to be restriped, run checks against the target
8699 	 * profile instead of the current one.
8700 	 */
8701 	ret = -1;
8702 
8703 	/*
8704 	 * index:
8705 	 *      0: raid10
8706 	 *      1: raid1
8707 	 *      2: dup
8708 	 *      3: raid0
8709 	 *      4: single
8710 	 */
8711 	target = get_restripe_target(root->fs_info, block_group->flags);
8712 	if (target) {
8713 		index = __get_raid_index(extended_to_chunk(target));
8714 	} else {
8715 		/*
8716 		 * this is just a balance, so if we were marked as full
8717 		 * we know there is no space for a new chunk
8718 		 */
8719 		if (full)
8720 			goto out;
8721 
8722 		index = get_block_group_index(block_group);
8723 	}
8724 
8725 	if (index == BTRFS_RAID_RAID10) {
8726 		dev_min = 4;
8727 		/* Divide by 2 */
8728 		min_free >>= 1;
8729 	} else if (index == BTRFS_RAID_RAID1) {
8730 		dev_min = 2;
8731 	} else if (index == BTRFS_RAID_DUP) {
8732 		/* Multiply by 2 */
8733 		min_free <<= 1;
8734 	} else if (index == BTRFS_RAID_RAID0) {
8735 		dev_min = fs_devices->rw_devices;
8736 		do_div(min_free, dev_min);
8737 	}
8738 
8739 	/* We need to do this so that we can look at pending chunks */
8740 	trans = btrfs_join_transaction(root);
8741 	if (IS_ERR(trans)) {
8742 		ret = PTR_ERR(trans);
8743 		goto out;
8744 	}
8745 
8746 	mutex_lock(&root->fs_info->chunk_mutex);
8747 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8748 		u64 dev_offset;
8749 
8750 		/*
8751 		 * check to make sure we can actually find a chunk with enough
8752 		 * space to fit our block group in.
8753 		 */
8754 		if (device->total_bytes > device->bytes_used + min_free &&
8755 		    !device->is_tgtdev_for_dev_replace) {
8756 			ret = find_free_dev_extent(trans, device, min_free,
8757 						   &dev_offset, NULL);
8758 			if (!ret)
8759 				dev_nr++;
8760 
8761 			if (dev_nr >= dev_min)
8762 				break;
8763 
8764 			ret = -1;
8765 		}
8766 	}
8767 	mutex_unlock(&root->fs_info->chunk_mutex);
8768 	btrfs_end_transaction(trans, root);
8769 out:
8770 	btrfs_put_block_group(block_group);
8771 	return ret;
8772 }
8773 
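/*
 * Find the first BLOCK_GROUP_ITEM at or after *key and leave the path
 * pointing at it.  Returns 0 when an item is found, a negative errno on
 * error, or a positive value when no such item exists.
 */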
8774 static int find_first_block_group(struct btrfs_root *root,
8775 		struct btrfs_path *path, struct btrfs_key *key)
8776 {
8777 	int ret = 0;
8778 	struct btrfs_key found_key;
8779 	struct extent_buffer *leaf;
8780 	int slot;
8781 
8782 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8783 	if (ret < 0)
8784 		goto out;
8785 
8786 	while (1) {
8787 		slot = path->slots[0];
8788 		leaf = path->nodes[0];
8789 		if (slot >= btrfs_header_nritems(leaf)) {
8790 			ret = btrfs_next_leaf(root, path);
8791 			if (ret == 0)
8792 				continue;
8793 			if (ret < 0)
8794 				goto out;
8795 			break;
8796 		}
8797 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
8798 
8799 		if (found_key.objectid >= key->objectid &&
8800 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8801 			ret = 0;
8802 			goto out;
8803 		}
8804 		path->slots[0]++;
8805 	}
8806 out:
8807 	return ret;
8808 }
8809 
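/*
 * Walk all block groups and drop the extra inode reference (iref) each one
 * may still hold on its free space cache inode.
 */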
8810 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8811 {
8812 	struct btrfs_block_group_cache *block_group;
8813 	u64 last = 0;
8814 
8815 	while (1) {
8816 		struct inode *inode;
8817 
8818 		block_group = btrfs_lookup_first_block_group(info, last);
8819 		while (block_group) {
8820 			spin_lock(&block_group->lock);
8821 			if (block_group->iref)
8822 				break;
8823 			spin_unlock(&block_group->lock);
8824 			block_group = next_block_group(info->tree_root,
8825 						       block_group);
8826 		}
8827 		if (!block_group) {
8828 			if (last == 0)
8829 				break;
8830 			last = 0;
8831 			continue;
8832 		}
8833 
8834 		inode = block_group->inode;
8835 		block_group->iref = 0;
8836 		block_group->inode = NULL;
8837 		spin_unlock(&block_group->lock);
8838 		iput(inode);
8839 		last = block_group->key.objectid + block_group->key.offset;
8840 		btrfs_put_block_group(block_group);
8841 	}
8842 }
8843 
8844 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8845 {
8846 	struct btrfs_block_group_cache *block_group;
8847 	struct btrfs_space_info *space_info;
8848 	struct btrfs_caching_control *caching_ctl;
8849 	struct rb_node *n;
8850 
8851 	down_write(&info->commit_root_sem);
8852 	while (!list_empty(&info->caching_block_groups)) {
8853 		caching_ctl = list_entry(info->caching_block_groups.next,
8854 					 struct btrfs_caching_control, list);
8855 		list_del(&caching_ctl->list);
8856 		put_caching_control(caching_ctl);
8857 	}
8858 	up_write(&info->commit_root_sem);
8859 
8860 	spin_lock(&info->unused_bgs_lock);
8861 	while (!list_empty(&info->unused_bgs)) {
8862 		block_group = list_first_entry(&info->unused_bgs,
8863 					       struct btrfs_block_group_cache,
8864 					       bg_list);
8865 		list_del_init(&block_group->bg_list);
8866 		btrfs_put_block_group(block_group);
8867 	}
8868 	spin_unlock(&info->unused_bgs_lock);
8869 
8870 	spin_lock(&info->block_group_cache_lock);
8871 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8872 		block_group = rb_entry(n, struct btrfs_block_group_cache,
8873 				       cache_node);
8874 		rb_erase(&block_group->cache_node,
8875 			 &info->block_group_cache_tree);
8876 		spin_unlock(&info->block_group_cache_lock);
8877 
8878 		down_write(&block_group->space_info->groups_sem);
8879 		list_del(&block_group->list);
8880 		up_write(&block_group->space_info->groups_sem);
8881 
8882 		if (block_group->cached == BTRFS_CACHE_STARTED)
8883 			wait_block_group_cache_done(block_group);
8884 
8885 		/*
8886 		 * We haven't cached this block group, which means we could
8887 		 * possibly have excluded extents on this block group.
8888 		 */
8889 		if (block_group->cached == BTRFS_CACHE_NO ||
8890 		    block_group->cached == BTRFS_CACHE_ERROR)
8891 			free_excluded_extents(info->extent_root, block_group);
8892 
8893 		btrfs_remove_free_space_cache(block_group);
8894 		btrfs_put_block_group(block_group);
8895 
8896 		spin_lock(&info->block_group_cache_lock);
8897 	}
8898 	spin_unlock(&info->block_group_cache_lock);
8899 
8900 	/* now that all the block groups are freed, go through and
8901 	 * free all the space_info structs.  This is only called during
8902 	 * the final stages of unmount, and so we know nobody is
8903 	 * using them.  We call synchronize_rcu() once before we start,
8904 	 * just to be on the safe side.
8905 	 */
8906 	synchronize_rcu();
8907 
8908 	release_global_block_rsv(info);
8909 
8910 	while (!list_empty(&info->space_info)) {
8911 		int i;
8912 
8913 		space_info = list_entry(info->space_info.next,
8914 					struct btrfs_space_info,
8915 					list);
8916 		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8917 			if (WARN_ON(space_info->bytes_pinned > 0 ||
8918 			    space_info->bytes_reserved > 0 ||
8919 			    space_info->bytes_may_use > 0)) {
8920 				dump_space_info(space_info, 0, 0);
8921 			}
8922 		}
8923 		list_del(&space_info->list);
8924 		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8925 			struct kobject *kobj;
8926 			kobj = space_info->block_group_kobjs[i];
8927 			space_info->block_group_kobjs[i] = NULL;
8928 			if (kobj) {
8929 				kobject_del(kobj);
8930 				kobject_put(kobj);
8931 			}
8932 		}
8933 		kobject_del(&space_info->kobj);
8934 		kobject_put(&space_info->kobj);
8935 	}
8936 	return 0;
8937 }
8938 
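/*
 * Link a block group into its space_info's per-RAID-type list.  When it is
 * the first group of that type, also create and register the corresponding
 * sysfs kobject (a failure there is only warned about, not propagated).
 */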
8939 static void __link_block_group(struct btrfs_space_info *space_info,
8940 			       struct btrfs_block_group_cache *cache)
8941 {
8942 	int index = get_block_group_index(cache);
8943 	bool first = false;
8944 
8945 	down_write(&space_info->groups_sem);
8946 	if (list_empty(&space_info->block_groups[index]))
8947 		first = true;
8948 	list_add_tail(&cache->list, &space_info->block_groups[index]);
8949 	up_write(&space_info->groups_sem);
8950 
8951 	if (first) {
8952 		struct raid_kobject *rkobj;
8953 		int ret;
8954 
8955 		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
8956 		if (!rkobj)
8957 			goto out_err;
8958 		rkobj->raid_type = index;
8959 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
8960 		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
8961 				  "%s", get_raid_name(index));
8962 		if (ret) {
8963 			kobject_put(&rkobj->kobj);
8964 			goto out_err;
8965 		}
8966 		space_info->block_group_kobjs[index] = &rkobj->kobj;
8967 	}
8968 
8969 	return;
8970 out_err:
8971 	pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8972 }
8973 
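/*
 * Allocate and initialize an in-memory block group cache covering
 * [start, start + size).  Returns NULL on allocation failure; the caller
 * owns the single reference taken here.
 */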
8974 static struct btrfs_block_group_cache *
8975 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8976 {
8977 	struct btrfs_block_group_cache *cache;
8978 
8979 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
8980 	if (!cache)
8981 		return NULL;
8982 
8983 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8984 					GFP_NOFS);
8985 	if (!cache->free_space_ctl) {
8986 		kfree(cache);
8987 		return NULL;
8988 	}
8989 
8990 	cache->key.objectid = start;
8991 	cache->key.offset = size;
8992 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8993 
8994 	cache->sectorsize = root->sectorsize;
8995 	cache->fs_info = root->fs_info;
8996 	cache->full_stripe_len = btrfs_full_stripe_len(root,
8997 					       &root->fs_info->mapping_tree,
8998 					       start);
8999 	atomic_set(&cache->count, 1);
9000 	spin_lock_init(&cache->lock);
9001 	init_rwsem(&cache->data_rwsem);
9002 	INIT_LIST_HEAD(&cache->list);
9003 	INIT_LIST_HEAD(&cache->cluster_list);
9004 	INIT_LIST_HEAD(&cache->bg_list);
9005 	btrfs_init_free_space_ctl(cache);
9006 
9007 	return cache;
9008 }
9009 
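/*
 * Called at mount time: walk all BTRFS_BLOCK_GROUP_ITEM_KEY items in the
 * extent tree, create an in-memory cache for each block group, hook it up to
 * its space_info, and mark fully-used or empty groups as already cached.
 * Empty groups are also queued on unused_bgs so they can be reclaimed later.
 */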
9010 int btrfs_read_block_groups(struct btrfs_root *root)
9011 {
9012 	struct btrfs_path *path;
9013 	int ret;
9014 	struct btrfs_block_group_cache *cache;
9015 	struct btrfs_fs_info *info = root->fs_info;
9016 	struct btrfs_space_info *space_info;
9017 	struct btrfs_key key;
9018 	struct btrfs_key found_key;
9019 	struct extent_buffer *leaf;
9020 	int need_clear = 0;
9021 	u64 cache_gen;
9022 
9023 	root = info->extent_root;
9024 	key.objectid = 0;
9025 	key.offset = 0;
9026 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9027 	path = btrfs_alloc_path();
9028 	if (!path)
9029 		return -ENOMEM;
9030 	path->reada = 1;
9031 
9032 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9033 	if (btrfs_test_opt(root, SPACE_CACHE) &&
9034 	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9035 		need_clear = 1;
9036 	if (btrfs_test_opt(root, CLEAR_CACHE))
9037 		need_clear = 1;
9038 
9039 	while (1) {
9040 		ret = find_first_block_group(root, path, &key);
9041 		if (ret > 0)
9042 			break;
9043 		if (ret != 0)
9044 			goto error;
9045 
9046 		leaf = path->nodes[0];
9047 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9048 
9049 		cache = btrfs_create_block_group_cache(root, found_key.objectid,
9050 						       found_key.offset);
9051 		if (!cache) {
9052 			ret = -ENOMEM;
9053 			goto error;
9054 		}
9055 
9056 		if (need_clear) {
9057 			/*
9058 			 * When we mount with an old space cache, we need to
9059 			 * set BTRFS_DC_CLEAR and set the dirty flag.
9060 			 *
9061 			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9062 			 *    truncate the old free space cache inode and
9063 			 *    set up a new one.
9064 			 * b) Setting the 'dirty' flag makes sure that we flush
9065 			 *    the new space cache info onto disk.
9066 			 */
9067 			cache->disk_cache_state = BTRFS_DC_CLEAR;
9068 			if (btrfs_test_opt(root, SPACE_CACHE))
9069 				cache->dirty = 1;
9070 		}
9071 
9072 		read_extent_buffer(leaf, &cache->item,
9073 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
9074 				   sizeof(cache->item));
9075 		cache->flags = btrfs_block_group_flags(&cache->item);
9076 
9077 		key.objectid = found_key.objectid + found_key.offset;
9078 		btrfs_release_path(path);
9079 
9080 		/*
9081 		 * We need to exclude the super stripes now so that the space
9082 		 * info has super bytes accounted for, otherwise we'll think
9083 		 * we have more space than we actually do.
9084 		 */
9085 		ret = exclude_super_stripes(root, cache);
9086 		if (ret) {
9087 			/*
9088 			 * We may have excluded something, so call this just in
9089 			 * case.
9090 			 */
9091 			free_excluded_extents(root, cache);
9092 			btrfs_put_block_group(cache);
9093 			goto error;
9094 		}
9095 
9096 		/*
9097 		 * Check for two cases: either we are full, and therefore
9098 		 * don't need to bother with the caching work since we won't
9099 		 * find any space, or we are empty, and we can just add all
9100 		 * the space in and be done with it.  This saves us a lot of
9101 		 * time, particularly in the full case.
9102 		 */
9103 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9104 			cache->last_byte_to_unpin = (u64)-1;
9105 			cache->cached = BTRFS_CACHE_FINISHED;
9106 			free_excluded_extents(root, cache);
9107 		} else if (btrfs_block_group_used(&cache->item) == 0) {
9108 			cache->last_byte_to_unpin = (u64)-1;
9109 			cache->cached = BTRFS_CACHE_FINISHED;
9110 			add_new_free_space(cache, root->fs_info,
9111 					   found_key.objectid,
9112 					   found_key.objectid +
9113 					   found_key.offset);
9114 			free_excluded_extents(root, cache);
9115 		}
9116 
9117 		ret = btrfs_add_block_group_cache(root->fs_info, cache);
9118 		if (ret) {
9119 			btrfs_remove_free_space_cache(cache);
9120 			btrfs_put_block_group(cache);
9121 			goto error;
9122 		}
9123 
9124 		ret = update_space_info(info, cache->flags, found_key.offset,
9125 					btrfs_block_group_used(&cache->item),
9126 					&space_info);
9127 		if (ret) {
9128 			btrfs_remove_free_space_cache(cache);
9129 			spin_lock(&info->block_group_cache_lock);
9130 			rb_erase(&cache->cache_node,
9131 				 &info->block_group_cache_tree);
9132 			spin_unlock(&info->block_group_cache_lock);
9133 			btrfs_put_block_group(cache);
9134 			goto error;
9135 		}
9136 
9137 		cache->space_info = space_info;
9138 		spin_lock(&cache->space_info->lock);
9139 		cache->space_info->bytes_readonly += cache->bytes_super;
9140 		spin_unlock(&cache->space_info->lock);
9141 
9142 		__link_block_group(space_info, cache);
9143 
9144 		set_avail_alloc_bits(root->fs_info, cache->flags);
9145 		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9146 			set_block_group_ro(cache, 1);
9147 		} else if (btrfs_block_group_used(&cache->item) == 0) {
9148 			spin_lock(&info->unused_bgs_lock);
9149 			/* Should always be true but just in case. */
9150 			if (list_empty(&cache->bg_list)) {
9151 				btrfs_get_block_group(cache);
9152 				list_add_tail(&cache->bg_list,
9153 					      &info->unused_bgs);
9154 			}
9155 			spin_unlock(&info->unused_bgs_lock);
9156 		}
9157 	}
9158 
9159 	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9160 		if (!(get_alloc_profile(root, space_info->flags) &
9161 		      (BTRFS_BLOCK_GROUP_RAID10 |
9162 		       BTRFS_BLOCK_GROUP_RAID1 |
9163 		       BTRFS_BLOCK_GROUP_RAID5 |
9164 		       BTRFS_BLOCK_GROUP_RAID6 |
9165 		       BTRFS_BLOCK_GROUP_DUP)))
9166 			continue;
9167 		/*
9168 		 * Avoid allocating from un-mirrored block groups if there are
9169 		 * mirrored block groups.
9170 		 */
9171 		list_for_each_entry(cache,
9172 				&space_info->block_groups[BTRFS_RAID_RAID0],
9173 				list)
9174 			set_block_group_ro(cache, 1);
9175 		list_for_each_entry(cache,
9176 				&space_info->block_groups[BTRFS_RAID_SINGLE],
9177 				list)
9178 			set_block_group_ro(cache, 1);
9179 	}
9180 
9181 	init_global_block_rsv(info);
9182 	ret = 0;
9183 error:
9184 	btrfs_free_path(path);
9185 	return ret;
9186 }
9187 
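/*
 * Insert the block group items for all block groups created during this
 * transaction (queued on trans->new_bgs by btrfs_make_block_group()) and
 * finish the corresponding chunk allocation.  Errors abort the transaction.
 */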
9188 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9189 				       struct btrfs_root *root)
9190 {
9191 	struct btrfs_block_group_cache *block_group, *tmp;
9192 	struct btrfs_root *extent_root = root->fs_info->extent_root;
9193 	struct btrfs_block_group_item item;
9194 	struct btrfs_key key;
9195 	int ret = 0;
9196 
9197 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9198 		list_del_init(&block_group->bg_list);
9199 		if (ret)
9200 			continue;
9201 
9202 		spin_lock(&block_group->lock);
9203 		memcpy(&item, &block_group->item, sizeof(item));
9204 		memcpy(&key, &block_group->key, sizeof(key));
9205 		spin_unlock(&block_group->lock);
9206 
9207 		ret = btrfs_insert_item(trans, extent_root, &key, &item,
9208 					sizeof(item));
9209 		if (ret)
9210 			btrfs_abort_transaction(trans, extent_root, ret);
9211 		ret = btrfs_finish_chunk_alloc(trans, extent_root,
9212 					       key.objectid, key.offset);
9213 		if (ret)
9214 			btrfs_abort_transaction(trans, extent_root, ret);
9215 	}
9216 }
9217 
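/*
 * Create the in-memory cache for a freshly allocated chunk and queue it on
 * trans->new_bgs; the on-disk block group item is inserted later by
 * btrfs_create_pending_block_groups().  An illustrative call from chunk
 * allocation (a sketch, not copied from a real caller) would look roughly
 * like:
 *
 *	ret = btrfs_make_block_group(trans, extent_root, 0, type,
 *				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *				     chunk_offset, chunk_size);
 */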
9218 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9219 			   struct btrfs_root *root, u64 bytes_used,
9220 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
9221 			   u64 size)
9222 {
9223 	int ret;
9224 	struct btrfs_root *extent_root;
9225 	struct btrfs_block_group_cache *cache;
9226 
9227 	extent_root = root->fs_info->extent_root;
9228 
9229 	btrfs_set_log_full_commit(root->fs_info, trans);
9230 
9231 	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9232 	if (!cache)
9233 		return -ENOMEM;
9234 
9235 	btrfs_set_block_group_used(&cache->item, bytes_used);
9236 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9237 	btrfs_set_block_group_flags(&cache->item, type);
9238 
9239 	cache->flags = type;
9240 	cache->last_byte_to_unpin = (u64)-1;
9241 	cache->cached = BTRFS_CACHE_FINISHED;
9242 	ret = exclude_super_stripes(root, cache);
9243 	if (ret) {
9244 		/*
9245 		 * We may have excluded something, so call this just in
9246 		 * case.
9247 		 */
9248 		free_excluded_extents(root, cache);
9249 		btrfs_put_block_group(cache);
9250 		return ret;
9251 	}
9252 
9253 	add_new_free_space(cache, root->fs_info, chunk_offset,
9254 			   chunk_offset + size);
9255 
9256 	free_excluded_extents(root, cache);
9257 
9258 	ret = btrfs_add_block_group_cache(root->fs_info, cache);
9259 	if (ret) {
9260 		btrfs_remove_free_space_cache(cache);
9261 		btrfs_put_block_group(cache);
9262 		return ret;
9263 	}
9264 
9265 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9266 				&cache->space_info);
9267 	if (ret) {
9268 		btrfs_remove_free_space_cache(cache);
9269 		spin_lock(&root->fs_info->block_group_cache_lock);
9270 		rb_erase(&cache->cache_node,
9271 			 &root->fs_info->block_group_cache_tree);
9272 		spin_unlock(&root->fs_info->block_group_cache_lock);
9273 		btrfs_put_block_group(cache);
9274 		return ret;
9275 	}
9276 	update_global_block_rsv(root->fs_info);
9277 
9278 	spin_lock(&cache->space_info->lock);
9279 	cache->space_info->bytes_readonly += cache->bytes_super;
9280 	spin_unlock(&cache->space_info->lock);
9281 
9282 	__link_block_group(cache->space_info, cache);
9283 
9284 	list_add_tail(&cache->bg_list, &trans->new_bgs);
9285 
9286 	set_avail_alloc_bits(extent_root->fs_info, type);
9287 
9288 	return 0;
9289 }
9290 
9291 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9292 {
9293 	u64 extra_flags = chunk_to_extended(flags) &
9294 				BTRFS_EXTENDED_PROFILE_MASK;
9295 
9296 	write_seqlock(&fs_info->profiles_lock);
9297 	if (flags & BTRFS_BLOCK_GROUP_DATA)
9298 		fs_info->avail_data_alloc_bits &= ~extra_flags;
9299 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
9300 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9301 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9302 		fs_info->avail_system_alloc_bits &= ~extra_flags;
9303 	write_sequnlock(&fs_info->profiles_lock);
9304 }
9305 
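/*
 * Delete an (already read-only) block group: drop its free space cache inode
 * and item, return it from the allocation clusters, unlink it from the block
 * group rb-tree and its space_info, adjust the space accounting, and finally
 * delete the block group item from the extent tree.
 */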
9306 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9307 			     struct btrfs_root *root, u64 group_start)
9308 {
9309 	struct btrfs_path *path;
9310 	struct btrfs_block_group_cache *block_group;
9311 	struct btrfs_free_cluster *cluster;
9312 	struct btrfs_root *tree_root = root->fs_info->tree_root;
9313 	struct btrfs_key key;
9314 	struct inode *inode;
9315 	struct kobject *kobj = NULL;
9316 	int ret;
9317 	int index;
9318 	int factor;
9319 
9320 	root = root->fs_info->extent_root;
9321 
9322 	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9323 	BUG_ON(!block_group);
9324 	BUG_ON(!block_group->ro);
9325 
9326 	/*
9327 	 * Free the reserved super bytes from this block group before
9328 	 * removing it.
9329 	 */
9330 	free_excluded_extents(root, block_group);
9331 
9332 	memcpy(&key, &block_group->key, sizeof(key));
9333 	index = get_block_group_index(block_group);
9334 	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9335 				  BTRFS_BLOCK_GROUP_RAID1 |
9336 				  BTRFS_BLOCK_GROUP_RAID10))
9337 		factor = 2;
9338 	else
9339 		factor = 1;
9340 
9341 	/* make sure this block group isn't part of an allocation cluster */
9342 	cluster = &root->fs_info->data_alloc_cluster;
9343 	spin_lock(&cluster->refill_lock);
9344 	btrfs_return_cluster_to_free_space(block_group, cluster);
9345 	spin_unlock(&cluster->refill_lock);
9346 
9347 	/*
9348 	 * make sure this block group isn't part of a metadata
9349 	 * allocation cluster
9350 	 */
9351 	cluster = &root->fs_info->meta_alloc_cluster;
9352 	spin_lock(&cluster->refill_lock);
9353 	btrfs_return_cluster_to_free_space(block_group, cluster);
9354 	spin_unlock(&cluster->refill_lock);
9355 
9356 	path = btrfs_alloc_path();
9357 	if (!path) {
9358 		ret = -ENOMEM;
9359 		goto out;
9360 	}
9361 
9362 	inode = lookup_free_space_inode(tree_root, block_group, path);
9363 	if (!IS_ERR(inode)) {
9364 		ret = btrfs_orphan_add(trans, inode);
9365 		if (ret) {
9366 			btrfs_add_delayed_iput(inode);
9367 			goto out;
9368 		}
9369 		clear_nlink(inode);
9370 		/* One for the block groups ref */
9371 		/* One for the block group's ref */
9372 		if (block_group->iref) {
9373 			block_group->iref = 0;
9374 			block_group->inode = NULL;
9375 			spin_unlock(&block_group->lock);
9376 			iput(inode);
9377 		} else {
9378 			spin_unlock(&block_group->lock);
9379 		}
9380 		/* One for our lookup ref */
9381 		btrfs_add_delayed_iput(inode);
9382 	}
9383 
9384 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9385 	key.offset = block_group->key.objectid;
9386 	key.type = 0;
9387 
9388 	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9389 	if (ret < 0)
9390 		goto out;
9391 	if (ret > 0)
9392 		btrfs_release_path(path);
9393 	if (ret == 0) {
9394 		ret = btrfs_del_item(trans, tree_root, path);
9395 		if (ret)
9396 			goto out;
9397 		btrfs_release_path(path);
9398 	}
9399 
9400 	spin_lock(&root->fs_info->block_group_cache_lock);
9401 	rb_erase(&block_group->cache_node,
9402 		 &root->fs_info->block_group_cache_tree);
9403 
9404 	if (root->fs_info->first_logical_byte == block_group->key.objectid)
9405 		root->fs_info->first_logical_byte = (u64)-1;
9406 	spin_unlock(&root->fs_info->block_group_cache_lock);
9407 
9408 	down_write(&block_group->space_info->groups_sem);
9409 	/*
9410 	 * We must use list_del_init so people can check to see if they
9411 	 * are still on the list after taking the semaphore.
9412 	 */
9413 	list_del_init(&block_group->list);
9414 	if (list_empty(&block_group->space_info->block_groups[index])) {
9415 		kobj = block_group->space_info->block_group_kobjs[index];
9416 		block_group->space_info->block_group_kobjs[index] = NULL;
9417 		clear_avail_alloc_bits(root->fs_info, block_group->flags);
9418 	}
9419 	up_write(&block_group->space_info->groups_sem);
9420 	if (kobj) {
9421 		kobject_del(kobj);
9422 		kobject_put(kobj);
9423 	}
9424 
9425 	if (block_group->cached == BTRFS_CACHE_STARTED)
9426 		wait_block_group_cache_done(block_group);
9427 
9428 	btrfs_remove_free_space_cache(block_group);
9429 
9430 	spin_lock(&block_group->space_info->lock);
9431 	block_group->space_info->total_bytes -= block_group->key.offset;
9432 	block_group->space_info->bytes_readonly -= block_group->key.offset;
9433 	block_group->space_info->disk_total -= block_group->key.offset * factor;
9434 	spin_unlock(&block_group->space_info->lock);
9435 
9436 	memcpy(&key, &block_group->key, sizeof(key));
9437 
9438 	btrfs_put_block_group(block_group);
9439 	btrfs_put_block_group(block_group);
9440 
9441 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9442 	if (ret > 0)
9443 		ret = -EIO;
9444 	if (ret < 0)
9445 		goto out;
9446 
9447 	ret = btrfs_del_item(trans, root, path);
9448 out:
9449 	btrfs_free_path(path);
9450 	return ret;
9451 }
9452 
9453 /*
9454  * Process the unused_bgs list and remove any block groups that no longer
9455  * have any allocated space in them.
9456  */
9457 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9458 {
9459 	struct btrfs_block_group_cache *block_group;
9460 	struct btrfs_space_info *space_info;
9461 	struct btrfs_root *root = fs_info->extent_root;
9462 	struct btrfs_trans_handle *trans;
9463 	int ret = 0;
9464 
9465 	if (!fs_info->open)
9466 		return;
9467 
9468 	spin_lock(&fs_info->unused_bgs_lock);
9469 	while (!list_empty(&fs_info->unused_bgs)) {
9470 		u64 start, end;
9471 
9472 		block_group = list_first_entry(&fs_info->unused_bgs,
9473 					       struct btrfs_block_group_cache,
9474 					       bg_list);
9475 		space_info = block_group->space_info;
9476 		list_del_init(&block_group->bg_list);
9477 		if (ret || btrfs_mixed_space_info(space_info)) {
9478 			btrfs_put_block_group(block_group);
9479 			continue;
9480 		}
9481 		spin_unlock(&fs_info->unused_bgs_lock);
9482 
9483 		/* Don't want to race with allocators so take the groups_sem */
9484 		down_write(&space_info->groups_sem);
9485 		spin_lock(&block_group->lock);
9486 		if (block_group->reserved ||
9487 		    btrfs_block_group_used(&block_group->item) ||
9488 		    block_group->ro) {
9489 			/*
9490 			 * We want to bail if we made new allocations or have
9491 			 * outstanding allocations in this block group.  We do
9492 			 * the ro check in case balance is currently acting on
9493 			 * this block group.
9494 			 */
9495 			spin_unlock(&block_group->lock);
9496 			up_write(&space_info->groups_sem);
9497 			goto next;
9498 		}
9499 		spin_unlock(&block_group->lock);
9500 
9501 		/* We don't want to force the issue, only flip if it's ok. */
9502 		ret = set_block_group_ro(block_group, 0);
9503 		up_write(&space_info->groups_sem);
9504 		if (ret < 0) {
9505 			ret = 0;
9506 			goto next;
9507 		}
9508 
9509 		/*
9510 		 * Want to do this before we do anything else so we can recover
9511 		 * properly if we fail to join the transaction.
9512 		 */
9513 		trans = btrfs_join_transaction(root);
9514 		if (IS_ERR(trans)) {
9515 			btrfs_set_block_group_rw(root, block_group);
9516 			ret = PTR_ERR(trans);
9517 			goto next;
9518 		}
9519 
9520 		/*
9521 		 * We could have pending pinned extents for this block group,
9522 		 * just delete them, we don't care about them anymore.
9523 		 */
9524 		start = block_group->key.objectid;
9525 		end = start + block_group->key.offset - 1;
9526 		clear_extent_bits(&fs_info->freed_extents[0], start, end,
9527 				  EXTENT_DIRTY, GFP_NOFS);
9528 		clear_extent_bits(&fs_info->freed_extents[1], start, end,
9529 				  EXTENT_DIRTY, GFP_NOFS);
9530 
9531 		/* Reset pinned so btrfs_put_block_group doesn't complain */
9532 		block_group->pinned = 0;
9533 
9534 		/*
9535 		 * btrfs_remove_chunk() will abort the transaction if things go
9536 		 * horribly wrong.
9537 		 */
9538 		ret = btrfs_remove_chunk(trans, root,
9539 					 block_group->key.objectid);
9540 		btrfs_end_transaction(trans, root);
9541 next:
9542 		btrfs_put_block_group(block_group);
9543 		spin_lock(&fs_info->unused_bgs_lock);
9544 	}
9545 	spin_unlock(&fs_info->unused_bgs_lock);
9546 }
9547 
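/*
 * Pre-create the basic, empty space_info structs: SYSTEM, plus either a
 * mixed METADATA|DATA one or separate METADATA and DATA ones depending on
 * the MIXED_GROUPS incompat flag.  Returns 1 if the superblock does not
 * reference a root tree yet.
 */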
9548 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
9549 {
9550 	struct btrfs_space_info *space_info;
9551 	struct btrfs_super_block *disk_super;
9552 	u64 features;
9553 	u64 flags;
9554 	int mixed = 0;
9555 	int ret;
9556 
9557 	disk_super = fs_info->super_copy;
9558 	if (!btrfs_super_root(disk_super))
9559 		return 1;
9560 
9561 	features = btrfs_super_incompat_flags(disk_super);
9562 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
9563 		mixed = 1;
9564 
9565 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
9566 	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9567 	if (ret)
9568 		goto out;
9569 
9570 	if (mixed) {
9571 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
9572 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9573 	} else {
9574 		flags = BTRFS_BLOCK_GROUP_METADATA;
9575 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9576 		if (ret)
9577 			goto out;
9578 
9579 		flags = BTRFS_BLOCK_GROUP_DATA;
9580 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9581 	}
9582 out:
9583 	return ret;
9584 }
9585 
9586 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9587 {
9588 	return unpin_extent_range(root, start, end);
9589 }
9590 
9591 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
9592 			       u64 num_bytes, u64 *actual_bytes)
9593 {
9594 	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
9595 }
9596 
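/*
 * Back end of the FITRIM ioctl: walk every block group overlapping
 * [range->start, range->start + range->len), make sure its free space is
 * cached, and discard its free space, honoring range->minlen.  The total
 * number of bytes trimmed is returned in range->len.
 */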
9597 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9598 {
9599 	struct btrfs_fs_info *fs_info = root->fs_info;
9600 	struct btrfs_block_group_cache *cache = NULL;
9601 	u64 group_trimmed;
9602 	u64 start;
9603 	u64 end;
9604 	u64 trimmed = 0;
9605 	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9606 	int ret = 0;
9607 
9608 	/*
9609 	 * Try to trim all FS space; our block groups may start at a non-zero offset.
9610 	 */
9611 	if (range->len == total_bytes)
9612 		cache = btrfs_lookup_first_block_group(fs_info, range->start);
9613 	else
9614 		cache = btrfs_lookup_block_group(fs_info, range->start);
9615 
9616 	while (cache) {
9617 		if (cache->key.objectid >= (range->start + range->len)) {
9618 			btrfs_put_block_group(cache);
9619 			break;
9620 		}
9621 
9622 		start = max(range->start, cache->key.objectid);
9623 		end = min(range->start + range->len,
9624 				cache->key.objectid + cache->key.offset);
9625 
9626 		if (end - start >= range->minlen) {
9627 			if (!block_group_cache_done(cache)) {
9628 				ret = cache_block_group(cache, 0);
9629 				if (ret) {
9630 					btrfs_put_block_group(cache);
9631 					break;
9632 				}
9633 				ret = wait_block_group_cache_done(cache);
9634 				if (ret) {
9635 					btrfs_put_block_group(cache);
9636 					break;
9637 				}
9638 			}
9639 			ret = btrfs_trim_block_group(cache,
9640 						     &group_trimmed,
9641 						     start,
9642 						     end,
9643 						     range->minlen);
9644 
9645 			trimmed += group_trimmed;
9646 			if (ret) {
9647 				btrfs_put_block_group(cache);
9648 				break;
9649 			}
9650 		}
9651 
9652 		cache = next_block_group(fs_info->tree_root, cache);
9653 	}
9654 
9655 	range->len = trimmed;
9656 	return ret;
9657 }
9658 
9659 /*
9660  * btrfs_{start,end}_nocow_write() are similar to mnt_{want,drop}_write():
9661  * they prevent tasks from writing data into the page cache via nocow before
9662  * the subvolume is snapshotted, while making sure the data is flushed to
9663  * disk after the snapshot is created (see the usage sketch further below).
9664  */
9665 void btrfs_end_nocow_write(struct btrfs_root *root)
9666 {
9667 	percpu_counter_dec(&root->subv_writers->counter);
9668 	/*
9669 	 * Make sure counter is updated before we wake up
9670 	 * waiters.
9671 	 */
9672 	smp_mb();
9673 	if (waitqueue_active(&root->subv_writers->wait))
9674 		wake_up(&root->subv_writers->wait);
9675 }
9676 
9677 int btrfs_start_nocow_write(struct btrfs_root *root)
9678 {
9679 	if (atomic_read(&root->will_be_snapshoted))
9680 		return 0;
9681 
9682 	percpu_counter_inc(&root->subv_writers->counter);
9683 	/*
9684 	 * Make sure counter is updated before we check for snapshot creation.
9685 	 */
9686 	smp_mb();
9687 	if (atomic_read(&root->will_be_snapshoted)) {
9688 		btrfs_end_nocow_write(root);
9689 		return 0;
9690 	}
9691 	return 1;
9692 }
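/*
 * Illustrative usage of the pair above (a sketch under the assumption of a
 * nocow writer, not copied from a real caller):
 *
 *	if (btrfs_start_nocow_write(root)) {
 *		... write the data in place (nocow) ...
 *		btrfs_end_nocow_write(root);
 *	} else {
 *		... a snapshot is being created, fall back to cow ...
 *	}
 */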
9693