xref: /openbmc/linux/fs/btrfs/extent-tree.c (revision 8730046c)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "free-space-tree.h"
37 #include "math.h"
38 #include "sysfs.h"
39 #include "qgroup.h"
40 
41 #undef SCRAMBLE_DELAYED_REFS
42 
43 /*
44  * control flags for do_chunk_alloc's force field
45  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
46  * if we really need one.
47  *
48  * CHUNK_ALLOC_LIMITED means to only try to allocate one
49  * if we have very few chunks already allocated.  This is
50  * used as part of the clustering code to help make sure
51  * we have a good pool of storage to cluster in, without
52  * filling the FS with empty chunks.
53  *
54  * CHUNK_ALLOC_FORCE means it must try to allocate one.
55  *
56  */
57 enum {
58 	CHUNK_ALLOC_NO_FORCE = 0,
59 	CHUNK_ALLOC_LIMITED = 1,
60 	CHUNK_ALLOC_FORCE = 2,
61 };
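
/*
 * Illustrative sketch (hypothetical, simplified): the force levels are
 * ordered so that a higher value is roughly "more willing" to allocate.
 * A caller that must have a new metadata chunk could pass CHUNK_ALLOC_FORCE.
 * Real callers normally derive the flags argument from the target
 * space_info's profile rather than from the raw type bit used here.
 */
#if 0
	ret = do_chunk_alloc(trans, fs_info, BTRFS_BLOCK_GROUP_METADATA,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		return ret;
#endif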
62 
63 static int update_block_group(struct btrfs_trans_handle *trans,
64 			      struct btrfs_fs_info *fs_info, u64 bytenr,
65 			      u64 num_bytes, int alloc);
66 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
67 			       struct btrfs_fs_info *fs_info,
68 				struct btrfs_delayed_ref_node *node, u64 parent,
69 				u64 root_objectid, u64 owner_objectid,
70 				u64 owner_offset, int refs_to_drop,
71 				struct btrfs_delayed_extent_op *extra_op);
72 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
73 				    struct extent_buffer *leaf,
74 				    struct btrfs_extent_item *ei);
75 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
76 				      struct btrfs_fs_info *fs_info,
77 				      u64 parent, u64 root_objectid,
78 				      u64 flags, u64 owner, u64 offset,
79 				      struct btrfs_key *ins, int ref_mod);
80 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
81 				     struct btrfs_fs_info *fs_info,
82 				     u64 parent, u64 root_objectid,
83 				     u64 flags, struct btrfs_disk_key *key,
84 				     int level, struct btrfs_key *ins);
85 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
86 			  struct btrfs_fs_info *fs_info, u64 flags,
87 			  int force);
88 static int find_next_key(struct btrfs_path *path, int level,
89 			 struct btrfs_key *key);
90 static void dump_space_info(struct btrfs_fs_info *fs_info,
91 			    struct btrfs_space_info *info, u64 bytes,
92 			    int dump_block_groups);
93 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
94 				    u64 ram_bytes, u64 num_bytes, int delalloc);
95 static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
96 				     u64 num_bytes, int delalloc);
97 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
98 			       u64 num_bytes);
99 static int __reserve_metadata_bytes(struct btrfs_root *root,
100 				    struct btrfs_space_info *space_info,
101 				    u64 orig_bytes,
102 				    enum btrfs_reserve_flush_enum flush);
103 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
104 				     struct btrfs_space_info *space_info,
105 				     u64 num_bytes);
106 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
107 				     struct btrfs_space_info *space_info,
108 				     u64 num_bytes);
109 
110 static noinline int
111 block_group_cache_done(struct btrfs_block_group_cache *cache)
112 {
113 	smp_mb();
114 	return cache->cached == BTRFS_CACHE_FINISHED ||
115 		cache->cached == BTRFS_CACHE_ERROR;
116 }
117 
118 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
119 {
120 	return (cache->flags & bits) == bits;
121 }
122 
123 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
124 {
125 	atomic_inc(&cache->count);
126 }
127 
128 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
129 {
130 	if (atomic_dec_and_test(&cache->count)) {
131 		WARN_ON(cache->pinned > 0);
132 		WARN_ON(cache->reserved > 0);
133 		kfree(cache->free_space_ctl);
134 		kfree(cache);
135 	}
136 }
137 
138 /*
139  * this adds the block group to the fs_info rb tree for the block group
140  * cache
141  */
142 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
143 				struct btrfs_block_group_cache *block_group)
144 {
145 	struct rb_node **p;
146 	struct rb_node *parent = NULL;
147 	struct btrfs_block_group_cache *cache;
148 
149 	spin_lock(&info->block_group_cache_lock);
150 	p = &info->block_group_cache_tree.rb_node;
151 
152 	while (*p) {
153 		parent = *p;
154 		cache = rb_entry(parent, struct btrfs_block_group_cache,
155 				 cache_node);
156 		if (block_group->key.objectid < cache->key.objectid) {
157 			p = &(*p)->rb_left;
158 		} else if (block_group->key.objectid > cache->key.objectid) {
159 			p = &(*p)->rb_right;
160 		} else {
161 			spin_unlock(&info->block_group_cache_lock);
162 			return -EEXIST;
163 		}
164 	}
165 
166 	rb_link_node(&block_group->cache_node, parent, p);
167 	rb_insert_color(&block_group->cache_node,
168 			&info->block_group_cache_tree);
169 
170 	if (info->first_logical_byte > block_group->key.objectid)
171 		info->first_logical_byte = block_group->key.objectid;
172 
173 	spin_unlock(&info->block_group_cache_lock);
174 
175 	return 0;
176 }
177 
178 /*
179  * This will return the block group at or after bytenr if contains is 0, else
180  * it will return the block group that contains the given bytenr.
181  */
182 static struct btrfs_block_group_cache *
183 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
184 			      int contains)
185 {
186 	struct btrfs_block_group_cache *cache, *ret = NULL;
187 	struct rb_node *n;
188 	u64 end, start;
189 
190 	spin_lock(&info->block_group_cache_lock);
191 	n = info->block_group_cache_tree.rb_node;
192 
193 	while (n) {
194 		cache = rb_entry(n, struct btrfs_block_group_cache,
195 				 cache_node);
196 		end = cache->key.objectid + cache->key.offset - 1;
197 		start = cache->key.objectid;
198 
199 		if (bytenr < start) {
200 			if (!contains && (!ret || start < ret->key.objectid))
201 				ret = cache;
202 			n = n->rb_left;
203 		} else if (bytenr > start) {
204 			if (contains && bytenr <= end) {
205 				ret = cache;
206 				break;
207 			}
208 			n = n->rb_right;
209 		} else {
210 			ret = cache;
211 			break;
212 		}
213 	}
214 	if (ret) {
215 		btrfs_get_block_group(ret);
216 		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
217 			info->first_logical_byte = ret->key.objectid;
218 	}
219 	spin_unlock(&info->block_group_cache_lock);
220 
221 	return ret;
222 }
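
/*
 * Worked example (illustrative): with two block groups covering [0, 1G) and
 * [1G, 2G), a search for bytenr 512M with contains == 1 returns the [0, 1G)
 * group because 512M falls inside it, while the same search with
 * contains == 0 returns the [1G, 2G) group, the first one whose start is at
 * or after 512M.  Either way the returned group carries an extra reference
 * that the caller must drop with btrfs_put_block_group().
 */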
223 
224 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
225 			       u64 start, u64 num_bytes)
226 {
227 	u64 end = start + num_bytes - 1;
228 	set_extent_bits(&fs_info->freed_extents[0],
229 			start, end, EXTENT_UPTODATE);
230 	set_extent_bits(&fs_info->freed_extents[1],
231 			start, end, EXTENT_UPTODATE);
232 	return 0;
233 }
234 
235 static void free_excluded_extents(struct btrfs_fs_info *fs_info,
236 				  struct btrfs_block_group_cache *cache)
237 {
238 	u64 start, end;
239 
240 	start = cache->key.objectid;
241 	end = start + cache->key.offset - 1;
242 
243 	clear_extent_bits(&fs_info->freed_extents[0],
244 			  start, end, EXTENT_UPTODATE);
245 	clear_extent_bits(&fs_info->freed_extents[1],
246 			  start, end, EXTENT_UPTODATE);
247 }
248 
249 static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
250 				 struct btrfs_block_group_cache *cache)
251 {
252 	u64 bytenr;
253 	u64 *logical;
254 	int stripe_len;
255 	int i, nr, ret;
256 
257 	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
258 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
259 		cache->bytes_super += stripe_len;
260 		ret = add_excluded_extent(fs_info, cache->key.objectid,
261 					  stripe_len);
262 		if (ret)
263 			return ret;
264 	}
265 
266 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
267 		bytenr = btrfs_sb_offset(i);
268 		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
269 				       bytenr, 0, &logical, &nr, &stripe_len);
270 		if (ret)
271 			return ret;
272 
273 		while (nr--) {
274 			u64 start, len;
275 
276 			if (logical[nr] > cache->key.objectid +
277 			    cache->key.offset)
278 				continue;
279 
280 			if (logical[nr] + stripe_len <= cache->key.objectid)
281 				continue;
282 
283 			start = logical[nr];
284 			if (start < cache->key.objectid) {
285 				start = cache->key.objectid;
286 				len = (logical[nr] + stripe_len) - start;
287 			} else {
288 				len = min_t(u64, stripe_len,
289 					    cache->key.objectid +
290 					    cache->key.offset - start);
291 			}
292 
293 			cache->bytes_super += len;
294 			ret = add_excluded_extent(fs_info, start, len);
295 			if (ret) {
296 				kfree(logical);
297 				return ret;
298 			}
299 		}
300 
301 		kfree(logical);
302 	}
303 	return 0;
304 }
305 
306 static struct btrfs_caching_control *
307 get_caching_control(struct btrfs_block_group_cache *cache)
308 {
309 	struct btrfs_caching_control *ctl;
310 
311 	spin_lock(&cache->lock);
312 	if (!cache->caching_ctl) {
313 		spin_unlock(&cache->lock);
314 		return NULL;
315 	}
316 
317 	ctl = cache->caching_ctl;
318 	atomic_inc(&ctl->count);
319 	spin_unlock(&cache->lock);
320 	return ctl;
321 }
322 
323 static void put_caching_control(struct btrfs_caching_control *ctl)
324 {
325 	if (atomic_dec_and_test(&ctl->count))
326 		kfree(ctl);
327 }
328 
329 #ifdef CONFIG_BTRFS_DEBUG
330 static void fragment_free_space(struct btrfs_block_group_cache *block_group)
331 {
332 	struct btrfs_fs_info *fs_info = block_group->fs_info;
333 	u64 start = block_group->key.objectid;
334 	u64 len = block_group->key.offset;
335 	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
336 		fs_info->nodesize : fs_info->sectorsize;
337 	u64 step = chunk << 1;
338 
339 	while (len > chunk) {
340 		btrfs_remove_free_space(block_group, start, chunk);
341 		start += step;
342 		if (len < step)
343 			len = 0;
344 		else
345 			len -= step;
346 	}
347 }
348 #endif
349 
350 /*
351  * This is only called by cache_block_group.  Since we could have freed
352  * extents, we need to check the pinned_extents for any extents that can't
353  * be used yet, as their free space is only released at transaction commit.
354  */
355 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
356 		       struct btrfs_fs_info *info, u64 start, u64 end)
357 {
358 	u64 extent_start, extent_end, size, total_added = 0;
359 	int ret;
360 
361 	while (start < end) {
362 		ret = find_first_extent_bit(info->pinned_extents, start,
363 					    &extent_start, &extent_end,
364 					    EXTENT_DIRTY | EXTENT_UPTODATE,
365 					    NULL);
366 		if (ret)
367 			break;
368 
369 		if (extent_start <= start) {
370 			start = extent_end + 1;
371 		} else if (extent_start > start && extent_start < end) {
372 			size = extent_start - start;
373 			total_added += size;
374 			ret = btrfs_add_free_space(block_group, start,
375 						   size);
376 			BUG_ON(ret); /* -ENOMEM or logic error */
377 			start = extent_end + 1;
378 		} else {
379 			break;
380 		}
381 	}
382 
383 	if (start < end) {
384 		size = end - start;
385 		total_added += size;
386 		ret = btrfs_add_free_space(block_group, start, size);
387 		BUG_ON(ret); /* -ENOMEM or logic error */
388 	}
389 
390 	return total_added;
391 }
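
/*
 * Worked example (illustrative, offsets relative to the block group start):
 * caching the range [0, 1M) while a single pinned extent covers [256K, 512K)
 * adds [0, 256K) in the first loop iteration, skips ahead to 512K, and then
 * adds the remaining [512K, 1M) after the loop, so 768K of new free space is
 * returned.
 */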
392 
393 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
394 {
395 	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
396 	struct btrfs_fs_info *fs_info = block_group->fs_info;
397 	struct btrfs_root *extent_root = fs_info->extent_root;
398 	struct btrfs_path *path;
399 	struct extent_buffer *leaf;
400 	struct btrfs_key key;
401 	u64 total_found = 0;
402 	u64 last = 0;
403 	u32 nritems;
404 	int ret;
405 	bool wakeup = true;
406 
407 	path = btrfs_alloc_path();
408 	if (!path)
409 		return -ENOMEM;
410 
411 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
412 
413 #ifdef CONFIG_BTRFS_DEBUG
414 	/*
415 	 * If we're fragmenting we don't want to make anybody think we can
416 	 * allocate from this block group until we've had a chance to fragment
417 	 * the free space.
418 	 */
419 	if (btrfs_should_fragment_free_space(block_group))
420 		wakeup = false;
421 #endif
422 	/*
423 	 * We don't want to deadlock with somebody trying to allocate a new
424 	 * extent for the extent root while also trying to search the extent
425 	 * root to add free space.  So we skip locking and search the commit
426 	 * root, since it's read-only.
427 	 */
428 	path->skip_locking = 1;
429 	path->search_commit_root = 1;
430 	path->reada = READA_FORWARD;
431 
432 	key.objectid = last;
433 	key.offset = 0;
434 	key.type = BTRFS_EXTENT_ITEM_KEY;
435 
436 next:
437 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
438 	if (ret < 0)
439 		goto out;
440 
441 	leaf = path->nodes[0];
442 	nritems = btrfs_header_nritems(leaf);
443 
444 	while (1) {
445 		if (btrfs_fs_closing(fs_info) > 1) {
446 			last = (u64)-1;
447 			break;
448 		}
449 
450 		if (path->slots[0] < nritems) {
451 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
452 		} else {
453 			ret = find_next_key(path, 0, &key);
454 			if (ret)
455 				break;
456 
457 			if (need_resched() ||
458 			    rwsem_is_contended(&fs_info->commit_root_sem)) {
459 				if (wakeup)
460 					caching_ctl->progress = last;
461 				btrfs_release_path(path);
462 				up_read(&fs_info->commit_root_sem);
463 				mutex_unlock(&caching_ctl->mutex);
464 				cond_resched();
465 				mutex_lock(&caching_ctl->mutex);
466 				down_read(&fs_info->commit_root_sem);
467 				goto next;
468 			}
469 
470 			ret = btrfs_next_leaf(extent_root, path);
471 			if (ret < 0)
472 				goto out;
473 			if (ret)
474 				break;
475 			leaf = path->nodes[0];
476 			nritems = btrfs_header_nritems(leaf);
477 			continue;
478 		}
479 
480 		if (key.objectid < last) {
481 			key.objectid = last;
482 			key.offset = 0;
483 			key.type = BTRFS_EXTENT_ITEM_KEY;
484 
485 			if (wakeup)
486 				caching_ctl->progress = last;
487 			btrfs_release_path(path);
488 			goto next;
489 		}
490 
491 		if (key.objectid < block_group->key.objectid) {
492 			path->slots[0]++;
493 			continue;
494 		}
495 
496 		if (key.objectid >= block_group->key.objectid +
497 		    block_group->key.offset)
498 			break;
499 
500 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
501 		    key.type == BTRFS_METADATA_ITEM_KEY) {
502 			total_found += add_new_free_space(block_group,
503 							  fs_info, last,
504 							  key.objectid);
505 			if (key.type == BTRFS_METADATA_ITEM_KEY)
506 				last = key.objectid +
507 					fs_info->nodesize;
508 			else
509 				last = key.objectid + key.offset;
510 
511 			if (total_found > CACHING_CTL_WAKE_UP) {
512 				total_found = 0;
513 				if (wakeup)
514 					wake_up(&caching_ctl->wait);
515 			}
516 		}
517 		path->slots[0]++;
518 	}
519 	ret = 0;
520 
521 	total_found += add_new_free_space(block_group, fs_info, last,
522 					  block_group->key.objectid +
523 					  block_group->key.offset);
524 	caching_ctl->progress = (u64)-1;
525 
526 out:
527 	btrfs_free_path(path);
528 	return ret;
529 }
530 
531 static noinline void caching_thread(struct btrfs_work *work)
532 {
533 	struct btrfs_block_group_cache *block_group;
534 	struct btrfs_fs_info *fs_info;
535 	struct btrfs_caching_control *caching_ctl;
536 	struct btrfs_root *extent_root;
537 	int ret;
538 
539 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
540 	block_group = caching_ctl->block_group;
541 	fs_info = block_group->fs_info;
542 	extent_root = fs_info->extent_root;
543 
544 	mutex_lock(&caching_ctl->mutex);
545 	down_read(&fs_info->commit_root_sem);
546 
547 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
548 		ret = load_free_space_tree(caching_ctl);
549 	else
550 		ret = load_extent_tree_free(caching_ctl);
551 
552 	spin_lock(&block_group->lock);
553 	block_group->caching_ctl = NULL;
554 	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
555 	spin_unlock(&block_group->lock);
556 
557 #ifdef CONFIG_BTRFS_DEBUG
558 	if (btrfs_should_fragment_free_space(block_group)) {
559 		u64 bytes_used;
560 
561 		spin_lock(&block_group->space_info->lock);
562 		spin_lock(&block_group->lock);
563 		bytes_used = block_group->key.offset -
564 			btrfs_block_group_used(&block_group->item);
565 		block_group->space_info->bytes_used += bytes_used >> 1;
566 		spin_unlock(&block_group->lock);
567 		spin_unlock(&block_group->space_info->lock);
568 		fragment_free_space(block_group);
569 	}
570 #endif
571 
572 	caching_ctl->progress = (u64)-1;
573 
574 	up_read(&fs_info->commit_root_sem);
575 	free_excluded_extents(fs_info, block_group);
576 	mutex_unlock(&caching_ctl->mutex);
577 
578 	wake_up(&caching_ctl->wait);
579 
580 	put_caching_control(caching_ctl);
581 	btrfs_put_block_group(block_group);
582 }
583 
584 static int cache_block_group(struct btrfs_block_group_cache *cache,
585 			     int load_cache_only)
586 {
587 	DEFINE_WAIT(wait);
588 	struct btrfs_fs_info *fs_info = cache->fs_info;
589 	struct btrfs_caching_control *caching_ctl;
590 	int ret = 0;
591 
592 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
593 	if (!caching_ctl)
594 		return -ENOMEM;
595 
596 	INIT_LIST_HEAD(&caching_ctl->list);
597 	mutex_init(&caching_ctl->mutex);
598 	init_waitqueue_head(&caching_ctl->wait);
599 	caching_ctl->block_group = cache;
600 	caching_ctl->progress = cache->key.objectid;
601 	atomic_set(&caching_ctl->count, 1);
602 	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
603 			caching_thread, NULL, NULL);
604 
605 	spin_lock(&cache->lock);
606 	/*
607 	 * This should be a rare occasion, but this could happen I think in the
608 	 * case where one thread starts to load the space cache info, and then
609 	 * some other thread starts a transaction commit which tries to do an
610 	 * allocation while the other thread is still loading the space cache
611 	 * info.  The previous loop should have kept us from choosing this block
612 	 * group, but if we've moved to the state where we will wait on caching
613 	 * block groups we need to first check if we're doing a fast load here,
614 	 * so we can wait for it to finish, otherwise we could end up allocating
615 	 * from a block group whose cache gets evicted for one reason or
616 	 * another.
617 	 */
618 	while (cache->cached == BTRFS_CACHE_FAST) {
619 		struct btrfs_caching_control *ctl;
620 
621 		ctl = cache->caching_ctl;
622 		atomic_inc(&ctl->count);
623 		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
624 		spin_unlock(&cache->lock);
625 
626 		schedule();
627 
628 		finish_wait(&ctl->wait, &wait);
629 		put_caching_control(ctl);
630 		spin_lock(&cache->lock);
631 	}
632 
633 	if (cache->cached != BTRFS_CACHE_NO) {
634 		spin_unlock(&cache->lock);
635 		kfree(caching_ctl);
636 		return 0;
637 	}
638 	WARN_ON(cache->caching_ctl);
639 	cache->caching_ctl = caching_ctl;
640 	cache->cached = BTRFS_CACHE_FAST;
641 	spin_unlock(&cache->lock);
642 
643 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
644 		mutex_lock(&caching_ctl->mutex);
645 		ret = load_free_space_cache(fs_info, cache);
646 
647 		spin_lock(&cache->lock);
648 		if (ret == 1) {
649 			cache->caching_ctl = NULL;
650 			cache->cached = BTRFS_CACHE_FINISHED;
651 			cache->last_byte_to_unpin = (u64)-1;
652 			caching_ctl->progress = (u64)-1;
653 		} else {
654 			if (load_cache_only) {
655 				cache->caching_ctl = NULL;
656 				cache->cached = BTRFS_CACHE_NO;
657 			} else {
658 				cache->cached = BTRFS_CACHE_STARTED;
659 				cache->has_caching_ctl = 1;
660 			}
661 		}
662 		spin_unlock(&cache->lock);
663 #ifdef CONFIG_BTRFS_DEBUG
664 		if (ret == 1 &&
665 		    btrfs_should_fragment_free_space(cache)) {
666 			u64 bytes_used;
667 
668 			spin_lock(&cache->space_info->lock);
669 			spin_lock(&cache->lock);
670 			bytes_used = cache->key.offset -
671 				btrfs_block_group_used(&cache->item);
672 			cache->space_info->bytes_used += bytes_used >> 1;
673 			spin_unlock(&cache->lock);
674 			spin_unlock(&cache->space_info->lock);
675 			fragment_free_space(cache);
676 		}
677 #endif
678 		mutex_unlock(&caching_ctl->mutex);
679 
680 		wake_up(&caching_ctl->wait);
681 		if (ret == 1) {
682 			put_caching_control(caching_ctl);
683 			free_excluded_extents(fs_info, cache);
684 			return 0;
685 		}
686 	} else {
687 		/*
688 		 * We're either using the free space tree or no caching at all.
689 		 * Set cached to the appropriate value and wakeup any waiters.
690 		 */
691 		spin_lock(&cache->lock);
692 		if (load_cache_only) {
693 			cache->caching_ctl = NULL;
694 			cache->cached = BTRFS_CACHE_NO;
695 		} else {
696 			cache->cached = BTRFS_CACHE_STARTED;
697 			cache->has_caching_ctl = 1;
698 		}
699 		spin_unlock(&cache->lock);
700 		wake_up(&caching_ctl->wait);
701 	}
702 
703 	if (load_cache_only) {
704 		put_caching_control(caching_ctl);
705 		return 0;
706 	}
707 
708 	down_write(&fs_info->commit_root_sem);
709 	atomic_inc(&caching_ctl->count);
710 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
711 	up_write(&fs_info->commit_root_sem);
712 
713 	btrfs_get_block_group(cache);
714 
715 	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
716 
717 	return ret;
718 }
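
/*
 * Illustrative caller pattern (simplified, hypothetical): allocation paths
 * typically kick off caching only when it has not finished yet and then wait
 * on the caching_ctl wait queue for enough free space to be loaded before
 * retrying the allocation.
 */
#if 0
	if (!block_group_cache_done(cache)) {
		ret = cache_block_group(cache, 0);
		if (ret)
			return ret;
		/* ... wait for caching progress before retrying ... */
	}
#endif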
719 
720 /*
721  * return the block group that starts at or after bytenr
722  */
723 static struct btrfs_block_group_cache *
724 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
725 {
726 	return block_group_cache_tree_search(info, bytenr, 0);
727 }
728 
729 /*
730  * return the block group that contains the given bytenr
731  */
732 struct btrfs_block_group_cache *btrfs_lookup_block_group(
733 						 struct btrfs_fs_info *info,
734 						 u64 bytenr)
735 {
736 	return block_group_cache_tree_search(info, bytenr, 1);
737 }
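
/*
 * Illustrative sketch: both lookup helpers above take a reference on the
 * returned group via btrfs_get_block_group(), so a typical (hypothetical)
 * caller looks like this.
 */
#if 0
	struct btrfs_block_group_cache *block_group;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group)
		return -ENOENT;
	/* ... use block_group ... */
	btrfs_put_block_group(block_group);
#endif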
738 
739 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
740 						  u64 flags)
741 {
742 	struct list_head *head = &info->space_info;
743 	struct btrfs_space_info *found;
744 
745 	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
746 
747 	rcu_read_lock();
748 	list_for_each_entry_rcu(found, head, list) {
749 		if (found->flags & flags) {
750 			rcu_read_unlock();
751 			return found;
752 		}
753 	}
754 	rcu_read_unlock();
755 	return NULL;
756 }
757 
758 /*
759  * after adding space to the filesystem, we need to clear the full flags
760  * on all the space infos.
761  */
762 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
763 {
764 	struct list_head *head = &info->space_info;
765 	struct btrfs_space_info *found;
766 
767 	rcu_read_lock();
768 	list_for_each_entry_rcu(found, head, list)
769 		found->full = 0;
770 	rcu_read_unlock();
771 }
772 
773 /* simple helper to search for an existing data extent at a given offset */
774 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
775 {
776 	int ret;
777 	struct btrfs_key key;
778 	struct btrfs_path *path;
779 
780 	path = btrfs_alloc_path();
781 	if (!path)
782 		return -ENOMEM;
783 
784 	key.objectid = start;
785 	key.offset = len;
786 	key.type = BTRFS_EXTENT_ITEM_KEY;
787 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
788 	btrfs_free_path(path);
789 	return ret;
790 }
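
/*
 * Illustrative sketch: btrfs_lookup_data_extent() reports whether an extent
 * item with the exact (start, len) key exists, following the usual
 * btrfs_search_slot() convention of 0 for found, > 0 for not found and < 0
 * for an error ('found' below is a hypothetical local).
 */
#if 0
	ret = btrfs_lookup_data_extent(fs_info, bytenr, num_bytes);
	if (ret < 0)
		return ret;
	found = (ret == 0);
#endif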
791 
792 /*
793  * helper function to look up the reference count and flags of a tree block.
794  *
795  * The head node for a delayed ref is used to store the sum of all the
796  * reference count modifications queued up in the rbtree.  The head
797  * node may also store the extent flags to set.  This way you can check
798  * to see what the reference count and extent flags would be once all of
799  * the queued delayed refs are processed.
800  */
801 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
802 			     struct btrfs_fs_info *fs_info, u64 bytenr,
803 			     u64 offset, int metadata, u64 *refs, u64 *flags)
804 {
805 	struct btrfs_delayed_ref_head *head;
806 	struct btrfs_delayed_ref_root *delayed_refs;
807 	struct btrfs_path *path;
808 	struct btrfs_extent_item *ei;
809 	struct extent_buffer *leaf;
810 	struct btrfs_key key;
811 	u32 item_size;
812 	u64 num_refs;
813 	u64 extent_flags;
814 	int ret;
815 
816 	/*
817 	 * If we don't have skinny metadata, don't bother doing anything
818 	 * different
819 	 */
820 	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
821 		offset = fs_info->nodesize;
822 		metadata = 0;
823 	}
824 
825 	path = btrfs_alloc_path();
826 	if (!path)
827 		return -ENOMEM;
828 
829 	if (!trans) {
830 		path->skip_locking = 1;
831 		path->search_commit_root = 1;
832 	}
833 
834 search_again:
835 	key.objectid = bytenr;
836 	key.offset = offset;
837 	if (metadata)
838 		key.type = BTRFS_METADATA_ITEM_KEY;
839 	else
840 		key.type = BTRFS_EXTENT_ITEM_KEY;
841 
842 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
843 	if (ret < 0)
844 		goto out_free;
845 
846 	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
847 		if (path->slots[0]) {
848 			path->slots[0]--;
849 			btrfs_item_key_to_cpu(path->nodes[0], &key,
850 					      path->slots[0]);
851 			if (key.objectid == bytenr &&
852 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
853 			    key.offset == fs_info->nodesize)
854 				ret = 0;
855 		}
856 	}
857 
858 	if (ret == 0) {
859 		leaf = path->nodes[0];
860 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
861 		if (item_size >= sizeof(*ei)) {
862 			ei = btrfs_item_ptr(leaf, path->slots[0],
863 					    struct btrfs_extent_item);
864 			num_refs = btrfs_extent_refs(leaf, ei);
865 			extent_flags = btrfs_extent_flags(leaf, ei);
866 		} else {
867 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
868 			struct btrfs_extent_item_v0 *ei0;
869 			BUG_ON(item_size != sizeof(*ei0));
870 			ei0 = btrfs_item_ptr(leaf, path->slots[0],
871 					     struct btrfs_extent_item_v0);
872 			num_refs = btrfs_extent_refs_v0(leaf, ei0);
873 			/* FIXME: this isn't correct for data */
874 			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
875 #else
876 			BUG();
877 #endif
878 		}
879 		BUG_ON(num_refs == 0);
880 	} else {
881 		num_refs = 0;
882 		extent_flags = 0;
883 		ret = 0;
884 	}
885 
886 	if (!trans)
887 		goto out;
888 
889 	delayed_refs = &trans->transaction->delayed_refs;
890 	spin_lock(&delayed_refs->lock);
891 	head = btrfs_find_delayed_ref_head(trans, bytenr);
892 	if (head) {
893 		if (!mutex_trylock(&head->mutex)) {
894 			atomic_inc(&head->node.refs);
895 			spin_unlock(&delayed_refs->lock);
896 
897 			btrfs_release_path(path);
898 
899 			/*
900 			 * Mutex was contended, block until it's released and try
901 			 * again
902 			 */
903 			mutex_lock(&head->mutex);
904 			mutex_unlock(&head->mutex);
905 			btrfs_put_delayed_ref(&head->node);
906 			goto search_again;
907 		}
908 		spin_lock(&head->lock);
909 		if (head->extent_op && head->extent_op->update_flags)
910 			extent_flags |= head->extent_op->flags_to_set;
911 		else
912 			BUG_ON(num_refs == 0);
913 
914 		num_refs += head->node.ref_mod;
915 		spin_unlock(&head->lock);
916 		mutex_unlock(&head->mutex);
917 	}
918 	spin_unlock(&delayed_refs->lock);
919 out:
920 	WARN_ON(num_refs == 0);
921 	if (refs)
922 		*refs = num_refs;
923 	if (flags)
924 		*flags = extent_flags;
925 out_free:
926 	btrfs_free_path(path);
927 	return ret;
928 }
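
/*
 * Illustrative sketch (hypothetical caller; 'eb' and 'level' are placeholder
 * variables): for a metadata block the offset argument is the tree level when
 * the SKINNY_METADATA feature is enabled; otherwise the function falls back
 * to a regular extent item keyed by nodesize.
 */
#if 0
	u64 refs = 0;
	u64 flags = 0;

	ret = btrfs_lookup_extent_info(trans, fs_info, eb->start, level, 1,
				       &refs, &flags);
	if (ret)
		return ret;
	/* refs and flags now include any queued delayed ref modifications */
#endif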
929 
930 /*
931  * Back reference rules.  Back refs have three main goals:
932  *
933  * 1) differentiate between all holders of references to an extent so that
934  *    when a reference is dropped we can make sure it was a valid reference
935  *    before freeing the extent.
936  *
937  * 2) Provide enough information to quickly find the holders of an extent
938  *    if we notice a given block is corrupted or bad.
939  *
940  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
941  *    maintenance.  This is actually the same as #2, but with a slightly
942  *    different use case.
943  *
944  * There are two kinds of back refs. The implicit back ref is optimized
945  * for pointers in non-shared tree blocks. For a given pointer in a block,
946  * back refs of this kind provide information about the block's owner tree
947  * and the pointer's key. This information allows us to find the block by
948  * b-tree searching. The full back ref is for pointers in tree blocks not
949  * referenced by their owner trees. The location of the tree block is
950  * recorded in the back refs. The full back ref is actually generic and can
951  * be used in all cases where the implicit back ref is used. The major
952  * shortcoming of the full back ref is its overhead. Every time a tree block
953  * gets COWed, we have to update the back ref entries for all pointers in it.
954  *
955  * For a newly allocated tree block, we use implicit back refs for
956  * pointers in it. This means most tree related operations only involve
957  * implicit back refs. For a tree block created in an old transaction, the
958  * only way to drop a reference to it is to COW it. So we can detect the
959  * event that a tree block loses its owner tree's reference and do the
960  * back ref conversion.
961  *
962  * When a tree block is COWed through a tree, there are four cases:
963  *
964  * The reference count of the block is one and the tree is the block's
965  * owner tree. Nothing to do in this case.
966  *
967  * The reference count of the block is one and the tree is not the
968  * block's owner tree. In this case, full back refs are used for pointers
969  * in the block. Remove these full back refs and add implicit back refs for
970  * every pointer in the new block.
971  *
972  * The reference count of the block is greater than one and the tree is
973  * the block's owner tree. In this case, implicit back refs are used for
974  * pointers in the block. Add full back refs for every pointer in the
975  * block and increase lower level extents' reference counts. The original
976  * implicit back refs are carried over to the new block.
977  *
978  * The reference count of the block is greater than one and the tree is
979  * not the block's owner tree. Add implicit back refs for every pointer in
980  * the new block and increase lower level extents' reference counts.
981  *
982  * Back Reference Key composing:
983  *
984  * The key objectid corresponds to the first byte in the extent,
985  * The key type is used to differentiate between types of back refs.
986  * There are different meanings of the key offset for different types
987  * of back refs.
988  *
989  * File extents can be referenced by:
990  *
991  * - multiple snapshots, subvolumes, or different generations in one subvol
992  * - different files inside a single subvolume
993  * - different offsets inside a file (bookend extents in file.c)
994  *
995  * The extent ref structure for the implicit back refs has fields for:
996  *
997  * - Objectid of the subvolume root
998  * - objectid of the file holding the reference
999  * - original offset in the file
1000  * - how many bookend extents
1001  *
1002  * The key offset for the implicit back refs is hash of the first
1003  * three fields.
1004  *
1005  * The extent ref structure for the full back refs has a field for:
1006  *
1007  * - number of pointers in the tree leaf
1008  *
1009  * The key offset for the full back refs is the first byte of
1010  * the tree leaf.
1011  *
1012  * When a file extent is allocated, the implicit back ref is used
1013  * and the fields are filled in:
1014  *
1015  *     (root_key.objectid, inode objectid, offset in file, 1)
1016  *
1017  * When a file extent is removed by file truncation, we find the
1018  * corresponding implicit back refs and check the following fields:
1019  *
1020  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1021  *
1022  * Btree extents can be referenced by:
1023  *
1024  * - Different subvolumes
1025  *
1026  * Both the implicit back refs and the full back refs for tree blocks
1027  * consist only of a key. The key offset for the implicit back refs is the
1028  * objectid of the block's owner tree. The key offset for the full back
1029  * refs is the first byte of the parent block.
1030  *
1031  * When implicit back refs are used, information about the lowest key and
1032  * level of the tree block is required. This information is stored in the
1033  * tree block info structure.
1034  */
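
/*
 * Worked example (illustrative; 'bytenr' and 'parent' are hypothetical
 * values): a data extent at 'bytenr' referenced from subvolume 5, inode 257,
 * file offset 0 gets an implicit back ref keyed by the hash of those three
 * values, while the same extent referenced through a shared leaf at 'parent'
 * gets a full back ref keyed by the parent's byte number.
 */
#if 0
	struct btrfs_key implicit = {
		.objectid = bytenr,
		.type = BTRFS_EXTENT_DATA_REF_KEY,
		.offset = hash_extent_data_ref(5, 257, 0),
	};
	struct btrfs_key full = {
		.objectid = bytenr,
		.type = BTRFS_SHARED_DATA_REF_KEY,
		.offset = parent,
	};
#endif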
1035 
1036 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1037 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1038 				  struct btrfs_root *root,
1039 				  struct btrfs_path *path,
1040 				  u64 owner, u32 extra_size)
1041 {
1042 	struct btrfs_extent_item *item;
1043 	struct btrfs_extent_item_v0 *ei0;
1044 	struct btrfs_extent_ref_v0 *ref0;
1045 	struct btrfs_tree_block_info *bi;
1046 	struct extent_buffer *leaf;
1047 	struct btrfs_key key;
1048 	struct btrfs_key found_key;
1049 	u32 new_size = sizeof(*item);
1050 	u64 refs;
1051 	int ret;
1052 
1053 	leaf = path->nodes[0];
1054 	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1055 
1056 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1057 	ei0 = btrfs_item_ptr(leaf, path->slots[0],
1058 			     struct btrfs_extent_item_v0);
1059 	refs = btrfs_extent_refs_v0(leaf, ei0);
1060 
1061 	if (owner == (u64)-1) {
1062 		while (1) {
1063 			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1064 				ret = btrfs_next_leaf(root, path);
1065 				if (ret < 0)
1066 					return ret;
1067 				BUG_ON(ret > 0); /* Corruption */
1068 				leaf = path->nodes[0];
1069 			}
1070 			btrfs_item_key_to_cpu(leaf, &found_key,
1071 					      path->slots[0]);
1072 			BUG_ON(key.objectid != found_key.objectid);
1073 			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1074 				path->slots[0]++;
1075 				continue;
1076 			}
1077 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1078 					      struct btrfs_extent_ref_v0);
1079 			owner = btrfs_ref_objectid_v0(leaf, ref0);
1080 			break;
1081 		}
1082 	}
1083 	btrfs_release_path(path);
1084 
1085 	if (owner < BTRFS_FIRST_FREE_OBJECTID)
1086 		new_size += sizeof(*bi);
1087 
1088 	new_size -= sizeof(*ei0);
1089 	ret = btrfs_search_slot(trans, root, &key, path,
1090 				new_size + extra_size, 1);
1091 	if (ret < 0)
1092 		return ret;
1093 	BUG_ON(ret); /* Corruption */
1094 
1095 	btrfs_extend_item(root->fs_info, path, new_size);
1096 
1097 	leaf = path->nodes[0];
1098 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1099 	btrfs_set_extent_refs(leaf, item, refs);
1100 	/* FIXME: get real generation */
1101 	btrfs_set_extent_generation(leaf, item, 0);
1102 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1103 		btrfs_set_extent_flags(leaf, item,
1104 				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
1105 				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
1106 		bi = (struct btrfs_tree_block_info *)(item + 1);
1107 		/* FIXME: get first key of the block */
1108 		memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
1109 		btrfs_set_tree_block_level(leaf, bi, (int)owner);
1110 	} else {
1111 		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1112 	}
1113 	btrfs_mark_buffer_dirty(leaf);
1114 	return 0;
1115 }
1116 #endif
1117 
1118 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1119 {
1120 	u32 high_crc = ~(u32)0;
1121 	u32 low_crc = ~(u32)0;
1122 	__le64 lenum;
1123 
1124 	lenum = cpu_to_le64(root_objectid);
1125 	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1126 	lenum = cpu_to_le64(owner);
1127 	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1128 	lenum = cpu_to_le64(offset);
1129 	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1130 
1131 	return ((u64)high_crc << 31) ^ (u64)low_crc;
1132 }
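
/*
 * Note (illustrative): the hash above is only used as the key offset of the
 * extent data ref item, so different (root, objectid, offset) tuples can
 * collide.  Collisions are handled by the callers, e.g. the key.offset++
 * retry loop in insert_extent_data_ref() and the match_extent_data_ref()
 * checks in lookup_extent_data_ref().
 */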
1133 
1134 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1135 				     struct btrfs_extent_data_ref *ref)
1136 {
1137 	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1138 				    btrfs_extent_data_ref_objectid(leaf, ref),
1139 				    btrfs_extent_data_ref_offset(leaf, ref));
1140 }
1141 
1142 static int match_extent_data_ref(struct extent_buffer *leaf,
1143 				 struct btrfs_extent_data_ref *ref,
1144 				 u64 root_objectid, u64 owner, u64 offset)
1145 {
1146 	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1147 	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1148 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
1149 		return 0;
1150 	return 1;
1151 }
1152 
1153 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1154 					   struct btrfs_root *root,
1155 					   struct btrfs_path *path,
1156 					   u64 bytenr, u64 parent,
1157 					   u64 root_objectid,
1158 					   u64 owner, u64 offset)
1159 {
1160 	struct btrfs_key key;
1161 	struct btrfs_extent_data_ref *ref;
1162 	struct extent_buffer *leaf;
1163 	u32 nritems;
1164 	int ret;
1165 	int recow;
1166 	int err = -ENOENT;
1167 
1168 	key.objectid = bytenr;
1169 	if (parent) {
1170 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1171 		key.offset = parent;
1172 	} else {
1173 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1174 		key.offset = hash_extent_data_ref(root_objectid,
1175 						  owner, offset);
1176 	}
1177 again:
1178 	recow = 0;
1179 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1180 	if (ret < 0) {
1181 		err = ret;
1182 		goto fail;
1183 	}
1184 
1185 	if (parent) {
1186 		if (!ret)
1187 			return 0;
1188 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1189 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1190 		btrfs_release_path(path);
1191 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1192 		if (ret < 0) {
1193 			err = ret;
1194 			goto fail;
1195 		}
1196 		if (!ret)
1197 			return 0;
1198 #endif
1199 		goto fail;
1200 	}
1201 
1202 	leaf = path->nodes[0];
1203 	nritems = btrfs_header_nritems(leaf);
1204 	while (1) {
1205 		if (path->slots[0] >= nritems) {
1206 			ret = btrfs_next_leaf(root, path);
1207 			if (ret < 0)
1208 				err = ret;
1209 			if (ret)
1210 				goto fail;
1211 
1212 			leaf = path->nodes[0];
1213 			nritems = btrfs_header_nritems(leaf);
1214 			recow = 1;
1215 		}
1216 
1217 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1218 		if (key.objectid != bytenr ||
1219 		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
1220 			goto fail;
1221 
1222 		ref = btrfs_item_ptr(leaf, path->slots[0],
1223 				     struct btrfs_extent_data_ref);
1224 
1225 		if (match_extent_data_ref(leaf, ref, root_objectid,
1226 					  owner, offset)) {
1227 			if (recow) {
1228 				btrfs_release_path(path);
1229 				goto again;
1230 			}
1231 			err = 0;
1232 			break;
1233 		}
1234 		path->slots[0]++;
1235 	}
1236 fail:
1237 	return err;
1238 }
1239 
1240 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1241 					   struct btrfs_root *root,
1242 					   struct btrfs_path *path,
1243 					   u64 bytenr, u64 parent,
1244 					   u64 root_objectid, u64 owner,
1245 					   u64 offset, int refs_to_add)
1246 {
1247 	struct btrfs_key key;
1248 	struct extent_buffer *leaf;
1249 	u32 size;
1250 	u32 num_refs;
1251 	int ret;
1252 
1253 	key.objectid = bytenr;
1254 	if (parent) {
1255 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1256 		key.offset = parent;
1257 		size = sizeof(struct btrfs_shared_data_ref);
1258 	} else {
1259 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1260 		key.offset = hash_extent_data_ref(root_objectid,
1261 						  owner, offset);
1262 		size = sizeof(struct btrfs_extent_data_ref);
1263 	}
1264 
1265 	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1266 	if (ret && ret != -EEXIST)
1267 		goto fail;
1268 
1269 	leaf = path->nodes[0];
1270 	if (parent) {
1271 		struct btrfs_shared_data_ref *ref;
1272 		ref = btrfs_item_ptr(leaf, path->slots[0],
1273 				     struct btrfs_shared_data_ref);
1274 		if (ret == 0) {
1275 			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1276 		} else {
1277 			num_refs = btrfs_shared_data_ref_count(leaf, ref);
1278 			num_refs += refs_to_add;
1279 			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1280 		}
1281 	} else {
1282 		struct btrfs_extent_data_ref *ref;
1283 		while (ret == -EEXIST) {
1284 			ref = btrfs_item_ptr(leaf, path->slots[0],
1285 					     struct btrfs_extent_data_ref);
1286 			if (match_extent_data_ref(leaf, ref, root_objectid,
1287 						  owner, offset))
1288 				break;
1289 			btrfs_release_path(path);
1290 			key.offset++;
1291 			ret = btrfs_insert_empty_item(trans, root, path, &key,
1292 						      size);
1293 			if (ret && ret != -EEXIST)
1294 				goto fail;
1295 
1296 			leaf = path->nodes[0];
1297 		}
1298 		ref = btrfs_item_ptr(leaf, path->slots[0],
1299 				     struct btrfs_extent_data_ref);
1300 		if (ret == 0) {
1301 			btrfs_set_extent_data_ref_root(leaf, ref,
1302 						       root_objectid);
1303 			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1304 			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1305 			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1306 		} else {
1307 			num_refs = btrfs_extent_data_ref_count(leaf, ref);
1308 			num_refs += refs_to_add;
1309 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1310 		}
1311 	}
1312 	btrfs_mark_buffer_dirty(leaf);
1313 	ret = 0;
1314 fail:
1315 	btrfs_release_path(path);
1316 	return ret;
1317 }
1318 
1319 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1320 					   struct btrfs_root *root,
1321 					   struct btrfs_path *path,
1322 					   int refs_to_drop, int *last_ref)
1323 {
1324 	struct btrfs_key key;
1325 	struct btrfs_extent_data_ref *ref1 = NULL;
1326 	struct btrfs_shared_data_ref *ref2 = NULL;
1327 	struct extent_buffer *leaf;
1328 	u32 num_refs = 0;
1329 	int ret = 0;
1330 
1331 	leaf = path->nodes[0];
1332 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1333 
1334 	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1335 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1336 				      struct btrfs_extent_data_ref);
1337 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1338 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1339 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1340 				      struct btrfs_shared_data_ref);
1341 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1342 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1343 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1344 		struct btrfs_extent_ref_v0 *ref0;
1345 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1346 				      struct btrfs_extent_ref_v0);
1347 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1348 #endif
1349 	} else {
1350 		BUG();
1351 	}
1352 
1353 	BUG_ON(num_refs < refs_to_drop);
1354 	num_refs -= refs_to_drop;
1355 
1356 	if (num_refs == 0) {
1357 		ret = btrfs_del_item(trans, root, path);
1358 		*last_ref = 1;
1359 	} else {
1360 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1361 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1362 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1363 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1364 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1365 		else {
1366 			struct btrfs_extent_ref_v0 *ref0;
1367 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1368 					struct btrfs_extent_ref_v0);
1369 			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1370 		}
1371 #endif
1372 		btrfs_mark_buffer_dirty(leaf);
1373 	}
1374 	return ret;
1375 }
1376 
1377 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1378 					  struct btrfs_extent_inline_ref *iref)
1379 {
1380 	struct btrfs_key key;
1381 	struct extent_buffer *leaf;
1382 	struct btrfs_extent_data_ref *ref1;
1383 	struct btrfs_shared_data_ref *ref2;
1384 	u32 num_refs = 0;
1385 
1386 	leaf = path->nodes[0];
1387 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1388 	if (iref) {
1389 		if (btrfs_extent_inline_ref_type(leaf, iref) ==
1390 		    BTRFS_EXTENT_DATA_REF_KEY) {
1391 			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1392 			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1393 		} else {
1394 			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1395 			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1396 		}
1397 	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1398 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1399 				      struct btrfs_extent_data_ref);
1400 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1401 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1402 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1403 				      struct btrfs_shared_data_ref);
1404 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1405 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1406 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1407 		struct btrfs_extent_ref_v0 *ref0;
1408 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1409 				      struct btrfs_extent_ref_v0);
1410 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1411 #endif
1412 	} else {
1413 		WARN_ON(1);
1414 	}
1415 	return num_refs;
1416 }
1417 
1418 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1419 					  struct btrfs_root *root,
1420 					  struct btrfs_path *path,
1421 					  u64 bytenr, u64 parent,
1422 					  u64 root_objectid)
1423 {
1424 	struct btrfs_key key;
1425 	int ret;
1426 
1427 	key.objectid = bytenr;
1428 	if (parent) {
1429 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1430 		key.offset = parent;
1431 	} else {
1432 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1433 		key.offset = root_objectid;
1434 	}
1435 
1436 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1437 	if (ret > 0)
1438 		ret = -ENOENT;
1439 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1440 	if (ret == -ENOENT && parent) {
1441 		btrfs_release_path(path);
1442 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1443 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1444 		if (ret > 0)
1445 			ret = -ENOENT;
1446 	}
1447 #endif
1448 	return ret;
1449 }
1450 
1451 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1452 					  struct btrfs_root *root,
1453 					  struct btrfs_path *path,
1454 					  u64 bytenr, u64 parent,
1455 					  u64 root_objectid)
1456 {
1457 	struct btrfs_key key;
1458 	int ret;
1459 
1460 	key.objectid = bytenr;
1461 	if (parent) {
1462 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1463 		key.offset = parent;
1464 	} else {
1465 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1466 		key.offset = root_objectid;
1467 	}
1468 
1469 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1470 	btrfs_release_path(path);
1471 	return ret;
1472 }
1473 
1474 static inline int extent_ref_type(u64 parent, u64 owner)
1475 {
1476 	int type;
1477 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1478 		if (parent > 0)
1479 			type = BTRFS_SHARED_BLOCK_REF_KEY;
1480 		else
1481 			type = BTRFS_TREE_BLOCK_REF_KEY;
1482 	} else {
1483 		if (parent > 0)
1484 			type = BTRFS_SHARED_DATA_REF_KEY;
1485 		else
1486 			type = BTRFS_EXTENT_DATA_REF_KEY;
1487 	}
1488 	return type;
1489 }
1490 
1491 static int find_next_key(struct btrfs_path *path, int level,
1492 			 struct btrfs_key *key)
1493 
1494 {
1495 	for (; level < BTRFS_MAX_LEVEL; level++) {
1496 		if (!path->nodes[level])
1497 			break;
1498 		if (path->slots[level] + 1 >=
1499 		    btrfs_header_nritems(path->nodes[level]))
1500 			continue;
1501 		if (level == 0)
1502 			btrfs_item_key_to_cpu(path->nodes[level], key,
1503 					      path->slots[level] + 1);
1504 		else
1505 			btrfs_node_key_to_cpu(path->nodes[level], key,
1506 					      path->slots[level] + 1);
1507 		return 0;
1508 	}
1509 	return 1;
1510 }
1511 
1512 /*
1513  * Look for the inline back ref. If the back ref is found, *ref_ret is set
1514  * to the address of the inline back ref, and 0 is returned.
1515  *
1516  * If the back ref isn't found, *ref_ret is set to the address where it
1517  * should be inserted, and -ENOENT is returned.
1518  *
1519  * If insert is true and there are too many inline back refs, the path
1520  * points to the extent item, and -EAGAIN is returned.
1521  *
1522  * NOTE: inline back refs are ordered in the same way that back ref
1523  *	 items in the tree are ordered.
1524  */
1525 static noinline_for_stack
1526 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1527 				 struct btrfs_root *root,
1528 				 struct btrfs_path *path,
1529 				 struct btrfs_extent_inline_ref **ref_ret,
1530 				 u64 bytenr, u64 num_bytes,
1531 				 u64 parent, u64 root_objectid,
1532 				 u64 owner, u64 offset, int insert)
1533 {
1534 	struct btrfs_fs_info *fs_info = root->fs_info;
1535 	struct btrfs_key key;
1536 	struct extent_buffer *leaf;
1537 	struct btrfs_extent_item *ei;
1538 	struct btrfs_extent_inline_ref *iref;
1539 	u64 flags;
1540 	u64 item_size;
1541 	unsigned long ptr;
1542 	unsigned long end;
1543 	int extra_size;
1544 	int type;
1545 	int want;
1546 	int ret;
1547 	int err = 0;
1548 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
1549 
1550 	key.objectid = bytenr;
1551 	key.type = BTRFS_EXTENT_ITEM_KEY;
1552 	key.offset = num_bytes;
1553 
1554 	want = extent_ref_type(parent, owner);
1555 	if (insert) {
1556 		extra_size = btrfs_extent_inline_ref_size(want);
1557 		path->keep_locks = 1;
1558 	} else
1559 		extra_size = -1;
1560 
1561 	/*
1562 	 * Owner is our parent level, so we can just add one to get the level
1563 	 * for the block we are interested in.
1564 	 */
1565 	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1566 		key.type = BTRFS_METADATA_ITEM_KEY;
1567 		key.offset = owner;
1568 	}
1569 
1570 again:
1571 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1572 	if (ret < 0) {
1573 		err = ret;
1574 		goto out;
1575 	}
1576 
1577 	/*
1578 	 * We may be a newly converted file system which still has the old fat
1579 	 * extent entries for metadata, so try and see if we have one of those.
1580 	 */
1581 	if (ret > 0 && skinny_metadata) {
1582 		skinny_metadata = false;
1583 		if (path->slots[0]) {
1584 			path->slots[0]--;
1585 			btrfs_item_key_to_cpu(path->nodes[0], &key,
1586 					      path->slots[0]);
1587 			if (key.objectid == bytenr &&
1588 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
1589 			    key.offset == num_bytes)
1590 				ret = 0;
1591 		}
1592 		if (ret) {
1593 			key.objectid = bytenr;
1594 			key.type = BTRFS_EXTENT_ITEM_KEY;
1595 			key.offset = num_bytes;
1596 			btrfs_release_path(path);
1597 			goto again;
1598 		}
1599 	}
1600 
1601 	if (ret && !insert) {
1602 		err = -ENOENT;
1603 		goto out;
1604 	} else if (WARN_ON(ret)) {
1605 		err = -EIO;
1606 		goto out;
1607 	}
1608 
1609 	leaf = path->nodes[0];
1610 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1611 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1612 	if (item_size < sizeof(*ei)) {
1613 		if (!insert) {
1614 			err = -ENOENT;
1615 			goto out;
1616 		}
1617 		ret = convert_extent_item_v0(trans, root, path, owner,
1618 					     extra_size);
1619 		if (ret < 0) {
1620 			err = ret;
1621 			goto out;
1622 		}
1623 		leaf = path->nodes[0];
1624 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1625 	}
1626 #endif
1627 	BUG_ON(item_size < sizeof(*ei));
1628 
1629 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1630 	flags = btrfs_extent_flags(leaf, ei);
1631 
1632 	ptr = (unsigned long)(ei + 1);
1633 	end = (unsigned long)ei + item_size;
1634 
1635 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1636 		ptr += sizeof(struct btrfs_tree_block_info);
1637 		BUG_ON(ptr > end);
1638 	}
1639 
1640 	err = -ENOENT;
1641 	while (1) {
1642 		if (ptr >= end) {
1643 			WARN_ON(ptr > end);
1644 			break;
1645 		}
1646 		iref = (struct btrfs_extent_inline_ref *)ptr;
1647 		type = btrfs_extent_inline_ref_type(leaf, iref);
1648 		if (want < type)
1649 			break;
1650 		if (want > type) {
1651 			ptr += btrfs_extent_inline_ref_size(type);
1652 			continue;
1653 		}
1654 
1655 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1656 			struct btrfs_extent_data_ref *dref;
1657 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1658 			if (match_extent_data_ref(leaf, dref, root_objectid,
1659 						  owner, offset)) {
1660 				err = 0;
1661 				break;
1662 			}
1663 			if (hash_extent_data_ref_item(leaf, dref) <
1664 			    hash_extent_data_ref(root_objectid, owner, offset))
1665 				break;
1666 		} else {
1667 			u64 ref_offset;
1668 			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1669 			if (parent > 0) {
1670 				if (parent == ref_offset) {
1671 					err = 0;
1672 					break;
1673 				}
1674 				if (ref_offset < parent)
1675 					break;
1676 			} else {
1677 				if (root_objectid == ref_offset) {
1678 					err = 0;
1679 					break;
1680 				}
1681 				if (ref_offset < root_objectid)
1682 					break;
1683 			}
1684 		}
1685 		ptr += btrfs_extent_inline_ref_size(type);
1686 	}
1687 	if (err == -ENOENT && insert) {
1688 		if (item_size + extra_size >=
1689 		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1690 			err = -EAGAIN;
1691 			goto out;
1692 		}
1693 		/*
1694 		 * To add a new inline back ref, we have to make sure
1695 		 * there is no corresponding back ref item.
1696 		 * For simplicity, we just do not add a new inline back
1697 		 * ref if there is any kind of item for this block.
1698 		 */
1699 		if (find_next_key(path, 0, &key) == 0 &&
1700 		    key.objectid == bytenr &&
1701 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1702 			err = -EAGAIN;
1703 			goto out;
1704 		}
1705 	}
1706 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1707 out:
1708 	if (insert) {
1709 		path->keep_locks = 0;
1710 		btrfs_unlock_up_safe(path, 1);
1711 	}
1712 	return err;
1713 }
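
/*
 * Illustrative sketch (simplified, hypothetical): callers that want to add a
 * reference typically combine this lookup with the helpers that follow,
 * updating the inline ref in place on success and inserting a new one when
 * -ENOENT comes back with *ref_ret pointing at the insert position.  -EAGAIN
 * means the extent item is full and the ref must be added as a separate
 * keyed item instead.
 */
#if 0
	ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0)
		update_inline_extent_backref(root, path, iref, refs_to_add,
					     extent_op, NULL);
	else if (ret == -ENOENT)
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
#endif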
1714 
1715 /*
1716  * helper to add a new inline back ref
1717  */
1718 static noinline_for_stack
1719 void setup_inline_extent_backref(struct btrfs_root *root,
1720 				 struct btrfs_path *path,
1721 				 struct btrfs_extent_inline_ref *iref,
1722 				 u64 parent, u64 root_objectid,
1723 				 u64 owner, u64 offset, int refs_to_add,
1724 				 struct btrfs_delayed_extent_op *extent_op)
1725 {
1726 	struct extent_buffer *leaf;
1727 	struct btrfs_extent_item *ei;
1728 	unsigned long ptr;
1729 	unsigned long end;
1730 	unsigned long item_offset;
1731 	u64 refs;
1732 	int size;
1733 	int type;
1734 
1735 	leaf = path->nodes[0];
1736 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1737 	item_offset = (unsigned long)iref - (unsigned long)ei;
1738 
1739 	type = extent_ref_type(parent, owner);
1740 	size = btrfs_extent_inline_ref_size(type);
1741 
1742 	btrfs_extend_item(root->fs_info, path, size);
1743 
1744 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1745 	refs = btrfs_extent_refs(leaf, ei);
1746 	refs += refs_to_add;
1747 	btrfs_set_extent_refs(leaf, ei, refs);
1748 	if (extent_op)
1749 		__run_delayed_extent_op(extent_op, leaf, ei);
1750 
1751 	ptr = (unsigned long)ei + item_offset;
1752 	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1753 	if (ptr < end - size)
1754 		memmove_extent_buffer(leaf, ptr + size, ptr,
1755 				      end - size - ptr);
1756 
1757 	iref = (struct btrfs_extent_inline_ref *)ptr;
1758 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1759 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1760 		struct btrfs_extent_data_ref *dref;
1761 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1762 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1763 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1764 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1765 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1766 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1767 		struct btrfs_shared_data_ref *sref;
1768 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1769 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1770 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1771 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1772 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1773 	} else {
1774 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1775 	}
1776 	btrfs_mark_buffer_dirty(leaf);
1777 }
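/*
 * Layout sketch for the insertion above (illustrative, not from the
 * original source): an extent item is the btrfs_extent_item header (plus a
 * btrfs_tree_block_info for non-skinny tree blocks) followed by a packed
 * run of inline refs:
 *
 *	[extent_item][iref 0][iref 1]...[iref N]
 *
 * btrfs_extend_item() grows the item by 'size' bytes, the memmove shifts
 * every ref that sorts after the insertion point towards the end, and the
 * new ref is then written at 'ptr' so the inline refs stay in sorted order.
 */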
1778 
1779 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1780 				 struct btrfs_root *root,
1781 				 struct btrfs_path *path,
1782 				 struct btrfs_extent_inline_ref **ref_ret,
1783 				 u64 bytenr, u64 num_bytes, u64 parent,
1784 				 u64 root_objectid, u64 owner, u64 offset)
1785 {
1786 	int ret;
1787 
1788 	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1789 					   bytenr, num_bytes, parent,
1790 					   root_objectid, owner, offset, 0);
1791 	if (ret != -ENOENT)
1792 		return ret;
1793 
1794 	btrfs_release_path(path);
1795 	*ref_ret = NULL;
1796 
1797 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1798 		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1799 					    root_objectid);
1800 	} else {
1801 		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1802 					     root_objectid, owner, offset);
1803 	}
1804 	return ret;
1805 }
1806 
1807 /*
1808  * helper to update/remove inline back ref
1809  */
1810 static noinline_for_stack
1811 void update_inline_extent_backref(struct btrfs_root *root,
1812 				  struct btrfs_path *path,
1813 				  struct btrfs_extent_inline_ref *iref,
1814 				  int refs_to_mod,
1815 				  struct btrfs_delayed_extent_op *extent_op,
1816 				  int *last_ref)
1817 {
1818 	struct extent_buffer *leaf;
1819 	struct btrfs_extent_item *ei;
1820 	struct btrfs_extent_data_ref *dref = NULL;
1821 	struct btrfs_shared_data_ref *sref = NULL;
1822 	unsigned long ptr;
1823 	unsigned long end;
1824 	u32 item_size;
1825 	int size;
1826 	int type;
1827 	u64 refs;
1828 
1829 	leaf = path->nodes[0];
1830 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1831 	refs = btrfs_extent_refs(leaf, ei);
1832 	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1833 	refs += refs_to_mod;
1834 	btrfs_set_extent_refs(leaf, ei, refs);
1835 	if (extent_op)
1836 		__run_delayed_extent_op(extent_op, leaf, ei);
1837 
1838 	type = btrfs_extent_inline_ref_type(leaf, iref);
1839 
1840 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1841 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1842 		refs = btrfs_extent_data_ref_count(leaf, dref);
1843 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1844 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1845 		refs = btrfs_shared_data_ref_count(leaf, sref);
1846 	} else {
1847 		refs = 1;
1848 		BUG_ON(refs_to_mod != -1);
1849 	}
1850 
1851 	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1852 	refs += refs_to_mod;
1853 
1854 	if (refs > 0) {
1855 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1856 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1857 		else
1858 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1859 	} else {
1860 		*last_ref = 1;
1861 		size =  btrfs_extent_inline_ref_size(type);
1862 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1863 		ptr = (unsigned long)iref;
1864 		end = (unsigned long)ei + item_size;
1865 		if (ptr + size < end)
1866 			memmove_extent_buffer(leaf, ptr, ptr + size,
1867 					      end - ptr - size);
1868 		item_size -= size;
1869 		btrfs_truncate_item(root->fs_info, path, item_size, 1);
1870 	}
1871 	btrfs_mark_buffer_dirty(leaf);
1872 }
1873 
1874 static noinline_for_stack
1875 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1876 				 struct btrfs_root *root,
1877 				 struct btrfs_path *path,
1878 				 u64 bytenr, u64 num_bytes, u64 parent,
1879 				 u64 root_objectid, u64 owner,
1880 				 u64 offset, int refs_to_add,
1881 				 struct btrfs_delayed_extent_op *extent_op)
1882 {
1883 	struct btrfs_extent_inline_ref *iref;
1884 	int ret;
1885 
1886 	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1887 					   bytenr, num_bytes, parent,
1888 					   root_objectid, owner, offset, 1);
1889 	if (ret == 0) {
1890 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1891 		update_inline_extent_backref(root, path, iref,
1892 					     refs_to_add, extent_op, NULL);
1893 	} else if (ret == -ENOENT) {
1894 		setup_inline_extent_backref(root, path, iref, parent,
1895 					    root_objectid, owner, offset,
1896 					    refs_to_add, extent_op);
1897 		ret = 0;
1898 	}
1899 	return ret;
1900 }
1901 
1902 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1903 				 struct btrfs_root *root,
1904 				 struct btrfs_path *path,
1905 				 u64 bytenr, u64 parent, u64 root_objectid,
1906 				 u64 owner, u64 offset, int refs_to_add)
1907 {
1908 	int ret;
1909 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1910 		BUG_ON(refs_to_add != 1);
1911 		ret = insert_tree_block_ref(trans, root, path, bytenr,
1912 					    parent, root_objectid);
1913 	} else {
1914 		ret = insert_extent_data_ref(trans, root, path, bytenr,
1915 					     parent, root_objectid,
1916 					     owner, offset, refs_to_add);
1917 	}
1918 	return ret;
1919 }
1920 
1921 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1922 				 struct btrfs_root *root,
1923 				 struct btrfs_path *path,
1924 				 struct btrfs_extent_inline_ref *iref,
1925 				 int refs_to_drop, int is_data, int *last_ref)
1926 {
1927 	int ret = 0;
1928 
1929 	BUG_ON(!is_data && refs_to_drop != 1);
1930 	if (iref) {
1931 		update_inline_extent_backref(root, path, iref,
1932 					     -refs_to_drop, NULL, last_ref);
1933 	} else if (is_data) {
1934 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1935 					     last_ref);
1936 	} else {
1937 		*last_ref = 1;
1938 		ret = btrfs_del_item(trans, root, path);
1939 	}
1940 	return ret;
1941 }
1942 
1943 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1944 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1945 			       u64 *discarded_bytes)
1946 {
1947 	int j, ret = 0;
1948 	u64 bytes_left, end;
1949 	u64 aligned_start = ALIGN(start, 1 << 9);
1950 
1951 	if (WARN_ON(start != aligned_start)) {
1952 		len -= aligned_start - start;
1953 		len = round_down(len, 1 << 9);
1954 		start = aligned_start;
1955 	}
1956 
1957 	*discarded_bytes = 0;
1958 
1959 	if (!len)
1960 		return 0;
1961 
1962 	end = start + len;
1963 	bytes_left = len;
1964 
1965 	/* Skip any superblocks on this device. */
1966 	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1967 		u64 sb_start = btrfs_sb_offset(j);
1968 		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1969 		u64 size = sb_start - start;
1970 
1971 		if (!in_range(sb_start, start, bytes_left) &&
1972 		    !in_range(sb_end, start, bytes_left) &&
1973 		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1974 			continue;
1975 
1976 		/*
1977 		 * Superblock spans beginning of range.  Adjust start and
1978 		 * try again.
1979 		 */
1980 		if (sb_start <= start) {
1981 			start += sb_end - start;
1982 			if (start > end) {
1983 				bytes_left = 0;
1984 				break;
1985 			}
1986 			bytes_left = end - start;
1987 			continue;
1988 		}
1989 
1990 		if (size) {
1991 			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1992 						   GFP_NOFS, 0);
1993 			if (!ret)
1994 				*discarded_bytes += size;
1995 			else if (ret != -EOPNOTSUPP)
1996 				return ret;
1997 		}
1998 
1999 		start = sb_end;
2000 		if (start > end) {
2001 			bytes_left = 0;
2002 			break;
2003 		}
2004 		bytes_left = end - start;
2005 	}
2006 
2007 	if (bytes_left) {
2008 		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2009 					   GFP_NOFS, 0);
2010 		if (!ret)
2011 			*discarded_bytes += bytes_left;
2012 	}
2013 	return ret;
2014 }
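/*
 * Worked example for the superblock skipping above (hypothetical range,
 * assuming the usual btrfs mirror offsets of 64KiB, 64MiB and 256GiB and a
 * 4KiB BTRFS_SUPER_INFO_SIZE): a discard of [60MiB, 68MiB) overlaps the
 * copy at 64MiB, so the loop issues one discard for [60MiB, 64MiB),
 * advances start to 64MiB + 4KiB, and the tail discard after the loop
 * covers the remainder.  *discarded_bytes ends up at 8MiB - 4KiB; only the
 * superblock range itself stays untrimmed.
 */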
2015 
2016 int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
2017 			 u64 num_bytes, u64 *actual_bytes)
2018 {
2019 	int ret;
2020 	u64 discarded_bytes = 0;
2021 	struct btrfs_bio *bbio = NULL;
2022 
2023 
2024 	/*
2025 	 * Avoid races with device replace and make sure our bbio has devices
2026 	 * associated to its stripes that don't go away while we are discarding.
2027 	 */
2028 	btrfs_bio_counter_inc_blocked(fs_info);
2029 	/* Tell the block device(s) that the sectors can be discarded */
2030 	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
2031 			      &bbio, 0);
2032 	/* Error condition is -ENOMEM */
2033 	if (!ret) {
2034 		struct btrfs_bio_stripe *stripe = bbio->stripes;
2035 		int i;
2036 
2037 
2038 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2039 			u64 bytes;
2040 			if (!stripe->dev->can_discard)
2041 				continue;
2042 
2043 			ret = btrfs_issue_discard(stripe->dev->bdev,
2044 						  stripe->physical,
2045 						  stripe->length,
2046 						  &bytes);
2047 			if (!ret)
2048 				discarded_bytes += bytes;
2049 			else if (ret != -EOPNOTSUPP)
2050 				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2051 
2052 			/*
2053 			 * Just in case we get back EOPNOTSUPP for some reason,
2054 			 * just ignore the return value so we don't screw up
2055 			 * people calling discard_extent.
2056 			 */
2057 			ret = 0;
2058 		}
2059 		btrfs_put_bbio(bbio);
2060 	}
2061 	btrfs_bio_counter_dec(fs_info);
2062 
2063 	if (actual_bytes)
2064 		*actual_bytes = discarded_bytes;
2065 
2066 
2067 	if (ret == -EOPNOTSUPP)
2068 		ret = 0;
2069 	return ret;
2070 }
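/*
 * Usage sketch (illustrative only): callers that free whole extents, for
 * example the unpin path when mounted with -o discard, only care about how
 * much actually reached the devices:
 *
 *	u64 trimmed = 0;
 *	ret = btrfs_discard_extent(fs_info, bytenr, num_bytes, &trimmed);
 *
 * Devices that cannot discard are skipped and -EOPNOTSUPP is swallowed, so
 * a zero return with trimmed == 0 simply means nothing was trimmed.
 */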
2071 
2072 /* Can return -ENOMEM */
2073 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2074 			 struct btrfs_fs_info *fs_info,
2075 			 u64 bytenr, u64 num_bytes, u64 parent,
2076 			 u64 root_objectid, u64 owner, u64 offset)
2077 {
2078 	int ret;
2079 
2080 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2081 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
2082 
2083 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2084 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2085 					num_bytes,
2086 					parent, root_objectid, (int)owner,
2087 					BTRFS_ADD_DELAYED_REF, NULL);
2088 	} else {
2089 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2090 					num_bytes, parent, root_objectid,
2091 					owner, offset, 0,
2092 					BTRFS_ADD_DELAYED_REF, NULL);
2093 	}
2094 	return ret;
2095 }
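/*
 * Illustrative examples (assumed values, not from the original source):
 * 'owner' selects the branch above.  Tree blocks use their level as owner,
 * which is always below BTRFS_FIRST_FREE_OBJECTID (256):
 *
 *	btrfs_inc_extent_ref(trans, fs_info, bytenr, nodesize, 0,
 *			     root_objectid, level, 0);
 *
 * while a file extent passes the inode number as owner and the file offset
 * (minus the extent's data offset) as offset, which queues a delayed data
 * ref instead.
 */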
2096 
2097 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2098 				  struct btrfs_fs_info *fs_info,
2099 				  struct btrfs_delayed_ref_node *node,
2100 				  u64 parent, u64 root_objectid,
2101 				  u64 owner, u64 offset, int refs_to_add,
2102 				  struct btrfs_delayed_extent_op *extent_op)
2103 {
2104 	struct btrfs_path *path;
2105 	struct extent_buffer *leaf;
2106 	struct btrfs_extent_item *item;
2107 	struct btrfs_key key;
2108 	u64 bytenr = node->bytenr;
2109 	u64 num_bytes = node->num_bytes;
2110 	u64 refs;
2111 	int ret;
2112 
2113 	path = btrfs_alloc_path();
2114 	if (!path)
2115 		return -ENOMEM;
2116 
2117 	path->reada = READA_FORWARD;
2118 	path->leave_spinning = 1;
2119 	/* this will set up the path even if it fails to insert the back ref */
2120 	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2121 					   bytenr, num_bytes, parent,
2122 					   root_objectid, owner, offset,
2123 					   refs_to_add, extent_op);
2124 	if ((ret < 0 && ret != -EAGAIN) || !ret)
2125 		goto out;
2126 
2127 	/*
2128 	 * Ok we had -EAGAIN which means we didn't have space to insert an
2129 	 * inline extent ref, so just update the reference count and add a
2130 	 * normal backref.
2131 	 */
2132 	leaf = path->nodes[0];
2133 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2134 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2135 	refs = btrfs_extent_refs(leaf, item);
2136 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2137 	if (extent_op)
2138 		__run_delayed_extent_op(extent_op, leaf, item);
2139 
2140 	btrfs_mark_buffer_dirty(leaf);
2141 	btrfs_release_path(path);
2142 
2143 	path->reada = READA_FORWARD;
2144 	path->leave_spinning = 1;
2145 	/* now insert the actual backref */
2146 	ret = insert_extent_backref(trans, fs_info->extent_root,
2147 				    path, bytenr, parent, root_objectid,
2148 				    owner, offset, refs_to_add);
2149 	if (ret)
2150 		btrfs_abort_transaction(trans, ret);
2151 out:
2152 	btrfs_free_path(path);
2153 	return ret;
2154 }
2155 
2156 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2157 				struct btrfs_fs_info *fs_info,
2158 				struct btrfs_delayed_ref_node *node,
2159 				struct btrfs_delayed_extent_op *extent_op,
2160 				int insert_reserved)
2161 {
2162 	int ret = 0;
2163 	struct btrfs_delayed_data_ref *ref;
2164 	struct btrfs_key ins;
2165 	u64 parent = 0;
2166 	u64 ref_root = 0;
2167 	u64 flags = 0;
2168 
2169 	ins.objectid = node->bytenr;
2170 	ins.offset = node->num_bytes;
2171 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2172 
2173 	ref = btrfs_delayed_node_to_data_ref(node);
2174 	trace_run_delayed_data_ref(fs_info, node, ref, node->action);
2175 
2176 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2177 		parent = ref->parent;
2178 	ref_root = ref->root;
2179 
2180 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2181 		if (extent_op)
2182 			flags |= extent_op->flags_to_set;
2183 		ret = alloc_reserved_file_extent(trans, fs_info,
2184 						 parent, ref_root, flags,
2185 						 ref->objectid, ref->offset,
2186 						 &ins, node->ref_mod);
2187 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2188 		ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
2189 					     ref_root, ref->objectid,
2190 					     ref->offset, node->ref_mod,
2191 					     extent_op);
2192 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2193 		ret = __btrfs_free_extent(trans, fs_info, node, parent,
2194 					  ref_root, ref->objectid,
2195 					  ref->offset, node->ref_mod,
2196 					  extent_op);
2197 	} else {
2198 		BUG();
2199 	}
2200 	return ret;
2201 }
2202 
2203 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2204 				    struct extent_buffer *leaf,
2205 				    struct btrfs_extent_item *ei)
2206 {
2207 	u64 flags = btrfs_extent_flags(leaf, ei);
2208 	if (extent_op->update_flags) {
2209 		flags |= extent_op->flags_to_set;
2210 		btrfs_set_extent_flags(leaf, ei, flags);
2211 	}
2212 
2213 	if (extent_op->update_key) {
2214 		struct btrfs_tree_block_info *bi;
2215 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2216 		bi = (struct btrfs_tree_block_info *)(ei + 1);
2217 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2218 	}
2219 }
2220 
2221 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2222 				 struct btrfs_fs_info *fs_info,
2223 				 struct btrfs_delayed_ref_node *node,
2224 				 struct btrfs_delayed_extent_op *extent_op)
2225 {
2226 	struct btrfs_key key;
2227 	struct btrfs_path *path;
2228 	struct btrfs_extent_item *ei;
2229 	struct extent_buffer *leaf;
2230 	u32 item_size;
2231 	int ret;
2232 	int err = 0;
2233 	int metadata = !extent_op->is_data;
2234 
2235 	if (trans->aborted)
2236 		return 0;
2237 
2238 	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2239 		metadata = 0;
2240 
2241 	path = btrfs_alloc_path();
2242 	if (!path)
2243 		return -ENOMEM;
2244 
2245 	key.objectid = node->bytenr;
2246 
2247 	if (metadata) {
2248 		key.type = BTRFS_METADATA_ITEM_KEY;
2249 		key.offset = extent_op->level;
2250 	} else {
2251 		key.type = BTRFS_EXTENT_ITEM_KEY;
2252 		key.offset = node->num_bytes;
2253 	}
2254 
2255 again:
2256 	path->reada = READA_FORWARD;
2257 	path->leave_spinning = 1;
2258 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
2259 	if (ret < 0) {
2260 		err = ret;
2261 		goto out;
2262 	}
2263 	if (ret > 0) {
2264 		if (metadata) {
2265 			if (path->slots[0] > 0) {
2266 				path->slots[0]--;
2267 				btrfs_item_key_to_cpu(path->nodes[0], &key,
2268 						      path->slots[0]);
2269 				if (key.objectid == node->bytenr &&
2270 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
2271 				    key.offset == node->num_bytes)
2272 					ret = 0;
2273 			}
2274 			if (ret > 0) {
2275 				btrfs_release_path(path);
2276 				metadata = 0;
2277 
2278 				key.objectid = node->bytenr;
2279 				key.offset = node->num_bytes;
2280 				key.type = BTRFS_EXTENT_ITEM_KEY;
2281 				goto again;
2282 			}
2283 		} else {
2284 			err = -EIO;
2285 			goto out;
2286 		}
2287 	}
2288 
2289 	leaf = path->nodes[0];
2290 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2291 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2292 	if (item_size < sizeof(*ei)) {
2293 		ret = convert_extent_item_v0(trans, fs_info->extent_root,
2294 					     path, (u64)-1, 0);
2295 		if (ret < 0) {
2296 			err = ret;
2297 			goto out;
2298 		}
2299 		leaf = path->nodes[0];
2300 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2301 	}
2302 #endif
2303 	BUG_ON(item_size < sizeof(*ei));
2304 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2305 	__run_delayed_extent_op(extent_op, leaf, ei);
2306 
2307 	btrfs_mark_buffer_dirty(leaf);
2308 out:
2309 	btrfs_free_path(path);
2310 	return err;
2311 }
2312 
2313 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2314 				struct btrfs_fs_info *fs_info,
2315 				struct btrfs_delayed_ref_node *node,
2316 				struct btrfs_delayed_extent_op *extent_op,
2317 				int insert_reserved)
2318 {
2319 	int ret = 0;
2320 	struct btrfs_delayed_tree_ref *ref;
2321 	struct btrfs_key ins;
2322 	u64 parent = 0;
2323 	u64 ref_root = 0;
2324 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
2325 
2326 	ref = btrfs_delayed_node_to_tree_ref(node);
2327 	trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
2328 
2329 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2330 		parent = ref->parent;
2331 	ref_root = ref->root;
2332 
2333 	ins.objectid = node->bytenr;
2334 	if (skinny_metadata) {
2335 		ins.offset = ref->level;
2336 		ins.type = BTRFS_METADATA_ITEM_KEY;
2337 	} else {
2338 		ins.offset = node->num_bytes;
2339 		ins.type = BTRFS_EXTENT_ITEM_KEY;
2340 	}
2341 
2342 	if (node->ref_mod != 1) {
2343 		btrfs_err(fs_info,
2344 	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2345 			  node->bytenr, node->ref_mod, node->action, ref_root,
2346 			  parent);
2347 		return -EIO;
2348 	}
2349 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2350 		BUG_ON(!extent_op || !extent_op->update_flags);
2351 		ret = alloc_reserved_tree_block(trans, fs_info,
2352 						parent, ref_root,
2353 						extent_op->flags_to_set,
2354 						&extent_op->key,
2355 						ref->level, &ins);
2356 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2357 		ret = __btrfs_inc_extent_ref(trans, fs_info, node,
2358 					     parent, ref_root,
2359 					     ref->level, 0, 1,
2360 					     extent_op);
2361 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2362 		ret = __btrfs_free_extent(trans, fs_info, node,
2363 					  parent, ref_root,
2364 					  ref->level, 0, 1, extent_op);
2365 	} else {
2366 		BUG();
2367 	}
2368 	return ret;
2369 }
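/*
 * Key layout sketch for the two cases above (illustrative bytenr): with the
 * SKINNY_METADATA incompat bit a level-1 node at 12582912 is keyed as
 *
 *	(12582912, BTRFS_METADATA_ITEM_KEY, 1)
 *
 * i.e. the key offset carries the level, while the classic form
 * (12582912, BTRFS_EXTENT_ITEM_KEY, nodesize) carries the block's byte
 * length instead.
 */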
2370 
2371 /* helper function to actually process a single delayed ref entry */
2372 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2373 			       struct btrfs_fs_info *fs_info,
2374 			       struct btrfs_delayed_ref_node *node,
2375 			       struct btrfs_delayed_extent_op *extent_op,
2376 			       int insert_reserved)
2377 {
2378 	int ret = 0;
2379 
2380 	if (trans->aborted) {
2381 		if (insert_reserved)
2382 			btrfs_pin_extent(fs_info, node->bytenr,
2383 					 node->num_bytes, 1);
2384 		return 0;
2385 	}
2386 
2387 	if (btrfs_delayed_ref_is_head(node)) {
2388 		struct btrfs_delayed_ref_head *head;
2389 		/*
2390 		 * we've hit the end of the chain and we were supposed
2391 		 * to insert this extent into the tree.  But, it got
2392 		 * deleted before we ever needed to insert it, so all
2393 		 * we have to do is clean up the accounting
2394 		 */
2395 		BUG_ON(extent_op);
2396 		head = btrfs_delayed_node_to_head(node);
2397 		trace_run_delayed_ref_head(fs_info, node, head, node->action);
2398 
2399 		if (insert_reserved) {
2400 			btrfs_pin_extent(fs_info, node->bytenr,
2401 					 node->num_bytes, 1);
2402 			if (head->is_data) {
2403 				ret = btrfs_del_csums(trans, fs_info,
2404 						      node->bytenr,
2405 						      node->num_bytes);
2406 			}
2407 		}
2408 
2409 		/* Also free its reserved qgroup space */
2410 		btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
2411 					      head->qgroup_reserved);
2412 		return ret;
2413 	}
2414 
2415 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2416 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2417 		ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
2418 					   insert_reserved);
2419 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2420 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2421 		ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
2422 					   insert_reserved);
2423 	else
2424 		BUG();
2425 	return ret;
2426 }
2427 
2428 static inline struct btrfs_delayed_ref_node *
2429 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2430 {
2431 	struct btrfs_delayed_ref_node *ref;
2432 
2433 	if (list_empty(&head->ref_list))
2434 		return NULL;
2435 
2436 	/*
2437 	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2438 	 * This is to prevent a ref count from going down to zero, which deletes
2439 	 * the extent item from the extent tree, when there still are references
2440 	 * to add, which would fail because they would not find the extent item.
2441 	 */
2442 	if (!list_empty(&head->ref_add_list))
2443 		return list_first_entry(&head->ref_add_list,
2444 				struct btrfs_delayed_ref_node, add_list);
2445 
2446 	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
2447 			       list);
2448 	ASSERT(list_empty(&ref->add_list));
2449 	return ref;
2450 }
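/*
 * Example of why the add list is preferred (hypothetical sequence): an
 * extent with a single on-disk reference may have queued
 *
 *	BTRFS_DROP_DELAYED_REF	(relocation drops the old ref)
 *	BTRFS_ADD_DELAYED_REF	(the ref is added back)
 *
 * Running the drop first would hit refcount zero and delete the extent
 * item, so the later add could no longer find it.  Picking from
 * ref_add_list first keeps the refcount above zero until all pending adds
 * have run.
 */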
2451 
2452 /*
2453  * Returns 0 on success or if called with an already aborted transaction.
2454  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2455  */
2456 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2457 					     struct btrfs_fs_info *fs_info,
2458 					     unsigned long nr)
2459 {
2460 	struct btrfs_delayed_ref_root *delayed_refs;
2461 	struct btrfs_delayed_ref_node *ref;
2462 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2463 	struct btrfs_delayed_extent_op *extent_op;
2464 	ktime_t start = ktime_get();
2465 	int ret;
2466 	unsigned long count = 0;
2467 	unsigned long actual_count = 0;
2468 	int must_insert_reserved = 0;
2469 
2470 	delayed_refs = &trans->transaction->delayed_refs;
2471 	while (1) {
2472 		if (!locked_ref) {
2473 			if (count >= nr)
2474 				break;
2475 
2476 			spin_lock(&delayed_refs->lock);
2477 			locked_ref = btrfs_select_ref_head(trans);
2478 			if (!locked_ref) {
2479 				spin_unlock(&delayed_refs->lock);
2480 				break;
2481 			}
2482 
2483 			/* grab the lock that says we are going to process
2484 			 * all the refs for this head */
2485 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2486 			spin_unlock(&delayed_refs->lock);
2487 			/*
2488 			 * we may have dropped the spin lock to get the head
2489 			 * mutex lock, and that might have given someone else
2490 			 * time to free the head.  If that's true, it has been
2491 			 * removed from our list and we can move on.
2492 			 */
2493 			if (ret == -EAGAIN) {
2494 				locked_ref = NULL;
2495 				count++;
2496 				continue;
2497 			}
2498 		}
2499 
2500 		/*
2501 		 * We need to try and merge add/drops of the same ref since we
2502 		 * can run into issues with relocate dropping the implicit ref
2503 		 * and then it being added back again before the drop can
2504 		 * finish.  If we merged anything we need to re-loop so we can
2505 		 * get a good ref.
2506 		 * Or we can get node references of the same type that weren't
2507 		 * merged when created due to bumps in the tree mod seq, and
2508 		 * we need to merge them to prevent adding an inline extent
2509 		 * backref before dropping it (triggering a BUG_ON at
2510 		 * insert_inline_extent_backref()).
2511 		 */
2512 		spin_lock(&locked_ref->lock);
2513 		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2514 					 locked_ref);
2515 
2516 		/*
2517 		 * locked_ref is the head node, so we have to go one
2518 		 * node back for any delayed ref updates
2519 		 */
2520 		ref = select_delayed_ref(locked_ref);
2521 
2522 		if (ref && ref->seq &&
2523 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2524 			spin_unlock(&locked_ref->lock);
2525 			spin_lock(&delayed_refs->lock);
2526 			locked_ref->processing = 0;
2527 			delayed_refs->num_heads_ready++;
2528 			spin_unlock(&delayed_refs->lock);
2529 			btrfs_delayed_ref_unlock(locked_ref);
2530 			locked_ref = NULL;
2531 			cond_resched();
2532 			count++;
2533 			continue;
2534 		}
2535 
2536 		/*
2537 		 * record the must insert reserved flag before we
2538 		 * drop the spin lock.
2539 		 */
2540 		must_insert_reserved = locked_ref->must_insert_reserved;
2541 		locked_ref->must_insert_reserved = 0;
2542 
2543 		extent_op = locked_ref->extent_op;
2544 		locked_ref->extent_op = NULL;
2545 
2546 		if (!ref) {
2547 
2548 
2549 			/* All delayed refs have been processed, go ahead
2550 			 * and send the head node to run_one_delayed_ref,
2551 			 * so that any accounting fixes can happen.
2552 			 */
2553 			ref = &locked_ref->node;
2554 
2555 			if (extent_op && must_insert_reserved) {
2556 				btrfs_free_delayed_extent_op(extent_op);
2557 				extent_op = NULL;
2558 			}
2559 
2560 			if (extent_op) {
2561 				spin_unlock(&locked_ref->lock);
2562 				ret = run_delayed_extent_op(trans, fs_info,
2563 							    ref, extent_op);
2564 				btrfs_free_delayed_extent_op(extent_op);
2565 
2566 				if (ret) {
2567 					/*
2568 					 * Need to reset must_insert_reserved if
2569 					 * there was an error so the abort stuff
2570 					 * can cleanup the reserved space
2571 					 * properly.
2572 					 */
2573 					if (must_insert_reserved)
2574 						locked_ref->must_insert_reserved = 1;
2575 					spin_lock(&delayed_refs->lock);
2576 					locked_ref->processing = 0;
2577 					delayed_refs->num_heads_ready++;
2578 					spin_unlock(&delayed_refs->lock);
2579 					btrfs_debug(fs_info,
2580 						    "run_delayed_extent_op returned %d",
2581 						    ret);
2582 					btrfs_delayed_ref_unlock(locked_ref);
2583 					return ret;
2584 				}
2585 				continue;
2586 			}
2587 
2588 			/*
2589 			 * Need to drop our head ref lock and re-acquire the
2590 			 * delayed ref lock and then re-check to make sure
2591 			 * nobody got added.
2592 			 */
2593 			spin_unlock(&locked_ref->lock);
2594 			spin_lock(&delayed_refs->lock);
2595 			spin_lock(&locked_ref->lock);
2596 			if (!list_empty(&locked_ref->ref_list) ||
2597 			    locked_ref->extent_op) {
2598 				spin_unlock(&locked_ref->lock);
2599 				spin_unlock(&delayed_refs->lock);
2600 				continue;
2601 			}
2602 			ref->in_tree = 0;
2603 			delayed_refs->num_heads--;
2604 			rb_erase(&locked_ref->href_node,
2605 				 &delayed_refs->href_root);
2606 			spin_unlock(&delayed_refs->lock);
2607 		} else {
2608 			actual_count++;
2609 			ref->in_tree = 0;
2610 			list_del(&ref->list);
2611 			if (!list_empty(&ref->add_list))
2612 				list_del(&ref->add_list);
2613 		}
2614 		atomic_dec(&delayed_refs->num_entries);
2615 
2616 		if (!btrfs_delayed_ref_is_head(ref)) {
2617 			/*
2618 			 * when we play the delayed ref, also correct the
2619 			 * ref_mod on head
2620 			 */
2621 			switch (ref->action) {
2622 			case BTRFS_ADD_DELAYED_REF:
2623 			case BTRFS_ADD_DELAYED_EXTENT:
2624 				locked_ref->node.ref_mod -= ref->ref_mod;
2625 				break;
2626 			case BTRFS_DROP_DELAYED_REF:
2627 				locked_ref->node.ref_mod += ref->ref_mod;
2628 				break;
2629 			default:
2630 				WARN_ON(1);
2631 			}
2632 		}
2633 		spin_unlock(&locked_ref->lock);
2634 
2635 		ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
2636 					  must_insert_reserved);
2637 
2638 		btrfs_free_delayed_extent_op(extent_op);
2639 		if (ret) {
2640 			spin_lock(&delayed_refs->lock);
2641 			locked_ref->processing = 0;
2642 			delayed_refs->num_heads_ready++;
2643 			spin_unlock(&delayed_refs->lock);
2644 			btrfs_delayed_ref_unlock(locked_ref);
2645 			btrfs_put_delayed_ref(ref);
2646 			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
2647 				    ret);
2648 			return ret;
2649 		}
2650 
2651 		/*
2652 		 * If this node is a head, that means all the refs in this head
2653 		 * have been dealt with, and we will pick the next head to deal
2654 		 * with, so we must unlock the head and drop it from the cluster
2655 		 * list before we release it.
2656 		 */
2657 		if (btrfs_delayed_ref_is_head(ref)) {
2658 			if (locked_ref->is_data &&
2659 			    locked_ref->total_ref_mod < 0) {
2660 				spin_lock(&delayed_refs->lock);
2661 				delayed_refs->pending_csums -= ref->num_bytes;
2662 				spin_unlock(&delayed_refs->lock);
2663 			}
2664 			btrfs_delayed_ref_unlock(locked_ref);
2665 			locked_ref = NULL;
2666 		}
2667 		btrfs_put_delayed_ref(ref);
2668 		count++;
2669 		cond_resched();
2670 	}
2671 
2672 	/*
2673 	 * We don't want to include ref heads since we can have empty ref heads,
2674 	 * and those would drastically skew our runtime down because we just do
2675 	 * accounting, no actual extent tree updates.
2676 	 */
2677 	if (actual_count > 0) {
2678 		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2679 		u64 avg;
2680 
2681 		/*
2682 		 * We weigh the current average higher than our current runtime
2683 		 * to avoid large swings in the average.
2684 		 */
2685 		spin_lock(&delayed_refs->lock);
2686 		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2687 		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
2688 		spin_unlock(&delayed_refs->lock);
2689 	}
2690 	return 0;
2691 }
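/*
 * Worked example for the running-average update above (made-up numbers):
 * with avg_delayed_ref_runtime == 1,000,000ns and a batch that finished in
 * runtime == 200,000ns,
 *
 *	avg = 1,000,000 * 3 + 200,000 = 3,200,000;  avg >> 2 = 800,000
 *
 * so a single fast (or slow) batch only moves the stored average by a
 * quarter of the difference, keeping the throttling heuristics stable.
 */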
2692 
2693 #ifdef SCRAMBLE_DELAYED_REFS
2694 /*
2695  * Normally delayed refs get processed in ascending bytenr order. This
2696  * correlates in most cases to the order added. To expose dependencies on this
2697  * order, we start to process the tree in the middle instead of the beginning
2698  */
2699 static u64 find_middle(struct rb_root *root)
2700 {
2701 	struct rb_node *n = root->rb_node;
2702 	struct btrfs_delayed_ref_node *entry;
2703 	int alt = 1;
2704 	u64 middle;
2705 	u64 first = 0, last = 0;
2706 
2707 	n = rb_first(root);
2708 	if (n) {
2709 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2710 		first = entry->bytenr;
2711 	}
2712 	n = rb_last(root);
2713 	if (n) {
2714 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2715 		last = entry->bytenr;
2716 	}
2717 	n = root->rb_node;
2718 
2719 	while (n) {
2720 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2721 		WARN_ON(!entry->in_tree);
2722 
2723 		middle = entry->bytenr;
2724 
2725 		if (alt)
2726 			n = n->rb_left;
2727 		else
2728 			n = n->rb_right;
2729 
2730 		alt = 1 - alt;
2731 	}
2732 	return middle;
2733 }
2734 #endif
2735 
2736 static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
2737 {
2738 	u64 num_bytes;
2739 
2740 	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2741 			     sizeof(struct btrfs_extent_inline_ref));
2742 	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2743 		num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2744 
2745 	/*
2746 	 * We don't ever fill up leaves all the way, so the caller doubles this
2747 	 * estimate to get closer to what we're really going to want to use.
2748 	 */
2749 	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
2750 }
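/*
 * Rough worked example (sizes are assumptions for illustration): with
 * skinny metadata each head costs about sizeof(btrfs_extent_item) +
 * sizeof(btrfs_extent_inline_ref) = 24 + 9 = 33 bytes, so 1000 heads need
 * ~33,000 bytes of extent tree items.  With a 16KiB nodesize the usable
 * leaf data area is a bit under 16KiB, which works out to roughly 2 leaves
 * here, before the caller doubles the resulting reservation.
 */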
2751 
2752 /*
2753  * Takes the number of bytes to be csumm'ed and figures out how many leaves it
2754  * would require to store the csums for that many bytes.
2755  */
2756 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
2757 {
2758 	u64 csum_size;
2759 	u64 num_csums_per_leaf;
2760 	u64 num_csums;
2761 
2762 	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
2763 	num_csums_per_leaf = div64_u64(csum_size,
2764 			(u64)btrfs_super_csum_size(fs_info->super_copy));
2765 	num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
2766 	num_csums += num_csums_per_leaf - 1;
2767 	num_csums = div64_u64(num_csums, num_csums_per_leaf);
2768 	return num_csums;
2769 }
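/*
 * Worked example (illustrative, assuming 4KiB sectors, 4 byte crc32c csums
 * and a max item size a bit under 16KiB): one leaf holds roughly 16KiB / 4
 * = ~4000 csums, so 1GiB of csum_bytes covers 262,144 sectors and rounds
 * up to roughly 65 leaves worth of csum items.
 */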
2770 
2771 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2772 				       struct btrfs_fs_info *fs_info)
2773 {
2774 	struct btrfs_block_rsv *global_rsv;
2775 	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2776 	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2777 	u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2778 	u64 num_bytes, num_dirty_bgs_bytes;
2779 	int ret = 0;
2780 
2781 	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
2782 	num_heads = heads_to_leaves(fs_info, num_heads);
2783 	if (num_heads > 1)
2784 		num_bytes += (num_heads - 1) * fs_info->nodesize;
2785 	num_bytes <<= 1;
2786 	num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
2787 							fs_info->nodesize;
2788 	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
2789 							     num_dirty_bgs);
2790 	global_rsv = &fs_info->global_block_rsv;
2791 
2792 	/*
2793 	 * If we can't allocate any more chunks, let's make sure we have _lots_ of
2794 	 * wiggle room since running delayed refs can create more delayed refs.
2795 	 */
2796 	if (global_rsv->space_info->full) {
2797 		num_dirty_bgs_bytes <<= 1;
2798 		num_bytes <<= 1;
2799 	}
2800 
2801 	spin_lock(&global_rsv->lock);
2802 	if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2803 		ret = 1;
2804 	spin_unlock(&global_rsv->lock);
2805 	return ret;
2806 }
2807 
2808 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2809 				       struct btrfs_fs_info *fs_info)
2810 {
2811 	u64 num_entries =
2812 		atomic_read(&trans->transaction->delayed_refs.num_entries);
2813 	u64 avg_runtime;
2814 	u64 val;
2815 
2816 	smp_mb();
2817 	avg_runtime = fs_info->avg_delayed_ref_runtime;
2818 	val = num_entries * avg_runtime;
2819 	if (val >= NSEC_PER_SEC)
2820 		return 1;
2821 	if (val >= NSEC_PER_SEC / 2)
2822 		return 2;
2823 
2824 	return btrfs_check_space_for_delayed_refs(trans, fs_info);
2825 }
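/*
 * Example thresholds (made-up numbers): with avg_delayed_ref_runtime of
 * 10,000ns, 150,000 queued entries give val = 1.5s >= NSEC_PER_SEC and the
 * caller is told to throttle hard (1); 60,000 entries give 0.6s, which only
 * trips the softer NSEC_PER_SEC / 2 case (2); anything below that falls
 * through to the global reservation check.
 */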
2826 
2827 struct async_delayed_refs {
2828 	struct btrfs_root *root;
2829 	u64 transid;
2830 	int count;
2831 	int error;
2832 	int sync;
2833 	struct completion wait;
2834 	struct btrfs_work work;
2835 };
2836 
2837 static inline struct async_delayed_refs *
2838 to_async_delayed_refs(struct btrfs_work *work)
2839 {
2840 	return container_of(work, struct async_delayed_refs, work);
2841 }
2842 
2843 static void delayed_ref_async_start(struct btrfs_work *work)
2844 {
2845 	struct async_delayed_refs *async = to_async_delayed_refs(work);
2846 	struct btrfs_trans_handle *trans;
2847 	struct btrfs_fs_info *fs_info = async->root->fs_info;
2848 	int ret;
2849 
2850 	/* if the commit is already started, we don't need to wait here */
2851 	if (btrfs_transaction_blocked(fs_info))
2852 		goto done;
2853 
2854 	trans = btrfs_join_transaction(async->root);
2855 	if (IS_ERR(trans)) {
2856 		async->error = PTR_ERR(trans);
2857 		goto done;
2858 	}
2859 
2860 	/*
2861 	 * trans->sync means that when we call end_transaction, we won't
2862 	 * wait on delayed refs
2863 	 */
2864 	trans->sync = true;
2865 
2866 	/* Don't bother flushing if we got into a different transaction */
2867 	if (trans->transid > async->transid)
2868 		goto end;
2869 
2870 	ret = btrfs_run_delayed_refs(trans, fs_info, async->count);
2871 	if (ret)
2872 		async->error = ret;
2873 end:
2874 	ret = btrfs_end_transaction(trans);
2875 	if (ret && !async->error)
2876 		async->error = ret;
2877 done:
2878 	if (async->sync)
2879 		complete(&async->wait);
2880 	else
2881 		kfree(async);
2882 }
2883 
2884 int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
2885 				 unsigned long count, u64 transid, int wait)
2886 {
2887 	struct async_delayed_refs *async;
2888 	int ret;
2889 
2890 	async = kmalloc(sizeof(*async), GFP_NOFS);
2891 	if (!async)
2892 		return -ENOMEM;
2893 
2894 	async->root = fs_info->tree_root;
2895 	async->count = count;
2896 	async->error = 0;
2897 	async->transid = transid;
2898 	if (wait)
2899 		async->sync = 1;
2900 	else
2901 		async->sync = 0;
2902 	init_completion(&async->wait);
2903 
2904 	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2905 			delayed_ref_async_start, NULL, NULL);
2906 
2907 	btrfs_queue_work(fs_info->extent_workers, &async->work);
2908 
2909 	if (wait) {
2910 		wait_for_completion(&async->wait);
2911 		ret = async->error;
2912 		kfree(async);
2913 		return ret;
2914 	}
2915 	return 0;
2916 }
2917 
2918 /*
2919  * this starts processing the delayed reference count updates and
2920  * extent insertions we have queued up so far.  count can be
2921  * 0, which means to process everything in the tree at the start
2922  * of the run (but not newly added entries), or it can be some target
2923  * number you'd like to process.
2924  *
2925  * Returns 0 on success or if called with an aborted transaction
2926  * Returns <0 on error and aborts the transaction
2927  */
2928 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2929 			   struct btrfs_fs_info *fs_info, unsigned long count)
2930 {
2931 	struct rb_node *node;
2932 	struct btrfs_delayed_ref_root *delayed_refs;
2933 	struct btrfs_delayed_ref_head *head;
2934 	int ret;
2935 	int run_all = count == (unsigned long)-1;
2936 	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2937 
2938 	/* We'll clean this up in btrfs_cleanup_transaction */
2939 	if (trans->aborted)
2940 		return 0;
2941 
2942 	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2943 		return 0;
2944 
2945 	delayed_refs = &trans->transaction->delayed_refs;
2946 	if (count == 0)
2947 		count = atomic_read(&delayed_refs->num_entries) * 2;
2948 
2949 again:
2950 #ifdef SCRAMBLE_DELAYED_REFS
2951 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2952 #endif
2953 	trans->can_flush_pending_bgs = false;
2954 	ret = __btrfs_run_delayed_refs(trans, fs_info, count);
2955 	if (ret < 0) {
2956 		btrfs_abort_transaction(trans, ret);
2957 		return ret;
2958 	}
2959 
2960 	if (run_all) {
2961 		if (!list_empty(&trans->new_bgs))
2962 			btrfs_create_pending_block_groups(trans, fs_info);
2963 
2964 		spin_lock(&delayed_refs->lock);
2965 		node = rb_first(&delayed_refs->href_root);
2966 		if (!node) {
2967 			spin_unlock(&delayed_refs->lock);
2968 			goto out;
2969 		}
2970 
2971 		while (node) {
2972 			head = rb_entry(node, struct btrfs_delayed_ref_head,
2973 					href_node);
2974 			if (btrfs_delayed_ref_is_head(&head->node)) {
2975 				struct btrfs_delayed_ref_node *ref;
2976 
2977 				ref = &head->node;
2978 				atomic_inc(&ref->refs);
2979 
2980 				spin_unlock(&delayed_refs->lock);
2981 				/*
2982 				 * Mutex was contended, block until it's
2983 				 * released and try again
2984 				 */
2985 				mutex_lock(&head->mutex);
2986 				mutex_unlock(&head->mutex);
2987 
2988 				btrfs_put_delayed_ref(ref);
2989 				cond_resched();
2990 				goto again;
2991 			} else {
2992 				WARN_ON(1);
2993 			}
2994 			node = rb_next(node);
2995 		}
2996 		spin_unlock(&delayed_refs->lock);
2997 		cond_resched();
2998 		goto again;
2999 	}
3000 out:
3001 	assert_qgroups_uptodate(trans);
3002 	trans->can_flush_pending_bgs = can_flush_pending_bgs;
3003 	return 0;
3004 }
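/*
 * Usage sketch for the count argument (illustrative): a caller that only
 * wants to drain a bounded amount of work passes a target such as
 *
 *	btrfs_run_delayed_refs(trans, fs_info, 64);
 *
 * count == 0 is turned into twice the number of currently queued entries,
 * and (unsigned long)-1 selects run_all, which keeps looping (and creating
 * pending block groups) until the delayed ref tree is empty.
 */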
3005 
3006 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3007 				struct btrfs_fs_info *fs_info,
3008 				u64 bytenr, u64 num_bytes, u64 flags,
3009 				int level, int is_data)
3010 {
3011 	struct btrfs_delayed_extent_op *extent_op;
3012 	int ret;
3013 
3014 	extent_op = btrfs_alloc_delayed_extent_op();
3015 	if (!extent_op)
3016 		return -ENOMEM;
3017 
3018 	extent_op->flags_to_set = flags;
3019 	extent_op->update_flags = true;
3020 	extent_op->update_key = false;
3021 	extent_op->is_data = is_data ? true : false;
3022 	extent_op->level = level;
3023 
3024 	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
3025 					  num_bytes, extent_op);
3026 	if (ret)
3027 		btrfs_free_delayed_extent_op(extent_op);
3028 	return ret;
3029 }
3030 
3031 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3032 				      struct btrfs_root *root,
3033 				      struct btrfs_path *path,
3034 				      u64 objectid, u64 offset, u64 bytenr)
3035 {
3036 	struct btrfs_delayed_ref_head *head;
3037 	struct btrfs_delayed_ref_node *ref;
3038 	struct btrfs_delayed_data_ref *data_ref;
3039 	struct btrfs_delayed_ref_root *delayed_refs;
3040 	int ret = 0;
3041 
3042 	delayed_refs = &trans->transaction->delayed_refs;
3043 	spin_lock(&delayed_refs->lock);
3044 	head = btrfs_find_delayed_ref_head(trans, bytenr);
3045 	if (!head) {
3046 		spin_unlock(&delayed_refs->lock);
3047 		return 0;
3048 	}
3049 
3050 	if (!mutex_trylock(&head->mutex)) {
3051 		atomic_inc(&head->node.refs);
3052 		spin_unlock(&delayed_refs->lock);
3053 
3054 		btrfs_release_path(path);
3055 
3056 		/*
3057 		 * Mutex was contended, block until it's released and let
3058 		 * caller try again
3059 		 */
3060 		mutex_lock(&head->mutex);
3061 		mutex_unlock(&head->mutex);
3062 		btrfs_put_delayed_ref(&head->node);
3063 		return -EAGAIN;
3064 	}
3065 	spin_unlock(&delayed_refs->lock);
3066 
3067 	spin_lock(&head->lock);
3068 	list_for_each_entry(ref, &head->ref_list, list) {
3069 		/* If it's a shared ref we know a cross reference exists */
3070 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3071 			ret = 1;
3072 			break;
3073 		}
3074 
3075 		data_ref = btrfs_delayed_node_to_data_ref(ref);
3076 
3077 		/*
3078 		 * If our ref doesn't match the one we're currently looking at
3079 		 * then we have a cross reference.
3080 		 */
3081 		if (data_ref->root != root->root_key.objectid ||
3082 		    data_ref->objectid != objectid ||
3083 		    data_ref->offset != offset) {
3084 			ret = 1;
3085 			break;
3086 		}
3087 	}
3088 	spin_unlock(&head->lock);
3089 	mutex_unlock(&head->mutex);
3090 	return ret;
3091 }
3092 
3093 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3094 					struct btrfs_root *root,
3095 					struct btrfs_path *path,
3096 					u64 objectid, u64 offset, u64 bytenr)
3097 {
3098 	struct btrfs_fs_info *fs_info = root->fs_info;
3099 	struct btrfs_root *extent_root = fs_info->extent_root;
3100 	struct extent_buffer *leaf;
3101 	struct btrfs_extent_data_ref *ref;
3102 	struct btrfs_extent_inline_ref *iref;
3103 	struct btrfs_extent_item *ei;
3104 	struct btrfs_key key;
3105 	u32 item_size;
3106 	int ret;
3107 
3108 	key.objectid = bytenr;
3109 	key.offset = (u64)-1;
3110 	key.type = BTRFS_EXTENT_ITEM_KEY;
3111 
3112 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3113 	if (ret < 0)
3114 		goto out;
3115 	BUG_ON(ret == 0); /* Corruption */
3116 
3117 	ret = -ENOENT;
3118 	if (path->slots[0] == 0)
3119 		goto out;
3120 
3121 	path->slots[0]--;
3122 	leaf = path->nodes[0];
3123 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3124 
3125 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3126 		goto out;
3127 
3128 	ret = 1;
3129 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3130 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3131 	if (item_size < sizeof(*ei)) {
3132 		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3133 		goto out;
3134 	}
3135 #endif
3136 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3137 
3138 	if (item_size != sizeof(*ei) +
3139 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3140 		goto out;
3141 
3142 	if (btrfs_extent_generation(leaf, ei) <=
3143 	    btrfs_root_last_snapshot(&root->root_item))
3144 		goto out;
3145 
3146 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3147 	if (btrfs_extent_inline_ref_type(leaf, iref) !=
3148 	    BTRFS_EXTENT_DATA_REF_KEY)
3149 		goto out;
3150 
3151 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3152 	if (btrfs_extent_refs(leaf, ei) !=
3153 	    btrfs_extent_data_ref_count(leaf, ref) ||
3154 	    btrfs_extent_data_ref_root(leaf, ref) !=
3155 	    root->root_key.objectid ||
3156 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3157 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
3158 		goto out;
3159 
3160 	ret = 0;
3161 out:
3162 	return ret;
3163 }
3164 
3165 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3166 			  struct btrfs_root *root,
3167 			  u64 objectid, u64 offset, u64 bytenr)
3168 {
3169 	struct btrfs_path *path;
3170 	int ret;
3171 	int ret2;
3172 
3173 	path = btrfs_alloc_path();
3174 	if (!path)
3175 		return -ENOENT;
3176 
3177 	do {
3178 		ret = check_committed_ref(trans, root, path, objectid,
3179 					  offset, bytenr);
3180 		if (ret && ret != -ENOENT)
3181 			goto out;
3182 
3183 		ret2 = check_delayed_ref(trans, root, path, objectid,
3184 					 offset, bytenr);
3185 	} while (ret2 == -EAGAIN);
3186 
3187 	if (ret2 && ret2 != -ENOENT) {
3188 		ret = ret2;
3189 		goto out;
3190 	}
3191 
3192 	if (ret != -ENOENT || ret2 != -ENOENT)
3193 		ret = 0;
3194 out:
3195 	btrfs_free_path(path);
3196 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3197 		WARN_ON(ret > 0);
3198 	return ret;
3199 }
3200 
3201 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3202 			   struct btrfs_root *root,
3203 			   struct extent_buffer *buf,
3204 			   int full_backref, int inc)
3205 {
3206 	struct btrfs_fs_info *fs_info = root->fs_info;
3207 	u64 bytenr;
3208 	u64 num_bytes;
3209 	u64 parent;
3210 	u64 ref_root;
3211 	u32 nritems;
3212 	struct btrfs_key key;
3213 	struct btrfs_file_extent_item *fi;
3214 	int i;
3215 	int level;
3216 	int ret = 0;
3217 	int (*process_func)(struct btrfs_trans_handle *,
3218 			    struct btrfs_fs_info *,
3219 			    u64, u64, u64, u64, u64, u64);
3220 
3221 
3222 	if (btrfs_is_testing(fs_info))
3223 		return 0;
3224 
3225 	ref_root = btrfs_header_owner(buf);
3226 	nritems = btrfs_header_nritems(buf);
3227 	level = btrfs_header_level(buf);
3228 
3229 	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3230 		return 0;
3231 
3232 	if (inc)
3233 		process_func = btrfs_inc_extent_ref;
3234 	else
3235 		process_func = btrfs_free_extent;
3236 
3237 	if (full_backref)
3238 		parent = buf->start;
3239 	else
3240 		parent = 0;
3241 
3242 	for (i = 0; i < nritems; i++) {
3243 		if (level == 0) {
3244 			btrfs_item_key_to_cpu(buf, &key, i);
3245 			if (key.type != BTRFS_EXTENT_DATA_KEY)
3246 				continue;
3247 			fi = btrfs_item_ptr(buf, i,
3248 					    struct btrfs_file_extent_item);
3249 			if (btrfs_file_extent_type(buf, fi) ==
3250 			    BTRFS_FILE_EXTENT_INLINE)
3251 				continue;
3252 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3253 			if (bytenr == 0)
3254 				continue;
3255 
3256 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3257 			key.offset -= btrfs_file_extent_offset(buf, fi);
3258 			ret = process_func(trans, fs_info, bytenr, num_bytes,
3259 					   parent, ref_root, key.objectid,
3260 					   key.offset);
3261 			if (ret)
3262 				goto fail;
3263 		} else {
3264 			bytenr = btrfs_node_blockptr(buf, i);
3265 			num_bytes = fs_info->nodesize;
3266 			ret = process_func(trans, fs_info, bytenr, num_bytes,
3267 					   parent, ref_root, level - 1, 0);
3268 			if (ret)
3269 				goto fail;
3270 		}
3271 	}
3272 	return 0;
3273 fail:
3274 	return ret;
3275 }
3276 
3277 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3278 		  struct extent_buffer *buf, int full_backref)
3279 {
3280 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3281 }
3282 
3283 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3284 		  struct extent_buffer *buf, int full_backref)
3285 {
3286 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3287 }
3288 
3289 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3290 				 struct btrfs_fs_info *fs_info,
3291 				 struct btrfs_path *path,
3292 				 struct btrfs_block_group_cache *cache)
3293 {
3294 	int ret;
3295 	struct btrfs_root *extent_root = fs_info->extent_root;
3296 	unsigned long bi;
3297 	struct extent_buffer *leaf;
3298 
3299 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3300 	if (ret) {
3301 		if (ret > 0)
3302 			ret = -ENOENT;
3303 		goto fail;
3304 	}
3305 
3306 	leaf = path->nodes[0];
3307 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3308 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3309 	btrfs_mark_buffer_dirty(leaf);
3310 fail:
3311 	btrfs_release_path(path);
3312 	return ret;
3313 
3314 }
3315 
3316 static struct btrfs_block_group_cache *
3317 next_block_group(struct btrfs_fs_info *fs_info,
3318 		 struct btrfs_block_group_cache *cache)
3319 {
3320 	struct rb_node *node;
3321 
3322 	spin_lock(&fs_info->block_group_cache_lock);
3323 
3324 	/* If our block group was removed, we need a full search. */
3325 	if (RB_EMPTY_NODE(&cache->cache_node)) {
3326 		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3327 
3328 		spin_unlock(&fs_info->block_group_cache_lock);
3329 		btrfs_put_block_group(cache);
3330 		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
3331 	}
3332 	node = rb_next(&cache->cache_node);
3333 	btrfs_put_block_group(cache);
3334 	if (node) {
3335 		cache = rb_entry(node, struct btrfs_block_group_cache,
3336 				 cache_node);
3337 		btrfs_get_block_group(cache);
3338 	} else
3339 		cache = NULL;
3340 	spin_unlock(&fs_info->block_group_cache_lock);
3341 	return cache;
3342 }
3343 
3344 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3345 			    struct btrfs_trans_handle *trans,
3346 			    struct btrfs_path *path)
3347 {
3348 	struct btrfs_fs_info *fs_info = block_group->fs_info;
3349 	struct btrfs_root *root = fs_info->tree_root;
3350 	struct inode *inode = NULL;
3351 	u64 alloc_hint = 0;
3352 	int dcs = BTRFS_DC_ERROR;
3353 	u64 num_pages = 0;
3354 	int retries = 0;
3355 	int ret = 0;
3356 
3357 	/*
3358 	 * If this block group is smaller than 100 megs, don't bother caching the
3359 	 * block group.
3360 	 */
3361 	if (block_group->key.offset < (100 * SZ_1M)) {
3362 		spin_lock(&block_group->lock);
3363 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3364 		spin_unlock(&block_group->lock);
3365 		return 0;
3366 	}
3367 
3368 	if (trans->aborted)
3369 		return 0;
3370 again:
3371 	inode = lookup_free_space_inode(root, block_group, path);
3372 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3373 		ret = PTR_ERR(inode);
3374 		btrfs_release_path(path);
3375 		goto out;
3376 	}
3377 
3378 	if (IS_ERR(inode)) {
3379 		BUG_ON(retries);
3380 		retries++;
3381 
3382 		if (block_group->ro)
3383 			goto out_free;
3384 
3385 		ret = create_free_space_inode(root, trans, block_group, path);
3386 		if (ret)
3387 			goto out_free;
3388 		goto again;
3389 	}
3390 
3391 	/* We've already set up this transaction, go ahead and exit */
3392 	if (block_group->cache_generation == trans->transid &&
3393 	    i_size_read(inode)) {
3394 		dcs = BTRFS_DC_SETUP;
3395 		goto out_put;
3396 	}
3397 
3398 	/*
3399 	 * We want to set the generation to 0, so that if anything goes wrong
3400 	 * from here on out we know not to trust this cache when we load up next
3401 	 * time.
3402 	 */
3403 	BTRFS_I(inode)->generation = 0;
3404 	ret = btrfs_update_inode(trans, root, inode);
3405 	if (ret) {
3406 		/*
3407 		 * So theoretically we could recover from this, simply set the
3408 		 * super cache generation to 0 so we know to invalidate the
3409 		 * cache, but then we'd have to keep track of the block groups
3410 		 * that fail this way so we know we _have_ to reset this cache
3411 		 * before the next commit or risk reading stale cache.  So to
3412 		 * limit our exposure to horrible edge cases, let's just abort the
3413 		 * transaction; this only happens in really bad situations
3414 		 * anyway.
3415 		 */
3416 		btrfs_abort_transaction(trans, ret);
3417 		goto out_put;
3418 	}
3419 	WARN_ON(ret);
3420 
3421 	if (i_size_read(inode) > 0) {
3422 		ret = btrfs_check_trunc_cache_free_space(fs_info,
3423 					&fs_info->global_block_rsv);
3424 		if (ret)
3425 			goto out_put;
3426 
3427 		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3428 		if (ret)
3429 			goto out_put;
3430 	}
3431 
3432 	spin_lock(&block_group->lock);
3433 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
3434 	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
3435 		/*
3436 		 * don't bother trying to write stuff out _if_
3437 		 * a) we're not cached,
3438 		 * b) we're mounted with the nospace_cache option.
3439 		 */
3440 		dcs = BTRFS_DC_WRITTEN;
3441 		spin_unlock(&block_group->lock);
3442 		goto out_put;
3443 	}
3444 	spin_unlock(&block_group->lock);
3445 
3446 	/*
3447 	 * We hit an ENOSPC when setting up the cache in this transaction; just
3448 	 * skip doing the setup, we've already cleared the cache so we're safe.
3449 	 */
3450 	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3451 		ret = -ENOSPC;
3452 		goto out_put;
3453 	}
3454 
3455 	/*
3456 	 * Try to preallocate enough space based on how big the block group is.
3457 	 * Keep in mind this has to include any pinned space which could end up
3458 	 * taking up quite a bit since it's not folded into the other space
3459 	 * cache.
3460 	 */
3461 	num_pages = div_u64(block_group->key.offset, SZ_256M);
3462 	if (!num_pages)
3463 		num_pages = 1;
3464 
3465 	num_pages *= 16;
3466 	num_pages *= PAGE_SIZE;
3467 
3468 	ret = btrfs_check_data_free_space(inode, 0, num_pages);
3469 	if (ret)
3470 		goto out_put;
3471 
3472 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3473 					      num_pages, num_pages,
3474 					      &alloc_hint);
3475 	/*
3476 	 * Our cache requires contiguous chunks so that we don't modify a bunch
3477 	 * of metadata or split extents when writing the cache out, which means
3478 	 * we can enospc if we are heavily fragmented in addition to just normal
3479 	 * out of space conditions.  So if we hit this just skip setting up any
3480 	 * other block groups for this transaction, maybe we'll unpin enough
3481 	 * space the next time around.
3482 	 */
3483 	if (!ret)
3484 		dcs = BTRFS_DC_SETUP;
3485 	else if (ret == -ENOSPC)
3486 		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3487 
3488 out_put:
3489 	iput(inode);
3490 out_free:
3491 	btrfs_release_path(path);
3492 out:
3493 	spin_lock(&block_group->lock);
3494 	if (!ret && dcs == BTRFS_DC_SETUP)
3495 		block_group->cache_generation = trans->transid;
3496 	block_group->disk_cache_state = dcs;
3497 	spin_unlock(&block_group->lock);
3498 
3499 	return ret;
3500 }
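/*
 * Preallocation sizing example (hypothetical block group): for a 1GiB block
 * group the cache file above is sized as div_u64(SZ_1G, SZ_256M) = 4, times
 * 16 pages, times PAGE_SIZE, i.e. 64 pages or 256KiB with 4KiB pages.
 * Anything between the 100MiB cutoff and 256MiB still gets the one-unit
 * minimum, i.e. 16 pages (64KiB).
 */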
3501 
3502 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3503 			    struct btrfs_fs_info *fs_info)
3504 {
3505 	struct btrfs_block_group_cache *cache, *tmp;
3506 	struct btrfs_transaction *cur_trans = trans->transaction;
3507 	struct btrfs_path *path;
3508 
3509 	if (list_empty(&cur_trans->dirty_bgs) ||
3510 	    !btrfs_test_opt(fs_info, SPACE_CACHE))
3511 		return 0;
3512 
3513 	path = btrfs_alloc_path();
3514 	if (!path)
3515 		return -ENOMEM;
3516 
3517 	/* Could add new block groups, use _safe just in case */
3518 	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3519 				 dirty_list) {
3520 		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3521 			cache_save_setup(cache, trans, path);
3522 	}
3523 
3524 	btrfs_free_path(path);
3525 	return 0;
3526 }
3527 
3528 /*
3529  * transaction commit does final block group cache writeback during a
3530  * critical section where nothing is allowed to change the FS.  This is
3531  * required in order for the cache to actually match the block group,
3532  * but can introduce a lot of latency into the commit.
3533  *
3534  * So, btrfs_start_dirty_block_groups is here to kick off block group
3535  * cache IO.  There's a chance we'll have to redo some of it if the
3536  * block group changes again during the commit, but it greatly reduces
3537  * the commit latency by getting rid of the easy block groups while
3538  * we're still allowing others to join the commit.
3539  */
3540 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3541 				   struct btrfs_fs_info *fs_info)
3542 {
3543 	struct btrfs_block_group_cache *cache;
3544 	struct btrfs_transaction *cur_trans = trans->transaction;
3545 	int ret = 0;
3546 	int should_put;
3547 	struct btrfs_path *path = NULL;
3548 	LIST_HEAD(dirty);
3549 	struct list_head *io = &cur_trans->io_bgs;
3550 	int num_started = 0;
3551 	int loops = 0;
3552 
3553 	spin_lock(&cur_trans->dirty_bgs_lock);
3554 	if (list_empty(&cur_trans->dirty_bgs)) {
3555 		spin_unlock(&cur_trans->dirty_bgs_lock);
3556 		return 0;
3557 	}
3558 	list_splice_init(&cur_trans->dirty_bgs, &dirty);
3559 	spin_unlock(&cur_trans->dirty_bgs_lock);
3560 
3561 again:
3562 	/*
3563 	 * make sure all the block groups on our dirty list actually
3564 	 * exist
3565 	 */
3566 	btrfs_create_pending_block_groups(trans, fs_info);
3567 
3568 	if (!path) {
3569 		path = btrfs_alloc_path();
3570 		if (!path)
3571 			return -ENOMEM;
3572 	}
3573 
3574 	/*
3575 	 * cache_write_mutex is here only to save us from balance or automatic
3576 	 * removal of empty block groups deleting this block group while we are
3577 	 * writing out the cache
3578 	 */
3579 	mutex_lock(&trans->transaction->cache_write_mutex);
3580 	while (!list_empty(&dirty)) {
3581 		cache = list_first_entry(&dirty,
3582 					 struct btrfs_block_group_cache,
3583 					 dirty_list);
3584 		/*
3585 		 * this can happen if something re-dirties a block
3586 		 * group that is already under IO.  Just wait for it to
3587 		 * finish and then do it all again
3588 		 */
3589 		if (!list_empty(&cache->io_list)) {
3590 			list_del_init(&cache->io_list);
3591 			btrfs_wait_cache_io(trans, cache, path);
3592 			btrfs_put_block_group(cache);
3593 		}
3594 
3595 
3596 		/*
3597 		 * btrfs_wait_cache_io uses the cache->dirty_list to decide
3598 		 * if it should update the cache_state.  Don't delete
3599 		 * until after we wait.
3600 		 *
3601 		 * Since we're not running in the commit critical section
3602 		 * we need the dirty_bgs_lock to protect from update_block_group
3603 		 */
3604 		spin_lock(&cur_trans->dirty_bgs_lock);
3605 		list_del_init(&cache->dirty_list);
3606 		spin_unlock(&cur_trans->dirty_bgs_lock);
3607 
3608 		should_put = 1;
3609 
3610 		cache_save_setup(cache, trans, path);
3611 
3612 		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3613 			cache->io_ctl.inode = NULL;
3614 			ret = btrfs_write_out_cache(fs_info, trans,
3615 						    cache, path);
3616 			if (ret == 0 && cache->io_ctl.inode) {
3617 				num_started++;
3618 				should_put = 0;
3619 
3620 				/*
3621 				 * the cache_write_mutex is protecting
3622 				 * the io_list
3623 				 */
3624 				list_add_tail(&cache->io_list, io);
3625 			} else {
3626 				/*
3627 				 * if we failed to write the cache, the
3628 				 * generation will be bad and life goes on
3629 				 */
3630 				ret = 0;
3631 			}
3632 		}
3633 		if (!ret) {
3634 			ret = write_one_cache_group(trans, fs_info,
3635 						    path, cache);
3636 			/*
3637 			 * Our block group might still be attached to the list
3638 			 * of new block groups in the transaction handle of some
3639 			 * other task (struct btrfs_trans_handle->new_bgs). This
3640 			 * means its block group item isn't yet in the extent
3641 			 * tree. If this happens ignore the error, as we will
3642 			 * try again later in the critical section of the
3643 			 * transaction commit.
3644 			 */
3645 			if (ret == -ENOENT) {
3646 				ret = 0;
3647 				spin_lock(&cur_trans->dirty_bgs_lock);
3648 				if (list_empty(&cache->dirty_list)) {
3649 					list_add_tail(&cache->dirty_list,
3650 						      &cur_trans->dirty_bgs);
3651 					btrfs_get_block_group(cache);
3652 				}
3653 				spin_unlock(&cur_trans->dirty_bgs_lock);
3654 			} else if (ret) {
3655 				btrfs_abort_transaction(trans, ret);
3656 			}
3657 		}
3658 
3659 		/* if it's not on the io list, we need to put the block group */
3660 		if (should_put)
3661 			btrfs_put_block_group(cache);
3662 
3663 		if (ret)
3664 			break;
3665 
3666 		/*
3667 		 * Avoid blocking other tasks for too long. It might even save
3668 		 * us from writing caches for block groups that are going to be
3669 		 * removed.
3670 		 */
3671 		mutex_unlock(&trans->transaction->cache_write_mutex);
3672 		mutex_lock(&trans->transaction->cache_write_mutex);
3673 	}
3674 	mutex_unlock(&trans->transaction->cache_write_mutex);
3675 
3676 	/*
3677 	 * go through delayed refs for all the stuff we've just kicked off
3678 	 * and then loop back (just once)
3679 	 */
3680 	ret = btrfs_run_delayed_refs(trans, fs_info, 0);
3681 	if (!ret && loops == 0) {
3682 		loops++;
3683 		spin_lock(&cur_trans->dirty_bgs_lock);
3684 		list_splice_init(&cur_trans->dirty_bgs, &dirty);
3685 		/*
3686 		 * dirty_bgs_lock protects us from concurrent block group
3687 		 * deletes too (not just cache_write_mutex).
3688 		 */
3689 		if (!list_empty(&dirty)) {
3690 			spin_unlock(&cur_trans->dirty_bgs_lock);
3691 			goto again;
3692 		}
3693 		spin_unlock(&cur_trans->dirty_bgs_lock);
3694 	} else if (ret < 0) {
3695 		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3696 	}
3697 
3698 	btrfs_free_path(path);
3699 	return ret;
3700 }
3701 
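/*
 * Illustrative note (added, not part of the original file): the block group
 * cache writeback is a two phase affair.  The commit path first calls
 * btrfs_start_dirty_block_groups() outside of the critical section to push
 * out the "easy" caches, and later runs btrfs_write_dirty_block_groups()
 * (below) inside the critical section to pick up anything that was
 * re-dirtied in between; in-flight cache IO is drained via
 * btrfs_wait_cache_io() at the end of that second pass.
 */
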
3702 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3703 				   struct btrfs_fs_info *fs_info)
3704 {
3705 	struct btrfs_block_group_cache *cache;
3706 	struct btrfs_transaction *cur_trans = trans->transaction;
3707 	int ret = 0;
3708 	int should_put;
3709 	struct btrfs_path *path;
3710 	struct list_head *io = &cur_trans->io_bgs;
3711 	int num_started = 0;
3712 
3713 	path = btrfs_alloc_path();
3714 	if (!path)
3715 		return -ENOMEM;
3716 
3717 	/*
3718 	 * Even though we are in the critical section of the transaction commit,
3719 	 * we can still have concurrent tasks adding elements to this
3720 	 * transaction's list of dirty block groups. These tasks correspond to
3721 	 * endio free space workers started when writeback finishes for a
3722 	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3723 	 * allocate new block groups as a result of COWing nodes of the root
3724 	 * tree when updating the free space inode. The writeback for the space
3725 	 * caches is triggered by an earlier call to
3726 	 * btrfs_start_dirty_block_groups() and iterations of the following
3727 	 * loop.
3728 	 * Also we want to do the cache_save_setup first and then run the
3729 	 * delayed refs to make sure we have the best chance at doing this all
3730 	 * in one shot.
3731 	 */
3732 	spin_lock(&cur_trans->dirty_bgs_lock);
3733 	while (!list_empty(&cur_trans->dirty_bgs)) {
3734 		cache = list_first_entry(&cur_trans->dirty_bgs,
3735 					 struct btrfs_block_group_cache,
3736 					 dirty_list);
3737 
3738 		/*
3739 		 * this can happen if cache_save_setup re-dirties a block
3740 		 * group that is already under IO.  Just wait for it to
3741 		 * finish and then do it all again
3742 		 */
3743 		if (!list_empty(&cache->io_list)) {
3744 			spin_unlock(&cur_trans->dirty_bgs_lock);
3745 			list_del_init(&cache->io_list);
3746 			btrfs_wait_cache_io(trans, cache, path);
3747 			btrfs_put_block_group(cache);
3748 			spin_lock(&cur_trans->dirty_bgs_lock);
3749 		}
3750 
3751 		/*
3752 		 * don't remove from the dirty list until after we've waited
3753 		 * on any pending IO
3754 		 */
3755 		list_del_init(&cache->dirty_list);
3756 		spin_unlock(&cur_trans->dirty_bgs_lock);
3757 		should_put = 1;
3758 
3759 		cache_save_setup(cache, trans, path);
3760 
3761 		if (!ret)
3762 			ret = btrfs_run_delayed_refs(trans, fs_info,
3763 						     (unsigned long) -1);
3764 
3765 		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3766 			cache->io_ctl.inode = NULL;
3767 			ret = btrfs_write_out_cache(fs_info, trans,
3768 						    cache, path);
3769 			if (ret == 0 && cache->io_ctl.inode) {
3770 				num_started++;
3771 				should_put = 0;
3772 				list_add_tail(&cache->io_list, io);
3773 			} else {
3774 				/*
3775 				 * if we failed to write the cache, the
3776 				 * generation will be bad and life goes on
3777 				 */
3778 				ret = 0;
3779 			}
3780 		}
3781 		if (!ret) {
3782 			ret = write_one_cache_group(trans, fs_info,
3783 						    path, cache);
3784 			/*
3785 			 * One of the free space endio workers might have
3786 			 * created a new block group while updating a free space
3787 			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3788 			 * and hasn't released its transaction handle yet, in
3789 			 * which case the new block group is still attached to
3790 			 * its transaction handle and its creation has not
3791 			 * finished yet (no block group item in the extent tree
3792 			 * yet, etc). If this is the case, wait for all free
3793 			 * space endio workers to finish and retry. This is a
3794 			 * very rare case so no need for a more efficient and
3795 			 * complex approach.
3796 			 */
3797 			if (ret == -ENOENT) {
3798 				wait_event(cur_trans->writer_wait,
3799 				   atomic_read(&cur_trans->num_writers) == 1);
3800 				ret = write_one_cache_group(trans, fs_info,
3801 							    path, cache);
3802 			}
3803 			if (ret)
3804 				btrfs_abort_transaction(trans, ret);
3805 		}
3806 
3807 		/* if it's not on the io list, we need to put the block group */
3808 		if (should_put)
3809 			btrfs_put_block_group(cache);
3810 		spin_lock(&cur_trans->dirty_bgs_lock);
3811 	}
3812 	spin_unlock(&cur_trans->dirty_bgs_lock);
3813 
3814 	while (!list_empty(io)) {
3815 		cache = list_first_entry(io, struct btrfs_block_group_cache,
3816 					 io_list);
3817 		list_del_init(&cache->io_list);
3818 		btrfs_wait_cache_io(trans, cache, path);
3819 		btrfs_put_block_group(cache);
3820 	}
3821 
3822 	btrfs_free_path(path);
3823 	return ret;
3824 }
3825 
3826 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
3827 {
3828 	struct btrfs_block_group_cache *block_group;
3829 	int readonly = 0;
3830 
3831 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
3832 	if (!block_group || block_group->ro)
3833 		readonly = 1;
3834 	if (block_group)
3835 		btrfs_put_block_group(block_group);
3836 	return readonly;
3837 }
3838 
3839 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3840 {
3841 	struct btrfs_block_group_cache *bg;
3842 	bool ret = true;
3843 
3844 	bg = btrfs_lookup_block_group(fs_info, bytenr);
3845 	if (!bg)
3846 		return false;
3847 
3848 	spin_lock(&bg->lock);
3849 	if (bg->ro)
3850 		ret = false;
3851 	else
3852 		atomic_inc(&bg->nocow_writers);
3853 	spin_unlock(&bg->lock);
3854 
3855 	/* no put on block group, done by btrfs_dec_nocow_writers */
3856 	if (!ret)
3857 		btrfs_put_block_group(bg);
3858 
3859 	return ret;
3860 
3861 }
3862 
3863 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3864 {
3865 	struct btrfs_block_group_cache *bg;
3866 
3867 	bg = btrfs_lookup_block_group(fs_info, bytenr);
3868 	ASSERT(bg);
3869 	if (atomic_dec_and_test(&bg->nocow_writers))
3870 		wake_up_atomic_t(&bg->nocow_writers);
3871 	/*
3872 	 * Once for our lookup and once for the lookup done by a previous call
3873 	 * to btrfs_inc_nocow_writers()
3874 	 */
3875 	btrfs_put_block_group(bg);
3876 	btrfs_put_block_group(bg);
3877 }
3878 
3879 static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
3880 {
3881 	schedule();
3882 	return 0;
3883 }
3884 
3885 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
3886 {
3887 	wait_on_atomic_t(&bg->nocow_writers,
3888 			 btrfs_wait_nocow_writers_atomic_t,
3889 			 TASK_UNINTERRUPTIBLE);
3890 }
3891 
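/*
 * Illustrative note (added): the nocow writer count above acts as simple
 * exclusion against marking a block group read-only.  A hypothetical nocow
 * write path would look roughly like:
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... do the nocow write into this block group ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	} else {
 *		... the block group is read-only, fall back to COW ...
 *	}
 *
 * while code that marks a block group read-only can afterwards call
 * btrfs_wait_nocow_writers() to drain any writers that raced with it.
 */
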
3892 static const char *alloc_name(u64 flags)
3893 {
3894 	switch (flags) {
3895 	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3896 		return "mixed";
3897 	case BTRFS_BLOCK_GROUP_METADATA:
3898 		return "metadata";
3899 	case BTRFS_BLOCK_GROUP_DATA:
3900 		return "data";
3901 	case BTRFS_BLOCK_GROUP_SYSTEM:
3902 		return "system";
3903 	default:
3904 		WARN_ON(1);
3905 		return "invalid-combination";
3906 	}
3907 }
3908 
3909 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3910 			     u64 total_bytes, u64 bytes_used,
3911 			     u64 bytes_readonly,
3912 			     struct btrfs_space_info **space_info)
3913 {
3914 	struct btrfs_space_info *found;
3915 	int i;
3916 	int factor;
3917 	int ret;
3918 
3919 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3920 		     BTRFS_BLOCK_GROUP_RAID10))
3921 		factor = 2;
3922 	else
3923 		factor = 1;
3924 
3925 	found = __find_space_info(info, flags);
3926 	if (found) {
3927 		spin_lock(&found->lock);
3928 		found->total_bytes += total_bytes;
3929 		found->disk_total += total_bytes * factor;
3930 		found->bytes_used += bytes_used;
3931 		found->disk_used += bytes_used * factor;
3932 		found->bytes_readonly += bytes_readonly;
3933 		if (total_bytes > 0)
3934 			found->full = 0;
3935 		space_info_add_new_bytes(info, found, total_bytes -
3936 					 bytes_used - bytes_readonly);
3937 		spin_unlock(&found->lock);
3938 		*space_info = found;
3939 		return 0;
3940 	}
3941 	found = kzalloc(sizeof(*found), GFP_NOFS);
3942 	if (!found)
3943 		return -ENOMEM;
3944 
3945 	ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3946 	if (ret) {
3947 		kfree(found);
3948 		return ret;
3949 	}
3950 
3951 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3952 		INIT_LIST_HEAD(&found->block_groups[i]);
3953 	init_rwsem(&found->groups_sem);
3954 	spin_lock_init(&found->lock);
3955 	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3956 	found->total_bytes = total_bytes;
3957 	found->disk_total = total_bytes * factor;
3958 	found->bytes_used = bytes_used;
3959 	found->disk_used = bytes_used * factor;
3960 	found->bytes_pinned = 0;
3961 	found->bytes_reserved = 0;
3962 	found->bytes_readonly = bytes_readonly;
3963 	found->bytes_may_use = 0;
3964 	found->full = 0;
3965 	found->max_extent_size = 0;
3966 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3967 	found->chunk_alloc = 0;
3968 	found->flush = 0;
3969 	init_waitqueue_head(&found->wait);
3970 	INIT_LIST_HEAD(&found->ro_bgs);
3971 	INIT_LIST_HEAD(&found->tickets);
3972 	INIT_LIST_HEAD(&found->priority_tickets);
3973 
3974 	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3975 				    info->space_info_kobj, "%s",
3976 				    alloc_name(found->flags));
3977 	if (ret) {
3978 		kfree(found);
3979 		return ret;
3980 	}
3981 
3982 	*space_info = found;
3983 	list_add_rcu(&found->list, &info->space_info);
3984 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3985 		info->data_sinfo = found;
3986 
3987 	return ret;
3988 }
3989 
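/*
 * Worked example (added for clarity): for DUP, RAID1 or RAID10 profiles
 * update_space_info() above uses factor = 2, so adding a block group with
 * total_bytes = 1GiB and bytes_used = 256MiB grows total_bytes/bytes_used
 * by exactly those amounts but disk_total/disk_used by 2GiB and 512MiB,
 * since every logical byte occupies two bytes on disk for those profiles.
 */
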
3990 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3991 {
3992 	u64 extra_flags = chunk_to_extended(flags) &
3993 				BTRFS_EXTENDED_PROFILE_MASK;
3994 
3995 	write_seqlock(&fs_info->profiles_lock);
3996 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3997 		fs_info->avail_data_alloc_bits |= extra_flags;
3998 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
3999 		fs_info->avail_metadata_alloc_bits |= extra_flags;
4000 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4001 		fs_info->avail_system_alloc_bits |= extra_flags;
4002 	write_sequnlock(&fs_info->profiles_lock);
4003 }
4004 
4005 /*
4006  * returns target flags in extended format or 0 if restripe for this
4007  * chunk_type is not in progress
4008  *
4009  * should be called with either volume_mutex or balance_lock held
4010  */
4011 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
4012 {
4013 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4014 	u64 target = 0;
4015 
4016 	if (!bctl)
4017 		return 0;
4018 
4019 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
4020 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4021 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
4022 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
4023 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4024 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
4025 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
4026 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4027 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
4028 	}
4029 
4030 	return target;
4031 }
4032 
4033 /*
4034  * @flags: available profiles in extended format (see ctree.h)
4035  *
4036  * Returns reduced profile in chunk format.  If profile changing is in
4037  * progress (either running or paused) picks the target profile (if it's
4038  * already available), otherwise falls back to plain reducing.
4039  */
4040 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
4041 {
4042 	u64 num_devices = fs_info->fs_devices->rw_devices;
4043 	u64 target;
4044 	u64 raid_type;
4045 	u64 allowed = 0;
4046 
4047 	/*
4048 	 * see if restripe for this chunk_type is in progress, if so
4049 	 * try to reduce to the target profile
4050 	 */
4051 	spin_lock(&fs_info->balance_lock);
4052 	target = get_restripe_target(fs_info, flags);
4053 	if (target) {
4054 		/* pick target profile only if it's already available */
4055 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4056 			spin_unlock(&fs_info->balance_lock);
4057 			return extended_to_chunk(target);
4058 		}
4059 	}
4060 	spin_unlock(&fs_info->balance_lock);
4061 
4062 	/* First, mask out the RAID levels which aren't possible */
4063 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4064 		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4065 			allowed |= btrfs_raid_group[raid_type];
4066 	}
4067 	allowed &= flags;
4068 
4069 	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4070 		allowed = BTRFS_BLOCK_GROUP_RAID6;
4071 	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4072 		allowed = BTRFS_BLOCK_GROUP_RAID5;
4073 	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4074 		allowed = BTRFS_BLOCK_GROUP_RAID10;
4075 	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4076 		allowed = BTRFS_BLOCK_GROUP_RAID1;
4077 	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4078 		allowed = BTRFS_BLOCK_GROUP_RAID0;
4079 
4080 	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4081 
4082 	return extended_to_chunk(flags | allowed);
4083 }
4084 
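/*
 * Worked example (added for clarity): if the extended flags contain both
 * RAID1 and RAID0 and only two devices are writable, the loop above keeps
 * every profile whose devs_min requirement is met and the if/else chain
 * then picks a single profile in the order RAID6 > RAID5 > RAID10 > RAID1
 * > RAID0, so the reduced result returned in chunk format would be RAID1.
 */
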
4085 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
4086 {
4087 	unsigned seq;
4088 	u64 flags;
4089 
4090 	do {
4091 		flags = orig_flags;
4092 		seq = read_seqbegin(&fs_info->profiles_lock);
4093 
4094 		if (flags & BTRFS_BLOCK_GROUP_DATA)
4095 			flags |= fs_info->avail_data_alloc_bits;
4096 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4097 			flags |= fs_info->avail_system_alloc_bits;
4098 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4099 			flags |= fs_info->avail_metadata_alloc_bits;
4100 	} while (read_seqretry(&fs_info->profiles_lock, seq));
4101 
4102 	return btrfs_reduce_alloc_profile(fs_info, flags);
4103 }
4104 
4105 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
4106 {
4107 	struct btrfs_fs_info *fs_info = root->fs_info;
4108 	u64 flags;
4109 	u64 ret;
4110 
4111 	if (data)
4112 		flags = BTRFS_BLOCK_GROUP_DATA;
4113 	else if (root == fs_info->chunk_root)
4114 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
4115 	else
4116 		flags = BTRFS_BLOCK_GROUP_METADATA;
4117 
4118 	ret = get_alloc_profile(fs_info, flags);
4119 	return ret;
4120 }
4121 
4122 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4123 {
4124 	struct btrfs_space_info *data_sinfo;
4125 	struct btrfs_root *root = BTRFS_I(inode)->root;
4126 	struct btrfs_fs_info *fs_info = root->fs_info;
4127 	u64 used;
4128 	int ret = 0;
4129 	int need_commit = 2;
4130 	int have_pinned_space;
4131 
4132 	/* make sure bytes are sectorsize aligned */
4133 	bytes = ALIGN(bytes, fs_info->sectorsize);
4134 
4135 	if (btrfs_is_free_space_inode(inode)) {
4136 		need_commit = 0;
4137 		ASSERT(current->journal_info);
4138 	}
4139 
4140 	data_sinfo = fs_info->data_sinfo;
4141 	if (!data_sinfo)
4142 		goto alloc;
4143 
4144 again:
4145 	/* make sure we have enough space to handle the data first */
4146 	spin_lock(&data_sinfo->lock);
4147 	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4148 		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4149 		data_sinfo->bytes_may_use;
4150 
4151 	if (used + bytes > data_sinfo->total_bytes) {
4152 		struct btrfs_trans_handle *trans;
4153 
4154 		/*
4155 		 * if we don't have enough free bytes in this space then we need
4156 		 * to alloc a new chunk.
4157 		 */
4158 		if (!data_sinfo->full) {
4159 			u64 alloc_target;
4160 
4161 			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4162 			spin_unlock(&data_sinfo->lock);
4163 alloc:
4164 			alloc_target = btrfs_get_alloc_profile(root, 1);
4165 			/*
4166 			 * It is ugly that we don't call the nolock join
4167 			 * transaction for the free space inode case here.
4168 			 * But it is safe because we only do the data space
4169 			 * reservation for the free space cache in the
4170 			 * transaction context; the common join transaction
4171 			 * just increases the counter of the current transaction
4172 			 * handle and doesn't try to acquire the trans_lock of
4173 			 * the fs.
4174 			 */
4175 			trans = btrfs_join_transaction(root);
4176 			if (IS_ERR(trans))
4177 				return PTR_ERR(trans);
4178 
4179 			ret = do_chunk_alloc(trans, fs_info, alloc_target,
4180 					     CHUNK_ALLOC_NO_FORCE);
4181 			btrfs_end_transaction(trans);
4182 			if (ret < 0) {
4183 				if (ret != -ENOSPC)
4184 					return ret;
4185 				else {
4186 					have_pinned_space = 1;
4187 					goto commit_trans;
4188 				}
4189 			}
4190 
4191 			if (!data_sinfo)
4192 				data_sinfo = fs_info->data_sinfo;
4193 
4194 			goto again;
4195 		}
4196 
4197 		/*
4198 		 * If we don't have enough pinned space to deal with this
4199 		 * allocation, and no removed chunk in current transaction,
4200 		 * don't bother committing the transaction.
4201 		 */
4202 		have_pinned_space = percpu_counter_compare(
4203 			&data_sinfo->total_bytes_pinned,
4204 			used + bytes - data_sinfo->total_bytes);
4205 		spin_unlock(&data_sinfo->lock);
4206 
4207 		/* commit the current transaction and try again */
4208 commit_trans:
4209 		if (need_commit &&
4210 		    !atomic_read(&fs_info->open_ioctl_trans)) {
4211 			need_commit--;
4212 
4213 			if (need_commit > 0) {
4214 				btrfs_start_delalloc_roots(fs_info, 0, -1);
4215 				btrfs_wait_ordered_roots(fs_info, -1, 0,
4216 							 (u64)-1);
4217 			}
4218 
4219 			trans = btrfs_join_transaction(root);
4220 			if (IS_ERR(trans))
4221 				return PTR_ERR(trans);
4222 			if (have_pinned_space >= 0 ||
4223 			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4224 				     &trans->transaction->flags) ||
4225 			    need_commit > 0) {
4226 				ret = btrfs_commit_transaction(trans);
4227 				if (ret)
4228 					return ret;
4229 				/*
4230 				 * The cleaner kthread might still be doing iput
4231 				 * operations. Wait for it to finish so that
4232 				 * more space is released.
4233 				 */
4234 				mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
4235 				mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
4236 				goto again;
4237 			} else {
4238 				btrfs_end_transaction(trans);
4239 			}
4240 		}
4241 
4242 		trace_btrfs_space_reservation(fs_info,
4243 					      "space_info:enospc",
4244 					      data_sinfo->flags, bytes, 1);
4245 		return -ENOSPC;
4246 	}
4247 	data_sinfo->bytes_may_use += bytes;
4248 	trace_btrfs_space_reservation(fs_info, "space_info",
4249 				      data_sinfo->flags, bytes, 1);
4250 	spin_unlock(&data_sinfo->lock);
4251 
4252 	return ret;
4253 }
4254 
4255 /*
4256  * New check_data_free_space() with the ability to do precise (qgroup aware)
4257  * data reservation.  It will replace the old btrfs_check_data_free_space();
4258  * for the sake of the patch split, the new function is added first.
4259  */
4260 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4261 {
4262 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4263 	int ret;
4264 
4265 	/* align the range */
4266 	len = round_up(start + len, fs_info->sectorsize) -
4267 	      round_down(start, fs_info->sectorsize);
4268 	start = round_down(start, fs_info->sectorsize);
4269 
4270 	ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4271 	if (ret < 0)
4272 		return ret;
4273 
4274 	/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
4275 	/* Use new btrfs_qgroup_reserve_data to reserve precise data space. */
4276 	if (ret)
4277 		btrfs_free_reserved_data_space_noquota(inode, start, len);
4278 	return ret;
4279 }
4280 
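/*
 * Usage sketch (added, illustrative only; do_the_write() is a hypothetical
 * placeholder): callers typically pair the reservation with a release on
 * the error path, e.g.
 *
 *	ret = btrfs_check_data_free_space(inode, start, len);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, start, len);
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, start, len);
 *
 * btrfs_free_reserved_data_space() (below) drops both the space_info
 * bytes_may_use accounting and the qgroup reservation for the range.
 */
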
4281 /*
4282  * Called if we need to clear a data reservation for this inode
4283  * Normally in an error case.
4284  *
4285  * This one will *NOT* use accurate qgroup reserved space API, just for case
4286  * which we can't sleep and is sure it won't affect qgroup reserved space.
4287  * Like clear_bit_hook().
4288  */
4289 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4290 					    u64 len)
4291 {
4292 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4293 	struct btrfs_space_info *data_sinfo;
4294 
4295 	/* Make sure the range is aligned to sectorsize */
4296 	len = round_up(start + len, fs_info->sectorsize) -
4297 	      round_down(start, fs_info->sectorsize);
4298 	start = round_down(start, fs_info->sectorsize);
4299 
4300 	data_sinfo = fs_info->data_sinfo;
4301 	spin_lock(&data_sinfo->lock);
4302 	if (WARN_ON(data_sinfo->bytes_may_use < len))
4303 		data_sinfo->bytes_may_use = 0;
4304 	else
4305 		data_sinfo->bytes_may_use -= len;
4306 	trace_btrfs_space_reservation(fs_info, "space_info",
4307 				      data_sinfo->flags, len, 0);
4308 	spin_unlock(&data_sinfo->lock);
4309 }
4310 
4311 /*
4312  * Called if we need to clear a data reservation for this inode
4313  * Normally in an error case.
4314  *
4315  * This one will handle the per-inode data rsv map for accurate reserved
4316  * space framework.
4317  */
4318 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4319 {
4320 	struct btrfs_root *root = BTRFS_I(inode)->root;
4321 
4322 	/* Make sure the range is aligned to sectorsize */
4323 	len = round_up(start + len, root->fs_info->sectorsize) -
4324 	      round_down(start, root->fs_info->sectorsize);
4325 	start = round_down(start, root->fs_info->sectorsize);
4326 
4327 	btrfs_free_reserved_data_space_noquota(inode, start, len);
4328 	btrfs_qgroup_free_data(inode, start, len);
4329 }
4330 
4331 static void force_metadata_allocation(struct btrfs_fs_info *info)
4332 {
4333 	struct list_head *head = &info->space_info;
4334 	struct btrfs_space_info *found;
4335 
4336 	rcu_read_lock();
4337 	list_for_each_entry_rcu(found, head, list) {
4338 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4339 			found->force_alloc = CHUNK_ALLOC_FORCE;
4340 	}
4341 	rcu_read_unlock();
4342 }
4343 
4344 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4345 {
4346 	return (global->size << 1);
4347 }
4348 
4349 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
4350 			      struct btrfs_space_info *sinfo, int force)
4351 {
4352 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4353 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4354 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4355 	u64 thresh;
4356 
4357 	if (force == CHUNK_ALLOC_FORCE)
4358 		return 1;
4359 
4360 	/*
4361 	 * We need to take into account the global rsv because for all intents
4362 	 * and purposes it's used space.  Don't worry about locking the
4363 	 * global_rsv, it doesn't change except when the transaction commits.
4364 	 */
4365 	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4366 		num_allocated += calc_global_rsv_need_space(global_rsv);
4367 
4368 	/*
4369 	 * in limited mode, we want to have some free space up to
4370 	 * about 1% of the FS size.
4371 	 */
4372 	if (force == CHUNK_ALLOC_LIMITED) {
4373 		thresh = btrfs_super_total_bytes(fs_info->super_copy);
4374 		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4375 
4376 		if (num_bytes - num_allocated < thresh)
4377 			return 1;
4378 	}
4379 
4380 	if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
4381 		return 0;
4382 	return 1;
4383 }
4384 
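/*
 * Worked example (added for clarity): in CHUNK_ALLOC_LIMITED mode the
 * threshold above is max(64MiB, 1% of the filesystem size), so on a 1TiB
 * filesystem a new chunk is allowed whenever less than roughly 10GiB of
 * this space_info remains unallocated.  In the default CHUNK_ALLOC_NO_FORCE
 * case the final check only allows a new chunk once about 80%
 * (div_factor(num_bytes, 8)) of the existing space is used or reserved.
 */
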
4385 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
4386 {
4387 	u64 num_dev;
4388 
4389 	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4390 		    BTRFS_BLOCK_GROUP_RAID0 |
4391 		    BTRFS_BLOCK_GROUP_RAID5 |
4392 		    BTRFS_BLOCK_GROUP_RAID6))
4393 		num_dev = fs_info->fs_devices->rw_devices;
4394 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
4395 		num_dev = 2;
4396 	else
4397 		num_dev = 1;	/* DUP or single */
4398 
4399 	return num_dev;
4400 }
4401 
4402 /*
4403  * Reserve space in the SYSTEM space_info for the metadata updates needed when
4404  * allocating or removing a chunk: num_devs device item updates for @type plus
4405  * one chunk item to add or remove.
4406  */
4407 void check_system_chunk(struct btrfs_trans_handle *trans,
4408 			struct btrfs_fs_info *fs_info, u64 type)
4409 {
4410 	struct btrfs_space_info *info;
4411 	u64 left;
4412 	u64 thresh;
4413 	int ret = 0;
4414 	u64 num_devs;
4415 
4416 	/*
4417 	 * Needed because we can end up allocating a system chunk and need the
4418 	 * space reservation in the chunk block reserve to be atomic and race free.
4419 	 */
4420 	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
4421 
4422 	info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4423 	spin_lock(&info->lock);
4424 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4425 		info->bytes_reserved - info->bytes_readonly -
4426 		info->bytes_may_use;
4427 	spin_unlock(&info->lock);
4428 
4429 	num_devs = get_profile_num_devs(fs_info, type);
4430 
4431 	/* num_devs device items to update and 1 chunk item to add or remove */
4432 	thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
4433 		btrfs_calc_trans_metadata_size(fs_info, 1);
4434 
4435 	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4436 		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
4437 			   left, thresh, type);
4438 		dump_space_info(fs_info, info, 0, 0);
4439 	}
4440 
4441 	if (left < thresh) {
4442 		u64 flags;
4443 
4444 		flags = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4445 		/*
4446 		 * Ignore failure to create system chunk. We might end up not
4447 		 * needing it, as we might not need to COW all nodes/leafs from
4448 		 * the paths we visit in the chunk tree (they were already COWed
4449 		 * or created in the current transaction for example).
4450 		 */
4451 		ret = btrfs_alloc_chunk(trans, fs_info, flags);
4452 	}
4453 
4454 	if (!ret) {
4455 		ret = btrfs_block_rsv_add(fs_info->chunk_root,
4456 					  &fs_info->chunk_block_rsv,
4457 					  thresh, BTRFS_RESERVE_NO_FLUSH);
4458 		if (!ret)
4459 			trans->chunk_bytes_reserved += thresh;
4460 	}
4461 }
4462 
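/*
 * Worked example (added for clarity): for a RAID1 chunk, check_system_chunk()
 * above sizes its reservation for num_devs = 2 device item updates plus one
 * chunk item, i.e. thresh = btrfs_calc_trunc_metadata_size(fs_info, 2) +
 * btrfs_calc_trans_metadata_size(fs_info, 1), and only tries to allocate a
 * fresh SYSTEM chunk when the SYSTEM space_info has less than that left.
 */
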
4463 /*
4464  * If force is CHUNK_ALLOC_FORCE:
4465  *    - return 1 if it successfully allocates a chunk,
4466  *    - return errors including -ENOSPC otherwise.
4467  * If force is NOT CHUNK_ALLOC_FORCE:
4468  *    - return 0 if it doesn't need to allocate a new chunk,
4469  *    - return 1 if it successfully allocates a chunk,
4470  *    - return errors including -ENOSPC otherwise.
4471  */
4472 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4473 			  struct btrfs_fs_info *fs_info, u64 flags, int force)
4474 {
4475 	struct btrfs_space_info *space_info;
4476 	int wait_for_alloc = 0;
4477 	int ret = 0;
4478 
4479 	/* Don't re-enter if we're already allocating a chunk */
4480 	if (trans->allocating_chunk)
4481 		return -ENOSPC;
4482 
4483 	space_info = __find_space_info(fs_info, flags);
4484 	if (!space_info) {
4485 		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
4486 		BUG_ON(ret); /* -ENOMEM */
4487 	}
4488 	BUG_ON(!space_info); /* Logic error */
4489 
4490 again:
4491 	spin_lock(&space_info->lock);
4492 	if (force < space_info->force_alloc)
4493 		force = space_info->force_alloc;
4494 	if (space_info->full) {
4495 		if (should_alloc_chunk(fs_info, space_info, force))
4496 			ret = -ENOSPC;
4497 		else
4498 			ret = 0;
4499 		spin_unlock(&space_info->lock);
4500 		return ret;
4501 	}
4502 
4503 	if (!should_alloc_chunk(fs_info, space_info, force)) {
4504 		spin_unlock(&space_info->lock);
4505 		return 0;
4506 	} else if (space_info->chunk_alloc) {
4507 		wait_for_alloc = 1;
4508 	} else {
4509 		space_info->chunk_alloc = 1;
4510 	}
4511 
4512 	spin_unlock(&space_info->lock);
4513 
4514 	mutex_lock(&fs_info->chunk_mutex);
4515 
4516 	/*
4517 	 * The chunk_mutex is held throughout the entirety of a chunk
4518 	 * allocation, so once we've acquired the chunk_mutex we know that the
4519 	 * other guy is done and we need to recheck and see if we should
4520 	 * allocate.
4521 	 */
4522 	if (wait_for_alloc) {
4523 		mutex_unlock(&fs_info->chunk_mutex);
4524 		wait_for_alloc = 0;
4525 		goto again;
4526 	}
4527 
4528 	trans->allocating_chunk = true;
4529 
4530 	/*
4531 	 * If we have mixed data/metadata chunks we want to make sure we keep
4532 	 * allocating mixed chunks instead of individual chunks.
4533 	 */
4534 	if (btrfs_mixed_space_info(space_info))
4535 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4536 
4537 	/*
4538 	 * if we're doing a data chunk, go ahead and make sure that
4539 	 * we keep a reasonable number of metadata chunks allocated in the
4540 	 * FS as well.
4541 	 */
4542 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4543 		fs_info->data_chunk_allocations++;
4544 		if (!(fs_info->data_chunk_allocations %
4545 		      fs_info->metadata_ratio))
4546 			force_metadata_allocation(fs_info);
4547 	}
4548 
4549 	/*
4550 	 * Check if we have enough space in SYSTEM chunk because we may need
4551 	 * to update devices.
4552 	 */
4553 	check_system_chunk(trans, fs_info, flags);
4554 
4555 	ret = btrfs_alloc_chunk(trans, fs_info, flags);
4556 	trans->allocating_chunk = false;
4557 
4558 	spin_lock(&space_info->lock);
4559 	if (ret < 0 && ret != -ENOSPC)
4560 		goto out;
4561 	if (ret)
4562 		space_info->full = 1;
4563 	else
4564 		ret = 1;
4565 
4566 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4567 out:
4568 	space_info->chunk_alloc = 0;
4569 	spin_unlock(&space_info->lock);
4570 	mutex_unlock(&fs_info->chunk_mutex);
4571 	/*
4572 	 * When we allocate a new chunk we reserve space in the chunk block
4573 	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4574 	 * add new nodes/leafs to it if we end up needing to do it when
4575 	 * inserting the chunk item and updating device items as part of the
4576 	 * second phase of chunk allocation, performed by
4577 	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4578 	 * large number of new block groups to create in our transaction
4579 	 * handle's new_bgs list to avoid exhausting the chunk block reserve
4580 	 * in extreme cases - like having a single transaction create many new
4581 	 * block groups when starting to write out the free space caches of all
4582 	 * the block groups that were made dirty during the lifetime of the
4583 	 * transaction.
4584 	 */
4585 	if (trans->can_flush_pending_bgs &&
4586 	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4587 		btrfs_create_pending_block_groups(trans, fs_info);
4588 		btrfs_trans_release_chunk_metadata(trans);
4589 	}
4590 	return ret;
4591 }
4592 
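/*
 * Usage sketch (added, illustrative): a data allocation path in this file
 * calls it roughly as
 *
 *	ret = do_chunk_alloc(trans, fs_info,
 *			     btrfs_get_alloc_profile(root, 1),
 *			     CHUNK_ALLOC_NO_FORCE);
 *
 * treating ret == 1 as "a chunk was allocated", ret == 0 as "nothing needed"
 * and ret < 0 (including -ENOSPC) as an error, per the contract above.
 */
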
4593 static int can_overcommit(struct btrfs_root *root,
4594 			  struct btrfs_space_info *space_info, u64 bytes,
4595 			  enum btrfs_reserve_flush_enum flush)
4596 {
4597 	struct btrfs_fs_info *fs_info = root->fs_info;
4598 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4599 	u64 profile;
4600 	u64 space_size;
4601 	u64 avail;
4602 	u64 used;
4603 
4604 	/* Don't overcommit when in mixed mode. */
4605 	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
4606 		return 0;
4607 
4608 	profile = btrfs_get_alloc_profile(root, 0);
4609 	used = space_info->bytes_used + space_info->bytes_reserved +
4610 		space_info->bytes_pinned + space_info->bytes_readonly;
4611 
4612 	/*
4613 	 * We only want to allow over committing if we have lots of actual space
4614 	 * free, but if we don't have enough space to handle the global reserve
4615 	 * space then we could end up having a real enospc problem when trying
4616 	 * to allocate a chunk or some other such important allocation.
4617 	 */
4618 	spin_lock(&global_rsv->lock);
4619 	space_size = calc_global_rsv_need_space(global_rsv);
4620 	spin_unlock(&global_rsv->lock);
4621 	if (used + space_size >= space_info->total_bytes)
4622 		return 0;
4623 
4624 	used += space_info->bytes_may_use;
4625 
4626 	spin_lock(&fs_info->free_chunk_lock);
4627 	avail = fs_info->free_chunk_space;
4628 	spin_unlock(&fs_info->free_chunk_lock);
4629 
4630 	/*
4631 	 * If we have dup, raid1 or raid10 then only half of the free
4632 	 * space is actually useable.  For raid56, the space info used
4633 	 * doesn't include the parity drive, so we don't have to
4634 	 * change the math
4635 	 */
4636 	if (profile & (BTRFS_BLOCK_GROUP_DUP |
4637 		       BTRFS_BLOCK_GROUP_RAID1 |
4638 		       BTRFS_BLOCK_GROUP_RAID10))
4639 		avail >>= 1;
4640 
4641 	/*
4642 	 * If we aren't flushing all things, let us overcommit up to
4643 	 * half of the free space. If we can flush, don't let us overcommit
4644 	 * too much; only allow up to 1/8 of the free space.
4645 	 */
4646 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
4647 		avail >>= 3;
4648 	else
4649 		avail >>= 1;
4650 
4651 	if (used + bytes < space_info->total_bytes + avail)
4652 		return 1;
4653 	return 0;
4654 }
4655 
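/*
 * Worked example (added for clarity): with 100GiB of unallocated device
 * space and a RAID1 metadata profile, can_overcommit() first halves avail
 * to 50GiB (two copies of every byte) and then lets a reservation exceed
 * total_bytes by at most avail/8 = 6.25GiB for BTRFS_RESERVE_FLUSH_ALL
 * callers, or by at most avail/2 = 25GiB for callers that cannot flush.
 */
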
4656 static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
4657 					 unsigned long nr_pages, int nr_items)
4658 {
4659 	struct super_block *sb = fs_info->sb;
4660 
4661 	if (down_read_trylock(&sb->s_umount)) {
4662 		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4663 		up_read(&sb->s_umount);
4664 	} else {
4665 		/*
4666 		 * We needn't worry the filesystem going from r/w to r/o though
4667 		 * we don't acquire ->s_umount mutex, because the filesystem
4668 		 * should guarantee the delalloc inodes list be empty after
4669 		 * the filesystem is readonly(all dirty pages are written to
4670 		 * the disk).
4671 		 */
4672 		btrfs_start_delalloc_roots(fs_info, 0, nr_items);
4673 		if (!current->journal_info)
4674 			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
4675 	}
4676 }
4677 
4678 static inline int calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
4679 					u64 to_reclaim)
4680 {
4681 	u64 bytes;
4682 	int nr;
4683 
4684 	bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
4685 	nr = (int)div64_u64(to_reclaim, bytes);
4686 	if (!nr)
4687 		nr = 1;
4688 	return nr;
4689 }
4690 
4691 #define EXTENT_SIZE_PER_ITEM	SZ_256K
4692 
4693 /*
4694  * shrink metadata reservation for delalloc
4695  */
4696 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4697 			    bool wait_ordered)
4698 {
4699 	struct btrfs_fs_info *fs_info = root->fs_info;
4700 	struct btrfs_block_rsv *block_rsv;
4701 	struct btrfs_space_info *space_info;
4702 	struct btrfs_trans_handle *trans;
4703 	u64 delalloc_bytes;
4704 	u64 max_reclaim;
4705 	long time_left;
4706 	unsigned long nr_pages;
4707 	int loops;
4708 	int items;
4709 	enum btrfs_reserve_flush_enum flush;
4710 
4711 	/* Calc the number of the pages we need flush for space reservation */
4712 	items = calc_reclaim_items_nr(fs_info, to_reclaim);
4713 	to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
4714 
4715 	trans = (struct btrfs_trans_handle *)current->journal_info;
4716 	block_rsv = &fs_info->delalloc_block_rsv;
4717 	space_info = block_rsv->space_info;
4718 
4719 	delalloc_bytes = percpu_counter_sum_positive(
4720 						&fs_info->delalloc_bytes);
4721 	if (delalloc_bytes == 0) {
4722 		if (trans)
4723 			return;
4724 		if (wait_ordered)
4725 			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4726 		return;
4727 	}
4728 
4729 	loops = 0;
4730 	while (delalloc_bytes && loops < 3) {
4731 		max_reclaim = min(delalloc_bytes, to_reclaim);
4732 		nr_pages = max_reclaim >> PAGE_SHIFT;
4733 		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
4734 		/*
4735 		 * We need to wait for the async pages to actually start before
4736 		 * we do anything.
4737 		 */
4738 		max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
4739 		if (!max_reclaim)
4740 			goto skip_async;
4741 
4742 		if (max_reclaim <= nr_pages)
4743 			max_reclaim = 0;
4744 		else
4745 			max_reclaim -= nr_pages;
4746 
4747 		wait_event(fs_info->async_submit_wait,
4748 			   atomic_read(&fs_info->async_delalloc_pages) <=
4749 			   (int)max_reclaim);
4750 skip_async:
4751 		if (!trans)
4752 			flush = BTRFS_RESERVE_FLUSH_ALL;
4753 		else
4754 			flush = BTRFS_RESERVE_NO_FLUSH;
4755 		spin_lock(&space_info->lock);
4756 		if (can_overcommit(root, space_info, orig, flush)) {
4757 			spin_unlock(&space_info->lock);
4758 			break;
4759 		}
4760 		if (list_empty(&space_info->tickets) &&
4761 		    list_empty(&space_info->priority_tickets)) {
4762 			spin_unlock(&space_info->lock);
4763 			break;
4764 		}
4765 		spin_unlock(&space_info->lock);
4766 
4767 		loops++;
4768 		if (wait_ordered && !trans) {
4769 			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4770 		} else {
4771 			time_left = schedule_timeout_killable(1);
4772 			if (time_left)
4773 				break;
4774 		}
4775 		delalloc_bytes = percpu_counter_sum_positive(
4776 						&fs_info->delalloc_bytes);
4777 	}
4778 }
4779 
4780 /**
4781  * maybe_commit_transaction - possibly commit the transaction if its ok to
4782  * may_commit_transaction - possibly commit the transaction if it's OK to
4783  * @bytes - the number of bytes we want to reserve
4784  * @force - force the commit
4785  *
4786  * This will check to make sure that committing the transaction will actually
4787  * get us somewhere and then commit the transaction if it does.  Otherwise it
4788  * will return -ENOSPC.
4789  */
4790 static int may_commit_transaction(struct btrfs_root *root,
4791 				  struct btrfs_space_info *space_info,
4792 				  u64 bytes, int force)
4793 {
4794 	struct btrfs_fs_info *fs_info = root->fs_info;
4795 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
4796 	struct btrfs_trans_handle *trans;
4797 
4798 	trans = (struct btrfs_trans_handle *)current->journal_info;
4799 	if (trans)
4800 		return -EAGAIN;
4801 
4802 	if (force)
4803 		goto commit;
4804 
4805 	/* See if there is enough pinned space to make this reservation */
4806 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4807 				   bytes) >= 0)
4808 		goto commit;
4809 
4810 	/*
4811 	 * See if there is some space in the delayed insertion reservation for
4812 	 * this reservation.
4813 	 */
4814 	if (space_info != delayed_rsv->space_info)
4815 		return -ENOSPC;
4816 
4817 	spin_lock(&delayed_rsv->lock);
4818 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4819 				   bytes - delayed_rsv->size) >= 0) {
4820 		spin_unlock(&delayed_rsv->lock);
4821 		return -ENOSPC;
4822 	}
4823 	spin_unlock(&delayed_rsv->lock);
4824 
4825 commit:
4826 	trans = btrfs_join_transaction(root);
4827 	if (IS_ERR(trans))
4828 		return -ENOSPC;
4829 
4830 	return btrfs_commit_transaction(trans);
4831 }
4832 
4833 struct reserve_ticket {
4834 	u64 bytes;
4835 	int error;
4836 	struct list_head list;
4837 	wait_queue_head_t wait;
4838 };
4839 
4840 static int flush_space(struct btrfs_root *root,
4841 		       struct btrfs_space_info *space_info, u64 num_bytes,
4842 		       u64 orig_bytes, int state)
4843 {
4844 	struct btrfs_fs_info *fs_info = root->fs_info;
4845 	struct btrfs_trans_handle *trans;
4846 	int nr;
4847 	int ret = 0;
4848 
4849 	switch (state) {
4850 	case FLUSH_DELAYED_ITEMS_NR:
4851 	case FLUSH_DELAYED_ITEMS:
4852 		if (state == FLUSH_DELAYED_ITEMS_NR)
4853 			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
4854 		else
4855 			nr = -1;
4856 
4857 		trans = btrfs_join_transaction(root);
4858 		if (IS_ERR(trans)) {
4859 			ret = PTR_ERR(trans);
4860 			break;
4861 		}
4862 		ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
4863 		btrfs_end_transaction(trans);
4864 		break;
4865 	case FLUSH_DELALLOC:
4866 	case FLUSH_DELALLOC_WAIT:
4867 		shrink_delalloc(root, num_bytes * 2, orig_bytes,
4868 				state == FLUSH_DELALLOC_WAIT);
4869 		break;
4870 	case ALLOC_CHUNK:
4871 		trans = btrfs_join_transaction(root);
4872 		if (IS_ERR(trans)) {
4873 			ret = PTR_ERR(trans);
4874 			break;
4875 		}
4876 		ret = do_chunk_alloc(trans, fs_info,
4877 				     btrfs_get_alloc_profile(root, 0),
4878 				     CHUNK_ALLOC_NO_FORCE);
4879 		btrfs_end_transaction(trans);
4880 		if (ret > 0 || ret == -ENOSPC)
4881 			ret = 0;
4882 		break;
4883 	case COMMIT_TRANS:
4884 		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4885 		break;
4886 	default:
4887 		ret = -ENOSPC;
4888 		break;
4889 	}
4890 
4891 	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes,
4892 				orig_bytes, state, ret);
4893 	return ret;
4894 }
4895 
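/*
 * Note (added, illustrative): the reclaim paths below step flush_space()
 * through the flush states in increasing order, which at this point is
 * expected to be FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS,
 * FLUSH_DELALLOC, FLUSH_DELALLOC_WAIT, ALLOC_CHUNK and finally
 * COMMIT_TRANS; priority flushers skip the two delalloc states because
 * they cannot wait on delalloc without deadlocking.
 */
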
4896 static inline u64
4897 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4898 				 struct btrfs_space_info *space_info)
4899 {
4900 	struct reserve_ticket *ticket;
4901 	u64 used;
4902 	u64 expected;
4903 	u64 to_reclaim = 0;
4904 
4905 	list_for_each_entry(ticket, &space_info->tickets, list)
4906 		to_reclaim += ticket->bytes;
4907 	list_for_each_entry(ticket, &space_info->priority_tickets, list)
4908 		to_reclaim += ticket->bytes;
4909 	if (to_reclaim)
4910 		return to_reclaim;
4911 
4912 	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4913 	if (can_overcommit(root, space_info, to_reclaim,
4914 			   BTRFS_RESERVE_FLUSH_ALL))
4915 		return 0;
4916 
4917 	used = space_info->bytes_used + space_info->bytes_reserved +
4918 	       space_info->bytes_pinned + space_info->bytes_readonly +
4919 	       space_info->bytes_may_use;
4920 	if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
4921 		expected = div_factor_fine(space_info->total_bytes, 95);
4922 	else
4923 		expected = div_factor_fine(space_info->total_bytes, 90);
4924 
4925 	if (used > expected)
4926 		to_reclaim = used - expected;
4927 	else
4928 		to_reclaim = 0;
4929 	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4930 				     space_info->bytes_reserved);
4931 	return to_reclaim;
4932 }
4933 
4934 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4935 					struct btrfs_root *root, u64 used)
4936 {
4937 	struct btrfs_fs_info *fs_info = root->fs_info;
4938 	u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4939 
4940 	/* If we're just plain full then async reclaim just slows us down. */
4941 	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
4942 		return 0;
4943 
4944 	if (!btrfs_calc_reclaim_metadata_size(root, space_info))
4945 		return 0;
4946 
4947 	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4948 		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4949 }
4950 
4951 static void wake_all_tickets(struct list_head *head)
4952 {
4953 	struct reserve_ticket *ticket;
4954 
4955 	while (!list_empty(head)) {
4956 		ticket = list_first_entry(head, struct reserve_ticket, list);
4957 		list_del_init(&ticket->list);
4958 		ticket->error = -ENOSPC;
4959 		wake_up(&ticket->wait);
4960 	}
4961 }
4962 
4963 /*
4964  * This is for normal flushers, we can wait all goddamned day if we want to.  We
4965  * will loop and continuously try to flush as long as we are making progress.
4966  * We count progress as clearing off tickets each time we have to loop.
4967  */
4968 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4969 {
4970 	struct btrfs_fs_info *fs_info;
4971 	struct btrfs_space_info *space_info;
4972 	u64 to_reclaim;
4973 	int flush_state;
4974 	int commit_cycles = 0;
4975 	u64 last_tickets_id;
4976 
4977 	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4978 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4979 
4980 	spin_lock(&space_info->lock);
4981 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4982 						      space_info);
4983 	if (!to_reclaim) {
4984 		space_info->flush = 0;
4985 		spin_unlock(&space_info->lock);
4986 		return;
4987 	}
4988 	last_tickets_id = space_info->tickets_id;
4989 	spin_unlock(&space_info->lock);
4990 
4991 	flush_state = FLUSH_DELAYED_ITEMS_NR;
4992 	do {
4993 		struct reserve_ticket *ticket;
4994 		int ret;
4995 
4996 		ret = flush_space(fs_info->fs_root, space_info, to_reclaim,
4997 			    to_reclaim, flush_state);
4998 		spin_lock(&space_info->lock);
4999 		if (list_empty(&space_info->tickets)) {
5000 			space_info->flush = 0;
5001 			spin_unlock(&space_info->lock);
5002 			return;
5003 		}
5004 		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5005 							      space_info);
5006 		ticket = list_first_entry(&space_info->tickets,
5007 					  struct reserve_ticket, list);
5008 		if (last_tickets_id == space_info->tickets_id) {
5009 			flush_state++;
5010 		} else {
5011 			last_tickets_id = space_info->tickets_id;
5012 			flush_state = FLUSH_DELAYED_ITEMS_NR;
5013 			if (commit_cycles)
5014 				commit_cycles--;
5015 		}
5016 
5017 		if (flush_state > COMMIT_TRANS) {
5018 			commit_cycles++;
5019 			if (commit_cycles > 2) {
5020 				wake_all_tickets(&space_info->tickets);
5021 				space_info->flush = 0;
5022 			} else {
5023 				flush_state = FLUSH_DELAYED_ITEMS_NR;
5024 			}
5025 		}
5026 		spin_unlock(&space_info->lock);
5027 	} while (flush_state <= COMMIT_TRANS);
5028 }
5029 
5030 void btrfs_init_async_reclaim_work(struct work_struct *work)
5031 {
5032 	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
5033 }
5034 
5035 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5036 					    struct btrfs_space_info *space_info,
5037 					    struct reserve_ticket *ticket)
5038 {
5039 	u64 to_reclaim;
5040 	int flush_state = FLUSH_DELAYED_ITEMS_NR;
5041 
5042 	spin_lock(&space_info->lock);
5043 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5044 						      space_info);
5045 	if (!to_reclaim) {
5046 		spin_unlock(&space_info->lock);
5047 		return;
5048 	}
5049 	spin_unlock(&space_info->lock);
5050 
5051 	do {
5052 		flush_space(fs_info->fs_root, space_info, to_reclaim,
5053 			    to_reclaim, flush_state);
5054 		flush_state++;
5055 		spin_lock(&space_info->lock);
5056 		if (ticket->bytes == 0) {
5057 			spin_unlock(&space_info->lock);
5058 			return;
5059 		}
5060 		spin_unlock(&space_info->lock);
5061 
5062 		/*
5063 		 * Priority flushers can't wait on delalloc without
5064 		 * deadlocking.
5065 		 */
5066 		if (flush_state == FLUSH_DELALLOC ||
5067 		    flush_state == FLUSH_DELALLOC_WAIT)
5068 			flush_state = ALLOC_CHUNK;
5069 	} while (flush_state < COMMIT_TRANS);
5070 }
5071 
5072 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5073 			       struct btrfs_space_info *space_info,
5074 			       struct reserve_ticket *ticket, u64 orig_bytes)
5075 
5076 {
5077 	DEFINE_WAIT(wait);
5078 	int ret = 0;
5079 
5080 	spin_lock(&space_info->lock);
5081 	while (ticket->bytes > 0 && ticket->error == 0) {
5082 		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
5083 		if (ret) {
5084 			ret = -EINTR;
5085 			break;
5086 		}
5087 		spin_unlock(&space_info->lock);
5088 
5089 		schedule();
5090 
5091 		finish_wait(&ticket->wait, &wait);
5092 		spin_lock(&space_info->lock);
5093 	}
5094 	if (!ret)
5095 		ret = ticket->error;
5096 	if (!list_empty(&ticket->list))
5097 		list_del_init(&ticket->list);
5098 	if (ticket->bytes && ticket->bytes < orig_bytes) {
5099 		u64 num_bytes = orig_bytes - ticket->bytes;
5100 		space_info->bytes_may_use -= num_bytes;
5101 		trace_btrfs_space_reservation(fs_info, "space_info",
5102 					      space_info->flags, num_bytes, 0);
5103 	}
5104 	spin_unlock(&space_info->lock);
5105 
5106 	return ret;
5107 }
5108 
5109 /**
5110  * __reserve_metadata_bytes - try to reserve bytes from the space_info's space
5111  * @root - the root we're allocating for
5112  * @space_info - the space info we want to allocate from
5113  * @orig_bytes - the number of bytes we want
5114  * @flush - whether or not we can flush to make our reservation
5115  *
5116  * This will reserve orig_bytes number of bytes from the space info associated
5117  * with the block_rsv.  If there is not enough space it will make an attempt to
5118  * flush out space to make room.  It will do this by flushing delalloc if
5119  * possible or committing the transaction.  If flush is 0 then no attempts to
5120  * regain reservations will be made and this will fail if there is not enough
5121  * space already.
5122  */
5123 static int __reserve_metadata_bytes(struct btrfs_root *root,
5124 				    struct btrfs_space_info *space_info,
5125 				    u64 orig_bytes,
5126 				    enum btrfs_reserve_flush_enum flush)
5127 {
5128 	struct btrfs_fs_info *fs_info = root->fs_info;
5129 	struct reserve_ticket ticket;
5130 	u64 used;
5131 	int ret = 0;
5132 
5133 	ASSERT(orig_bytes);
5134 	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
5135 
5136 	spin_lock(&space_info->lock);
5137 	ret = -ENOSPC;
5138 	used = space_info->bytes_used + space_info->bytes_reserved +
5139 		space_info->bytes_pinned + space_info->bytes_readonly +
5140 		space_info->bytes_may_use;
5141 
5142 	/*
5143 	 * If we have enough space then hooray, make our reservation and carry
5144 	 * on.  If not see if we can overcommit, and if we can, hooray carry on.
5145 	 * If not things get more complicated.
5146 	 */
5147 	if (used + orig_bytes <= space_info->total_bytes) {
5148 		space_info->bytes_may_use += orig_bytes;
5149 		trace_btrfs_space_reservation(fs_info, "space_info",
5150 					      space_info->flags, orig_bytes, 1);
5151 		ret = 0;
5152 	} else if (can_overcommit(root, space_info, orig_bytes, flush)) {
5153 		space_info->bytes_may_use += orig_bytes;
5154 		trace_btrfs_space_reservation(fs_info, "space_info",
5155 					      space_info->flags, orig_bytes, 1);
5156 		ret = 0;
5157 	}
5158 
5159 	/*
5160 	 * If we couldn't make a reservation then setup our reservation ticket
5161 	 * and kick the async worker if it's not already running.
5162 	 *
5163 	 * If we are a priority flusher then we just need to add our ticket to
5164 	 * the list and we will do our own flushing further down.
5165 	 */
5166 	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5167 		ticket.bytes = orig_bytes;
5168 		ticket.error = 0;
5169 		init_waitqueue_head(&ticket.wait);
5170 		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
5171 			list_add_tail(&ticket.list, &space_info->tickets);
5172 			if (!space_info->flush) {
5173 				space_info->flush = 1;
5174 				trace_btrfs_trigger_flush(fs_info,
5175 							  space_info->flags,
5176 							  orig_bytes, flush,
5177 							  "enospc");
5178 				queue_work(system_unbound_wq,
5179 					   &root->fs_info->async_reclaim_work);
5180 			}
5181 		} else {
5182 			list_add_tail(&ticket.list,
5183 				      &space_info->priority_tickets);
5184 		}
5185 	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5186 		used += orig_bytes;
5187 		/*
5188 		 * We will do the space reservation dance during log replay,
5189 		 * which means we won't have fs_info->fs_root set, so don't do
5190 		 * the async reclaim as we will panic.
5191 		 */
5192 		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
5193 		    need_do_async_reclaim(space_info, root, used) &&
5194 		    !work_busy(&fs_info->async_reclaim_work)) {
5195 			trace_btrfs_trigger_flush(fs_info, space_info->flags,
5196 						  orig_bytes, flush, "preempt");
5197 			queue_work(system_unbound_wq,
5198 				   &fs_info->async_reclaim_work);
5199 		}
5200 	}
5201 	spin_unlock(&space_info->lock);
5202 	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5203 		return ret;
5204 
5205 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
5206 		return wait_reserve_ticket(fs_info, space_info, &ticket,
5207 					   orig_bytes);
5208 
5209 	ret = 0;
5210 	priority_reclaim_metadata_space(fs_info, space_info, &ticket);
5211 	spin_lock(&space_info->lock);
5212 	if (ticket.bytes) {
5213 		if (ticket.bytes < orig_bytes) {
5214 			u64 num_bytes = orig_bytes - ticket.bytes;
5215 			space_info->bytes_may_use -= num_bytes;
5216 			trace_btrfs_space_reservation(fs_info, "space_info",
5217 						      space_info->flags,
5218 						      num_bytes, 0);
5219 
5220 		}
5221 		list_del_init(&ticket.list);
5222 		ret = -ENOSPC;
5223 	}
5224 	spin_unlock(&space_info->lock);
5225 	ASSERT(list_empty(&ticket.list));
5226 	return ret;
5227 }
5228 
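/*
 * Note (added, illustrative): the reserve_ticket above is a small FIFO
 * handshake.  __reserve_metadata_bytes() queues a ticket when the request
 * cannot be satisfied immediately, the flushing machinery hands reclaimed
 * space to the oldest tickets, and wake_all_tickets() marks whatever is
 * left with -ENOSPC once flushing gives up.  BTRFS_RESERVE_FLUSH_ALL
 * callers sleep in wait_reserve_ticket() while priority callers flush on
 * their own via priority_reclaim_metadata_space().
 */
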
5229 /**
5230  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5231  * @root - the root we're allocating for
5232  * @block_rsv - the block_rsv we're allocating for
5233  * @orig_bytes - the number of bytes we want
5234  * @flush - whether or not we can flush to make our reservation
5235  *
5236  * This will reserve orig_bytes number of bytes from the space info associated
5237  * with the block_rsv.  If there is not enough space it will make an attempt to
5238  * flush out space to make room.  It will do this by flushing delalloc if
5239  * possible or committing the transaction.  If flush is 0 then no attempts to
5240  * regain reservations will be made and this will fail if there is not enough
5241  * space already.
5242  */
5243 static int reserve_metadata_bytes(struct btrfs_root *root,
5244 				  struct btrfs_block_rsv *block_rsv,
5245 				  u64 orig_bytes,
5246 				  enum btrfs_reserve_flush_enum flush)
5247 {
5248 	struct btrfs_fs_info *fs_info = root->fs_info;
5249 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5250 	int ret;
5251 
5252 	ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
5253 				       flush);
5254 	if (ret == -ENOSPC &&
5255 	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5256 		if (block_rsv != global_rsv &&
5257 		    !block_rsv_use_bytes(global_rsv, orig_bytes))
5258 			ret = 0;
5259 	}
5260 	if (ret == -ENOSPC)
5261 		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
5262 					      block_rsv->space_info->flags,
5263 					      orig_bytes, 1);
5264 	return ret;
5265 }
5266 
5267 static struct btrfs_block_rsv *get_block_rsv(
5268 					const struct btrfs_trans_handle *trans,
5269 					const struct btrfs_root *root)
5270 {
5271 	struct btrfs_fs_info *fs_info = root->fs_info;
5272 	struct btrfs_block_rsv *block_rsv = NULL;
5273 
5274 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5275 	    (root == fs_info->csum_root && trans->adding_csums) ||
5276 	    (root == fs_info->uuid_root))
5277 		block_rsv = trans->block_rsv;
5278 
5279 	if (!block_rsv)
5280 		block_rsv = root->block_rsv;
5281 
5282 	if (!block_rsv)
5283 		block_rsv = &fs_info->empty_block_rsv;
5284 
5285 	return block_rsv;
5286 }
5287 
5288 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5289 			       u64 num_bytes)
5290 {
5291 	int ret = -ENOSPC;
5292 	spin_lock(&block_rsv->lock);
5293 	if (block_rsv->reserved >= num_bytes) {
5294 		block_rsv->reserved -= num_bytes;
5295 		if (block_rsv->reserved < block_rsv->size)
5296 			block_rsv->full = 0;
5297 		ret = 0;
5298 	}
5299 	spin_unlock(&block_rsv->lock);
5300 	return ret;
5301 }
5302 
5303 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5304 				u64 num_bytes, int update_size)
5305 {
5306 	spin_lock(&block_rsv->lock);
5307 	block_rsv->reserved += num_bytes;
5308 	if (update_size)
5309 		block_rsv->size += num_bytes;
5310 	else if (block_rsv->reserved >= block_rsv->size)
5311 		block_rsv->full = 1;
5312 	spin_unlock(&block_rsv->lock);
5313 }
5314 
5315 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5316 			     struct btrfs_block_rsv *dest, u64 num_bytes,
5317 			     int min_factor)
5318 {
5319 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5320 	u64 min_bytes;
5321 
5322 	if (global_rsv->space_info != dest->space_info)
5323 		return -ENOSPC;
5324 
5325 	spin_lock(&global_rsv->lock);
5326 	min_bytes = div_factor(global_rsv->size, min_factor);
5327 	if (global_rsv->reserved < min_bytes + num_bytes) {
5328 		spin_unlock(&global_rsv->lock);
5329 		return -ENOSPC;
5330 	}
5331 	global_rsv->reserved -= num_bytes;
5332 	if (global_rsv->reserved < global_rsv->size)
5333 		global_rsv->full = 0;
5334 	spin_unlock(&global_rsv->lock);
5335 
5336 	block_rsv_add_bytes(dest, num_bytes, 1);
5337 	return 0;
5338 }
5339 
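/*
 * Illustrative example (added for clarity, not part of the original file):
 * min_factor is expressed in tenths of the global rsv size, assuming
 * div_factor() computes size * factor / 10 as in math.h.  With a 512M
 * global rsv and min_factor == 5, min_bytes is 256M, so migrating 16M to
 * dest only succeeds while global_rsv->reserved >= 256M + 16M:
 *
 *	ret = btrfs_cond_migrate_bytes(fs_info, dest_rsv, SZ_16M, 5);
 *	if (ret == -ENOSPC)
 *		...not enough slack in the global rsv, reserve normally...
 */
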
5340 /*
5341  * This is for space we already have accounted in space_info->bytes_may_use, so
5342  * basically when we're returning space from block_rsv's.
5343  */
5344 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
5345 				     struct btrfs_space_info *space_info,
5346 				     u64 num_bytes)
5347 {
5348 	struct reserve_ticket *ticket;
5349 	struct list_head *head;
5350 	u64 used;
5351 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
5352 	bool check_overcommit = false;
5353 
5354 	spin_lock(&space_info->lock);
5355 	head = &space_info->priority_tickets;
5356 
5357 	/*
5358 	 * If we are over our limit then we need to check and see if we can
5359 	 * overcommit, and if we can't then we just need to free up our space
5360 	 * and not satisfy any requests.
5361 	 */
5362 	used = space_info->bytes_used + space_info->bytes_reserved +
5363 		space_info->bytes_pinned + space_info->bytes_readonly +
5364 		space_info->bytes_may_use;
5365 	if (used - num_bytes >= space_info->total_bytes)
5366 		check_overcommit = true;
5367 again:
5368 	while (!list_empty(head) && num_bytes) {
5369 		ticket = list_first_entry(head, struct reserve_ticket,
5370 					  list);
5371 		/*
5372 		 * We use 0 bytes because this space is already reserved, so
5373 		 * adding the ticket space would be a double count.
5374 		 */
5375 		if (check_overcommit &&
5376 		    !can_overcommit(fs_info->extent_root, space_info, 0,
5377 				    flush))
5378 			break;
5379 		if (num_bytes >= ticket->bytes) {
5380 			list_del_init(&ticket->list);
5381 			num_bytes -= ticket->bytes;
5382 			ticket->bytes = 0;
5383 			space_info->tickets_id++;
5384 			wake_up(&ticket->wait);
5385 		} else {
5386 			ticket->bytes -= num_bytes;
5387 			num_bytes = 0;
5388 		}
5389 	}
5390 
5391 	if (num_bytes && head == &space_info->priority_tickets) {
5392 		head = &space_info->tickets;
5393 		flush = BTRFS_RESERVE_FLUSH_ALL;
5394 		goto again;
5395 	}
5396 	space_info->bytes_may_use -= num_bytes;
5397 	trace_btrfs_space_reservation(fs_info, "space_info",
5398 				      space_info->flags, num_bytes, 0);
5399 	spin_unlock(&space_info->lock);
5400 }
5401 
5402 /*
5403  * This is for newly allocated space that isn't accounted in
5404  * space_info->bytes_may_use yet.  So if we allocate a chunk or unpin an extent
5405  * we use this helper.
5406  */
5407 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
5408 				     struct btrfs_space_info *space_info,
5409 				     u64 num_bytes)
5410 {
5411 	struct reserve_ticket *ticket;
5412 	struct list_head *head = &space_info->priority_tickets;
5413 
5414 again:
5415 	while (!list_empty(head) && num_bytes) {
5416 		ticket = list_first_entry(head, struct reserve_ticket,
5417 					  list);
5418 		if (num_bytes >= ticket->bytes) {
5419 			trace_btrfs_space_reservation(fs_info, "space_info",
5420 						      space_info->flags,
5421 						      ticket->bytes, 1);
5422 			list_del_init(&ticket->list);
5423 			num_bytes -= ticket->bytes;
5424 			space_info->bytes_may_use += ticket->bytes;
5425 			ticket->bytes = 0;
5426 			space_info->tickets_id++;
5427 			wake_up(&ticket->wait);
5428 		} else {
5429 			trace_btrfs_space_reservation(fs_info, "space_info",
5430 						      space_info->flags,
5431 						      num_bytes, 1);
5432 			space_info->bytes_may_use += num_bytes;
5433 			ticket->bytes -= num_bytes;
5434 			num_bytes = 0;
5435 		}
5436 	}
5437 
5438 	if (num_bytes && head == &space_info->priority_tickets) {
5439 		head = &space_info->tickets;
5440 		goto again;
5441 	}
5442 }
5443 
5444 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5445 				    struct btrfs_block_rsv *block_rsv,
5446 				    struct btrfs_block_rsv *dest, u64 num_bytes)
5447 {
5448 	struct btrfs_space_info *space_info = block_rsv->space_info;
5449 
5450 	spin_lock(&block_rsv->lock);
5451 	if (num_bytes == (u64)-1)
5452 		num_bytes = block_rsv->size;
5453 	block_rsv->size -= num_bytes;
5454 	if (block_rsv->reserved >= block_rsv->size) {
5455 		num_bytes = block_rsv->reserved - block_rsv->size;
5456 		block_rsv->reserved = block_rsv->size;
5457 		block_rsv->full = 1;
5458 	} else {
5459 		num_bytes = 0;
5460 	}
5461 	spin_unlock(&block_rsv->lock);
5462 
5463 	if (num_bytes > 0) {
5464 		if (dest) {
5465 			spin_lock(&dest->lock);
5466 			if (!dest->full) {
5467 				u64 bytes_to_add;
5468 
5469 				bytes_to_add = dest->size - dest->reserved;
5470 				bytes_to_add = min(num_bytes, bytes_to_add);
5471 				dest->reserved += bytes_to_add;
5472 				if (dest->reserved >= dest->size)
5473 					dest->full = 1;
5474 				num_bytes -= bytes_to_add;
5475 			}
5476 			spin_unlock(&dest->lock);
5477 		}
5478 		if (num_bytes)
5479 			space_info_add_old_bytes(fs_info, space_info,
5480 						 num_bytes);
5481 	}
5482 }
5483 
5484 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
5485 			    struct btrfs_block_rsv *dst, u64 num_bytes,
5486 			    int update_size)
5487 {
5488 	int ret;
5489 
5490 	ret = block_rsv_use_bytes(src, num_bytes);
5491 	if (ret)
5492 		return ret;
5493 
5494 	block_rsv_add_bytes(dst, num_bytes, update_size);
5495 	return 0;
5496 }
5497 
5498 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5499 {
5500 	memset(rsv, 0, sizeof(*rsv));
5501 	spin_lock_init(&rsv->lock);
5502 	rsv->type = type;
5503 }
5504 
5505 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
5506 					      unsigned short type)
5507 {
5508 	struct btrfs_block_rsv *block_rsv;
5509 
5510 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5511 	if (!block_rsv)
5512 		return NULL;
5513 
5514 	btrfs_init_block_rsv(block_rsv, type);
5515 	block_rsv->space_info = __find_space_info(fs_info,
5516 						  BTRFS_BLOCK_GROUP_METADATA);
5517 	return block_rsv;
5518 }
5519 
5520 void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
5521 			  struct btrfs_block_rsv *rsv)
5522 {
5523 	if (!rsv)
5524 		return;
5525 	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
5526 	kfree(rsv);
5527 }
5528 
5529 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5530 {
5531 	kfree(rsv);
5532 }
5533 
5534 int btrfs_block_rsv_add(struct btrfs_root *root,
5535 			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5536 			enum btrfs_reserve_flush_enum flush)
5537 {
5538 	int ret;
5539 
5540 	if (num_bytes == 0)
5541 		return 0;
5542 
5543 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5544 	if (!ret) {
5545 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
5546 		return 0;
5547 	}
5548 
5549 	return ret;
5550 }
5551 
5552 int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
5553 {
5554 	u64 num_bytes = 0;
5555 	int ret = -ENOSPC;
5556 
5557 	if (!block_rsv)
5558 		return 0;
5559 
5560 	spin_lock(&block_rsv->lock);
5561 	num_bytes = div_factor(block_rsv->size, min_factor);
5562 	if (block_rsv->reserved >= num_bytes)
5563 		ret = 0;
5564 	spin_unlock(&block_rsv->lock);
5565 
5566 	return ret;
5567 }
5568 
5569 int btrfs_block_rsv_refill(struct btrfs_root *root,
5570 			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5571 			   enum btrfs_reserve_flush_enum flush)
5572 {
5573 	u64 num_bytes = 0;
5574 	int ret = -ENOSPC;
5575 
5576 	if (!block_rsv)
5577 		return 0;
5578 
5579 	spin_lock(&block_rsv->lock);
5580 	num_bytes = min_reserved;
5581 	if (block_rsv->reserved >= num_bytes)
5582 		ret = 0;
5583 	else
5584 		num_bytes -= block_rsv->reserved;
5585 	spin_unlock(&block_rsv->lock);
5586 
5587 	if (!ret)
5588 		return 0;
5589 
5590 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5591 	if (!ret) {
5592 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
5593 		return 0;
5594 	}
5595 
5596 	return ret;
5597 }
5598 
5599 void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
5600 			     struct btrfs_block_rsv *block_rsv,
5601 			     u64 num_bytes)
5602 {
5603 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5604 
5605 	if (global_rsv == block_rsv ||
5606 	    block_rsv->space_info != global_rsv->space_info)
5607 		global_rsv = NULL;
5608 	block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
5609 }
5610 
5611 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5612 {
5613 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5614 	struct btrfs_space_info *sinfo = block_rsv->space_info;
5615 	u64 num_bytes;
5616 
5617 	/*
5618 	 * The global block rsv is based on the size of the extent tree, the
5619 	 * checksum tree and the root tree.  If the fs is empty we want to set
5620 	 * it to a minimal amount for safety.
5621 	 */
5622 	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
5623 		btrfs_root_used(&fs_info->csum_root->root_item) +
5624 		btrfs_root_used(&fs_info->tree_root->root_item);
5625 	num_bytes = max_t(u64, num_bytes, SZ_16M);
5626 
5627 	spin_lock(&sinfo->lock);
5628 	spin_lock(&block_rsv->lock);
5629 
5630 	block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5631 
5632 	if (block_rsv->reserved < block_rsv->size) {
5633 		num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5634 			sinfo->bytes_reserved + sinfo->bytes_readonly +
5635 			sinfo->bytes_may_use;
5636 		if (sinfo->total_bytes > num_bytes) {
5637 			num_bytes = sinfo->total_bytes - num_bytes;
5638 			num_bytes = min(num_bytes,
5639 					block_rsv->size - block_rsv->reserved);
5640 			block_rsv->reserved += num_bytes;
5641 			sinfo->bytes_may_use += num_bytes;
5642 			trace_btrfs_space_reservation(fs_info, "space_info",
5643 						      sinfo->flags, num_bytes,
5644 						      1);
5645 		}
5646 	} else if (block_rsv->reserved > block_rsv->size) {
5647 		num_bytes = block_rsv->reserved - block_rsv->size;
5648 		sinfo->bytes_may_use -= num_bytes;
5649 		trace_btrfs_space_reservation(fs_info, "space_info",
5650 				      sinfo->flags, num_bytes, 0);
5651 		block_rsv->reserved = block_rsv->size;
5652 	}
5653 
5654 	if (block_rsv->reserved == block_rsv->size)
5655 		block_rsv->full = 1;
5656 	else
5657 		block_rsv->full = 0;
5658 
5659 	spin_unlock(&block_rsv->lock);
5660 	spin_unlock(&sinfo->lock);
5661 }
5662 
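/*
 * Worked example (added for clarity, not part of the original file):
 * update_global_block_rsv() above sizes the global rsv from the extent,
 * csum and root tree sizes and clamps the result to [16M, 512M].  If the
 * three trees use 100M, 40M and 4M, num_bytes is 144M and block_rsv->size
 * becomes 144M; an almost empty fs is rounded up to 16M and a huge one is
 * capped at 512M.
 */
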
5663 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5664 {
5665 	struct btrfs_space_info *space_info;
5666 
5667 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5668 	fs_info->chunk_block_rsv.space_info = space_info;
5669 
5670 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5671 	fs_info->global_block_rsv.space_info = space_info;
5672 	fs_info->delalloc_block_rsv.space_info = space_info;
5673 	fs_info->trans_block_rsv.space_info = space_info;
5674 	fs_info->empty_block_rsv.space_info = space_info;
5675 	fs_info->delayed_block_rsv.space_info = space_info;
5676 
5677 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5678 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5679 	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5680 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5681 	if (fs_info->quota_root)
5682 		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5683 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5684 
5685 	update_global_block_rsv(fs_info);
5686 }
5687 
5688 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5689 {
5690 	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5691 				(u64)-1);
5692 	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5693 	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5694 	WARN_ON(fs_info->trans_block_rsv.size > 0);
5695 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5696 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
5697 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5698 	WARN_ON(fs_info->delayed_block_rsv.size > 0);
5699 	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5700 }
5701 
5702 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5703 				  struct btrfs_fs_info *fs_info)
5704 {
5705 	if (!trans->block_rsv)
5706 		return;
5707 
5708 	if (!trans->bytes_reserved)
5709 		return;
5710 
5711 	trace_btrfs_space_reservation(fs_info, "transaction",
5712 				      trans->transid, trans->bytes_reserved, 0);
5713 	btrfs_block_rsv_release(fs_info, trans->block_rsv,
5714 				trans->bytes_reserved);
5715 	trans->bytes_reserved = 0;
5716 }
5717 
5718 /*
5719  * To be called after all the new block groups attached to the transaction
5720  * handle have been created (btrfs_create_pending_block_groups()).
5721  */
5722 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5723 {
5724 	struct btrfs_fs_info *fs_info = trans->fs_info;
5725 
5726 	if (!trans->chunk_bytes_reserved)
5727 		return;
5728 
5729 	WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5730 
5731 	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5732 				trans->chunk_bytes_reserved);
5733 	trans->chunk_bytes_reserved = 0;
5734 }
5735 
5736 /* Can only return 0 or -ENOSPC */
5737 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5738 				  struct inode *inode)
5739 {
5740 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5741 	struct btrfs_root *root = BTRFS_I(inode)->root;
5742 	/*
5743 	 * We always use trans->block_rsv here as we will have reserved space
5744 	 * for our orphan when starting the transaction, using get_block_rsv()
5745 	 * here will sometimes make us choose the wrong block rsv as we could be
5746 	 * doing a reloc inode for a non refcounted root.
5747 	 */
5748 	struct btrfs_block_rsv *src_rsv = trans->block_rsv;
5749 	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5750 
5751 	/*
5752 	 * We need to hold space in order to delete our orphan item once we've
5753 	 * added it, so this takes the reservation so we can release it later
5754 	 * when we are truly done with the orphan item.
5755 	 */
5756 	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
5757 
5758 	trace_btrfs_space_reservation(fs_info, "orphan",
5759 				      btrfs_ino(inode), num_bytes, 1);
5760 	return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
5761 }
5762 
5763 void btrfs_orphan_release_metadata(struct inode *inode)
5764 {
5765 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5766 	struct btrfs_root *root = BTRFS_I(inode)->root;
5767 	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
5768 
5769 	trace_btrfs_space_reservation(fs_info, "orphan",
5770 				      btrfs_ino(inode), num_bytes, 0);
5771 	btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
5772 }
5773 
5774 /*
5775  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5776  * root: the root of the parent directory
5777  * rsv: block reservation
5778  * items: the number of items that we need to reserve space for
5779  * qgroup_reserved: used to return the reserved size in qgroup
5780  *
5781  * This function is used to reserve the space for snapshot/subvolume
5782  * creation and deletion. Those operations differ from the common
5783  * file/directory operations: they change two fs/file trees and the
5784  * root tree, and the number of items that the qgroup reserves is
5785  * different from the free space reservation. So we can not use
5786  * the space reservation mechanism in start_transaction().
5787  */
5788 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5789 				     struct btrfs_block_rsv *rsv,
5790 				     int items,
5791 				     u64 *qgroup_reserved,
5792 				     bool use_global_rsv)
5793 {
5794 	u64 num_bytes;
5795 	int ret;
5796 	struct btrfs_fs_info *fs_info = root->fs_info;
5797 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5798 
5799 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
5800 		/* One for parent inode, two for dir entries */
5801 		num_bytes = 3 * fs_info->nodesize;
5802 		ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5803 		if (ret)
5804 			return ret;
5805 	} else {
5806 		num_bytes = 0;
5807 	}
5808 
5809 	*qgroup_reserved = num_bytes;
5810 
5811 	num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
5812 	rsv->space_info = __find_space_info(fs_info,
5813 					    BTRFS_BLOCK_GROUP_METADATA);
5814 	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5815 				  BTRFS_RESERVE_FLUSH_ALL);
5816 
5817 	if (ret == -ENOSPC && use_global_rsv)
5818 		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
5819 
5820 	if (ret && *qgroup_reserved)
5821 		btrfs_qgroup_free_meta(root, *qgroup_reserved);
5822 
5823 	return ret;
5824 }
5825 
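/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * snapshot/subvolume creation reserves the metadata up front and releases
 * whatever is left when the work is done.  The item count of 8 below is
 * made up for the example.
 *
 *	struct btrfs_block_rsv rsv;
 *	u64 qgroup_reserved = 0;
 *	int ret;
 *
 *	btrfs_init_block_rsv(&rsv, BTRFS_BLOCK_RSV_TEMP);
 *	ret = btrfs_subvolume_reserve_metadata(root, &rsv, 8,
 *					       &qgroup_reserved, true);
 *	if (ret)
 *		return ret;
 *	...start a transaction with trans->block_rsv = &rsv, do the work...
 *	btrfs_subvolume_release_metadata(fs_info, &rsv, qgroup_reserved);
 */
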
5826 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
5827 				      struct btrfs_block_rsv *rsv,
5828 				      u64 qgroup_reserved)
5829 {
5830 	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
5831 }
5832 
5833 /**
5834  * drop_outstanding_extent - drop an outstanding extent
5835  * @inode: the inode we're dropping the extent for
5836  * @num_bytes: the number of bytes we're releasing.
5837  *
5838  * This is called when we are freeing up an outstanding extent, either
5839  * after an error or after an extent is written.  This will return the number of
5840  * reserved extents that need to be freed.  This must be called with
5841  * BTRFS_I(inode)->lock held.
5842  */
5843 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5844 {
5845 	unsigned drop_inode_space = 0;
5846 	unsigned dropped_extents = 0;
5847 	unsigned num_extents = 0;
5848 
5849 	num_extents = (unsigned)div64_u64(num_bytes +
5850 					  BTRFS_MAX_EXTENT_SIZE - 1,
5851 					  BTRFS_MAX_EXTENT_SIZE);
5852 	ASSERT(num_extents);
5853 	ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5854 	BTRFS_I(inode)->outstanding_extents -= num_extents;
5855 
5856 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
5857 	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5858 			       &BTRFS_I(inode)->runtime_flags))
5859 		drop_inode_space = 1;
5860 
5861 	/*
5862 	 * If we have at least as many outstanding extents as we have
5863 	 * reserved then we need to leave the reserved extents count alone.
5864 	 */
5865 	if (BTRFS_I(inode)->outstanding_extents >=
5866 	    BTRFS_I(inode)->reserved_extents)
5867 		return drop_inode_space;
5868 
5869 	dropped_extents = BTRFS_I(inode)->reserved_extents -
5870 		BTRFS_I(inode)->outstanding_extents;
5871 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
5872 	return dropped_extents + drop_inode_space;
5873 }
5874 
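/*
 * Worked example (added for clarity, not part of the original file):
 * outstanding extents are counted in BTRFS_MAX_EXTENT_SIZE units (128M
 * here), rounded up.  Dropping a 384M range removes
 *
 *	div64_u64(384M + 128M - 1, 128M) == 3
 *
 * outstanding extents.  If the remaining outstanding_extents is still at
 * least reserved_extents, only drop_inode_space (0 or 1) is returned and
 * the reserved count is left untouched.
 */
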
5875 /**
5876  * calc_csum_metadata_size - return the amount of metadata space that must be
5877  *	reserved/freed for the given bytes.
5878  * @inode: the inode we're manipulating
5879  * @num_bytes: the number of bytes in question
5880  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5881  *
5882  * This adjusts the number of csum_bytes in the inode and then returns the
5883  * correct amount of metadata that must either be reserved or freed.  We
5884  * calculate how many checksums we can fit into one leaf and then divide the
5885  * number of bytes that will need to be checksummed by this value to figure out
5886  * how many checksums will be required.  If we are adding bytes then the number
5887  * may go up and we will return the number of additional bytes that must be
5888  * reserved.  If it is going down we will return the number of bytes that must
5889  * be freed.
5890  *
5891  * This must be called with BTRFS_I(inode)->lock held.
5892  */
5893 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5894 				   int reserve)
5895 {
5896 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5897 	u64 old_csums, num_csums;
5898 
5899 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5900 	    BTRFS_I(inode)->csum_bytes == 0)
5901 		return 0;
5902 
5903 	old_csums = btrfs_csum_bytes_to_leaves(fs_info,
5904 					       BTRFS_I(inode)->csum_bytes);
5905 	if (reserve)
5906 		BTRFS_I(inode)->csum_bytes += num_bytes;
5907 	else
5908 		BTRFS_I(inode)->csum_bytes -= num_bytes;
5909 	num_csums = btrfs_csum_bytes_to_leaves(fs_info,
5910 					       BTRFS_I(inode)->csum_bytes);
5911 
5912 	/* No change, no need to reserve more */
5913 	if (old_csums == num_csums)
5914 		return 0;
5915 
5916 	if (reserve)
5917 		return btrfs_calc_trans_metadata_size(fs_info,
5918 						      num_csums - old_csums);
5919 
5920 	return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums);
5921 }
5922 
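/*
 * Worked example (added for clarity, not part of the original file): only
 * the change in the number of csum leaves is charged, not the raw byte
 * delta.  If the current csum_bytes needs 2 leaves and adding num_bytes
 * pushes that to 3 leaves, the caller must reserve
 *
 *	btrfs_calc_trans_metadata_size(fs_info, 3 - 2)
 *
 * bytes; if the leaf count is unchanged nothing extra is reserved even
 * though csum_bytes grew.
 */
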
5923 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5924 {
5925 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5926 	struct btrfs_root *root = BTRFS_I(inode)->root;
5927 	struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv;
5928 	u64 to_reserve = 0;
5929 	u64 csum_bytes;
5930 	unsigned nr_extents = 0;
5931 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5932 	int ret = 0;
5933 	bool delalloc_lock = true;
5934 	u64 to_free = 0;
5935 	unsigned dropped;
5936 	bool release_extra = false;
5937 
5938 	/* If we are a free space inode we need to not flush since we will be in
5939 	 * the middle of a transaction commit.  We also don't need the delalloc
5940 	 * mutex since we won't race with anybody.  We need this mostly to make
5941 	 * lockdep shut its filthy mouth.
5942 	 *
5943 	 * If we have a transaction open (can happen if we call truncate_block
5944 	 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
5945 	 */
5946 	if (btrfs_is_free_space_inode(inode)) {
5947 		flush = BTRFS_RESERVE_NO_FLUSH;
5948 		delalloc_lock = false;
5949 	} else if (current->journal_info) {
5950 		flush = BTRFS_RESERVE_FLUSH_LIMIT;
5951 	}
5952 
5953 	if (flush != BTRFS_RESERVE_NO_FLUSH &&
5954 	    btrfs_transaction_in_commit(fs_info))
5955 		schedule_timeout(1);
5956 
5957 	if (delalloc_lock)
5958 		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5959 
5960 	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
5961 
5962 	spin_lock(&BTRFS_I(inode)->lock);
5963 	nr_extents = (unsigned)div64_u64(num_bytes +
5964 					 BTRFS_MAX_EXTENT_SIZE - 1,
5965 					 BTRFS_MAX_EXTENT_SIZE);
5966 	BTRFS_I(inode)->outstanding_extents += nr_extents;
5967 
5968 	nr_extents = 0;
5969 	if (BTRFS_I(inode)->outstanding_extents >
5970 	    BTRFS_I(inode)->reserved_extents)
5971 		nr_extents += BTRFS_I(inode)->outstanding_extents -
5972 			BTRFS_I(inode)->reserved_extents;
5973 
5974 	/* We always want to reserve a slot for updating the inode. */
5975 	to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1);
5976 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5977 	csum_bytes = BTRFS_I(inode)->csum_bytes;
5978 	spin_unlock(&BTRFS_I(inode)->lock);
5979 
5980 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
5981 		ret = btrfs_qgroup_reserve_meta(root,
5982 				nr_extents * fs_info->nodesize);
5983 		if (ret)
5984 			goto out_fail;
5985 	}
5986 
5987 	ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
5988 	if (unlikely(ret)) {
5989 		btrfs_qgroup_free_meta(root,
5990 				       nr_extents * fs_info->nodesize);
5991 		goto out_fail;
5992 	}
5993 
5994 	spin_lock(&BTRFS_I(inode)->lock);
5995 	if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5996 			     &BTRFS_I(inode)->runtime_flags)) {
5997 		to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1);
5998 		release_extra = true;
5999 	}
6000 	BTRFS_I(inode)->reserved_extents += nr_extents;
6001 	spin_unlock(&BTRFS_I(inode)->lock);
6002 
6003 	if (delalloc_lock)
6004 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6005 
6006 	if (to_reserve)
6007 		trace_btrfs_space_reservation(fs_info, "delalloc",
6008 					      btrfs_ino(inode), to_reserve, 1);
6009 	if (release_extra)
6010 		btrfs_block_rsv_release(fs_info, block_rsv,
6011 				btrfs_calc_trans_metadata_size(fs_info, 1));
6012 	return 0;
6013 
6014 out_fail:
6015 	spin_lock(&BTRFS_I(inode)->lock);
6016 	dropped = drop_outstanding_extent(inode, num_bytes);
6017 	/*
6018 	 * If the inode's csum_bytes is the same as the original
6019 	 * csum_bytes then we know we haven't raced with any free()ers
6020 	 * so we can just reduce our inode's csum bytes and carry on.
6021 	 */
6022 	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
6023 		calc_csum_metadata_size(inode, num_bytes, 0);
6024 	} else {
6025 		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
6026 		u64 bytes;
6027 
6028 		/*
6029 		 * This is tricky, but first we need to figure out how much we
6030 		 * freed from any free-ers that occurred during this
6031 		 * reservation, so we reset ->csum_bytes to the csum_bytes
6032 		 * before we dropped our lock, and then call the free for the
6033 		 * number of bytes that were freed while we were trying our
6034 		 * reservation.
6035 		 */
6036 		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
6037 		BTRFS_I(inode)->csum_bytes = csum_bytes;
6038 		to_free = calc_csum_metadata_size(inode, bytes, 0);
6039 
6041 		/*
6042 		 * Now we need to see how much we would have freed had we not
6043 		 * been making this reservation and our ->csum_bytes were not
6044 		 * artificially inflated.
6045 		 */
6046 		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
6047 		bytes = csum_bytes - orig_csum_bytes;
6048 		bytes = calc_csum_metadata_size(inode, bytes, 0);
6049 
6050 		/*
6051 		 * Now reset ->csum_bytes to what it should be.  If bytes is
6052 		 * more than to_free then we would have freed more space had we
6053 		 * not had an artificially high ->csum_bytes, so we need to free
6054 		 * the remainder.  If bytes is the same or less then we don't
6055 		 * need to do anything, the other free-ers did the correct
6056 		 * thing.
6057 		 */
6058 		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
6059 		if (bytes > to_free)
6060 			to_free = bytes - to_free;
6061 		else
6062 			to_free = 0;
6063 	}
6064 	spin_unlock(&BTRFS_I(inode)->lock);
6065 	if (dropped)
6066 		to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
6067 
6068 	if (to_free) {
6069 		btrfs_block_rsv_release(fs_info, block_rsv, to_free);
6070 		trace_btrfs_space_reservation(fs_info, "delalloc",
6071 					      btrfs_ino(inode), to_free, 0);
6072 	}
6073 	if (delalloc_lock)
6074 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6075 	return ret;
6076 }
6077 
6078 /**
6079  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
6080  * @inode: the inode to release the reservation for
6081  * @num_bytes: the number of bytes we're releasing
6082  *
6083  * This will release the metadata reservation for an inode.  This can be called
6084  * once we complete IO for a given set of bytes to release their metadata
6085  * reservations.
6086  */
6087 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
6088 {
6089 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6090 	u64 to_free = 0;
6091 	unsigned dropped;
6092 
6093 	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
6094 	spin_lock(&BTRFS_I(inode)->lock);
6095 	dropped = drop_outstanding_extent(inode, num_bytes);
6096 
6097 	if (num_bytes)
6098 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
6099 	spin_unlock(&BTRFS_I(inode)->lock);
6100 	if (dropped > 0)
6101 		to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
6102 
6103 	if (btrfs_is_testing(fs_info))
6104 		return;
6105 
6106 	trace_btrfs_space_reservation(fs_info, "delalloc",
6107 				      btrfs_ino(inode), to_free, 0);
6108 
6109 	btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free);
6110 }
6111 
6112 /**
6113  * btrfs_delalloc_reserve_space - reserve data and metadata space for
6114  * delalloc
6115  * @inode: inode we're writing to
6116  * @start: start of the range we are writing to
6117  * @len: length of the range we are writing to
6118  *
6119  * This will do the following things
6120  *
6121  * o reserve space in data space info for num bytes
6122  *   and reserve precious corresponding qgroup space
6123  *   (Done in check_data_free_space)
6124  *
6125  * o reserve metadata space, based on the number of outstanding
6126  *   extents and how many csums will be needed;
6127  *   also reserve metadata space in a per-root over-reserve method.
6128  * o add to the inode's ->delalloc_bytes
6129  * o add it to the fs_info's delalloc inodes list.
6130  *   (Above 3 all done in delalloc_reserve_metadata)
6131  *
6132  * Return 0 for success
6133  * Return <0 for error (-ENOSPC or -EDQUOT)
6134  */
6135 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
6136 {
6137 	int ret;
6138 
6139 	ret = btrfs_check_data_free_space(inode, start, len);
6140 	if (ret < 0)
6141 		return ret;
6142 	ret = btrfs_delalloc_reserve_metadata(inode, len);
6143 	if (ret < 0)
6144 		btrfs_free_reserved_data_space(inode, start, len);
6145 	return ret;
6146 }
6147 
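/*
 * Illustrative usage sketch (hypothetical, not part of this file): a
 * buffered write path reserves data + metadata before dirtying pages and
 * hands the reservation back if nothing gets written.  The helper
 * prepare_and_dirty_pages() is made up for the example.
 *
 *	ret = btrfs_delalloc_reserve_space(inode, pos, write_bytes);
 *	if (ret)
 *		return ret;
 *	ret = prepare_and_dirty_pages(inode, pos, write_bytes);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, pos, write_bytes);
 */
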
6148 /**
6149  * btrfs_delalloc_release_space - release data and metadata space for delalloc
6150  * @inode: inode we're releasing space for
6151  * @start: start position of the space already reserved
6152  * @len: the len of the space already reserved
6153  *
6154  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
6155  * called in the case that we don't need the metadata AND data reservations
6156  * anymore, for example if there is an error or we insert an inline extent.
6157  *
6158  * This function will release the metadata space that was not used and will
6159  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
6160  * list if there are no delalloc bytes left.
6161  * Also it will handle the qgroup reserved space.
6162  */
6163 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
6164 {
6165 	btrfs_delalloc_release_metadata(inode, len);
6166 	btrfs_free_reserved_data_space(inode, start, len);
6167 }
6168 
6169 static int update_block_group(struct btrfs_trans_handle *trans,
6170 			      struct btrfs_fs_info *info, u64 bytenr,
6171 			      u64 num_bytes, int alloc)
6172 {
6173 	struct btrfs_block_group_cache *cache = NULL;
6174 	u64 total = num_bytes;
6175 	u64 old_val;
6176 	u64 byte_in_group;
6177 	int factor;
6178 
6179 	/* block accounting for super block */
6180 	spin_lock(&info->delalloc_root_lock);
6181 	old_val = btrfs_super_bytes_used(info->super_copy);
6182 	if (alloc)
6183 		old_val += num_bytes;
6184 	else
6185 		old_val -= num_bytes;
6186 	btrfs_set_super_bytes_used(info->super_copy, old_val);
6187 	spin_unlock(&info->delalloc_root_lock);
6188 
6189 	while (total) {
6190 		cache = btrfs_lookup_block_group(info, bytenr);
6191 		if (!cache)
6192 			return -ENOENT;
6193 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
6194 				    BTRFS_BLOCK_GROUP_RAID1 |
6195 				    BTRFS_BLOCK_GROUP_RAID10))
6196 			factor = 2;
6197 		else
6198 			factor = 1;
6199 		/*
6200 		 * If this block group has free space cache written out, we
6201 		 * need to make sure to load it if we are removing space.  This
6202 		 * is because we need the unpinning stage to actually add the
6203 		 * space back to the block group, otherwise we will leak space.
6204 		 */
6205 		if (!alloc && cache->cached == BTRFS_CACHE_NO)
6206 			cache_block_group(cache, 1);
6207 
6208 		byte_in_group = bytenr - cache->key.objectid;
6209 		WARN_ON(byte_in_group > cache->key.offset);
6210 
6211 		spin_lock(&cache->space_info->lock);
6212 		spin_lock(&cache->lock);
6213 
6214 		if (btrfs_test_opt(info, SPACE_CACHE) &&
6215 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
6216 			cache->disk_cache_state = BTRFS_DC_CLEAR;
6217 
6218 		old_val = btrfs_block_group_used(&cache->item);
6219 		num_bytes = min(total, cache->key.offset - byte_in_group);
6220 		if (alloc) {
6221 			old_val += num_bytes;
6222 			btrfs_set_block_group_used(&cache->item, old_val);
6223 			cache->reserved -= num_bytes;
6224 			cache->space_info->bytes_reserved -= num_bytes;
6225 			cache->space_info->bytes_used += num_bytes;
6226 			cache->space_info->disk_used += num_bytes * factor;
6227 			spin_unlock(&cache->lock);
6228 			spin_unlock(&cache->space_info->lock);
6229 		} else {
6230 			old_val -= num_bytes;
6231 			btrfs_set_block_group_used(&cache->item, old_val);
6232 			cache->pinned += num_bytes;
6233 			cache->space_info->bytes_pinned += num_bytes;
6234 			cache->space_info->bytes_used -= num_bytes;
6235 			cache->space_info->disk_used -= num_bytes * factor;
6236 			spin_unlock(&cache->lock);
6237 			spin_unlock(&cache->space_info->lock);
6238 
6239 			trace_btrfs_space_reservation(info, "pinned",
6240 						      cache->space_info->flags,
6241 						      num_bytes, 1);
6242 			set_extent_dirty(info->pinned_extents,
6243 					 bytenr, bytenr + num_bytes - 1,
6244 					 GFP_NOFS | __GFP_NOFAIL);
6245 		}
6246 
6247 		spin_lock(&trans->transaction->dirty_bgs_lock);
6248 		if (list_empty(&cache->dirty_list)) {
6249 			list_add_tail(&cache->dirty_list,
6250 				      &trans->transaction->dirty_bgs);
6251 			trans->transaction->num_dirty_bgs++;
6252 			btrfs_get_block_group(cache);
6253 		}
6254 		spin_unlock(&trans->transaction->dirty_bgs_lock);
6255 
6256 		/*
6257 		 * No longer have used bytes in this block group, queue it for
6258 		 * deletion. We do this after adding the block group to the
6259 		 * dirty list to avoid races between cleaner kthread and space
6260 		 * cache writeout.
6261 		 */
6262 		if (!alloc && old_val == 0) {
6263 			spin_lock(&info->unused_bgs_lock);
6264 			if (list_empty(&cache->bg_list)) {
6265 				btrfs_get_block_group(cache);
6266 				list_add_tail(&cache->bg_list,
6267 					      &info->unused_bgs);
6268 			}
6269 			spin_unlock(&info->unused_bgs_lock);
6270 		}
6271 
6272 		btrfs_put_block_group(cache);
6273 		total -= num_bytes;
6274 		bytenr += num_bytes;
6275 	}
6276 	return 0;
6277 }
6278 
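/*
 * Illustrative note (added for clarity, not part of the original file):
 * "factor" above accounts for the raw disk usage of duplicated profiles.
 * Allocating a 1M extent in a DUP/RAID1/RAID10 block group moves
 * bytes_used by 1M but disk_used by 2M, while SINGLE/RAID0 move both
 * by 1M.
 */
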
6279 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
6280 {
6281 	struct btrfs_block_group_cache *cache;
6282 	u64 bytenr;
6283 
6284 	spin_lock(&fs_info->block_group_cache_lock);
6285 	bytenr = fs_info->first_logical_byte;
6286 	spin_unlock(&fs_info->block_group_cache_lock);
6287 
6288 	if (bytenr < (u64)-1)
6289 		return bytenr;
6290 
6291 	cache = btrfs_lookup_first_block_group(fs_info, search_start);
6292 	if (!cache)
6293 		return 0;
6294 
6295 	bytenr = cache->key.objectid;
6296 	btrfs_put_block_group(cache);
6297 
6298 	return bytenr;
6299 }
6300 
6301 static int pin_down_extent(struct btrfs_fs_info *fs_info,
6302 			   struct btrfs_block_group_cache *cache,
6303 			   u64 bytenr, u64 num_bytes, int reserved)
6304 {
6305 	spin_lock(&cache->space_info->lock);
6306 	spin_lock(&cache->lock);
6307 	cache->pinned += num_bytes;
6308 	cache->space_info->bytes_pinned += num_bytes;
6309 	if (reserved) {
6310 		cache->reserved -= num_bytes;
6311 		cache->space_info->bytes_reserved -= num_bytes;
6312 	}
6313 	spin_unlock(&cache->lock);
6314 	spin_unlock(&cache->space_info->lock);
6315 
6316 	trace_btrfs_space_reservation(fs_info, "pinned",
6317 				      cache->space_info->flags, num_bytes, 1);
6318 	set_extent_dirty(fs_info->pinned_extents, bytenr,
6319 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6320 	return 0;
6321 }
6322 
6323 /*
6324  * this function must be called within transaction
6325  */
6326 int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
6327 		     u64 bytenr, u64 num_bytes, int reserved)
6328 {
6329 	struct btrfs_block_group_cache *cache;
6330 
6331 	cache = btrfs_lookup_block_group(fs_info, bytenr);
6332 	BUG_ON(!cache); /* Logic error */
6333 
6334 	pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);
6335 
6336 	btrfs_put_block_group(cache);
6337 	return 0;
6338 }
6339 
6340 /*
6341  * this function must be called within transaction
6342  */
6343 int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
6344 				    u64 bytenr, u64 num_bytes)
6345 {
6346 	struct btrfs_block_group_cache *cache;
6347 	int ret;
6348 
6349 	cache = btrfs_lookup_block_group(fs_info, bytenr);
6350 	if (!cache)
6351 		return -EINVAL;
6352 
6353 	/*
6354 	 * pull in the free space cache (if any) so that our pin
6355 	 * removes the free space from the cache.  We have load_only set
6356 	 * to one because the slow code to read in the free extents does check
6357 	 * the pinned extents.
6358 	 */
6359 	cache_block_group(cache, 1);
6360 
6361 	pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);
6362 
6363 	/* remove us from the free space cache (if we're there at all) */
6364 	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6365 	btrfs_put_block_group(cache);
6366 	return ret;
6367 }
6368 
6369 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
6370 				   u64 start, u64 num_bytes)
6371 {
6372 	int ret;
6373 	struct btrfs_block_group_cache *block_group;
6374 	struct btrfs_caching_control *caching_ctl;
6375 
6376 	block_group = btrfs_lookup_block_group(fs_info, start);
6377 	if (!block_group)
6378 		return -EINVAL;
6379 
6380 	cache_block_group(block_group, 0);
6381 	caching_ctl = get_caching_control(block_group);
6382 
6383 	if (!caching_ctl) {
6384 		/* Logic error */
6385 		BUG_ON(!block_group_cache_done(block_group));
6386 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
6387 	} else {
6388 		mutex_lock(&caching_ctl->mutex);
6389 
6390 		if (start >= caching_ctl->progress) {
6391 			ret = add_excluded_extent(fs_info, start, num_bytes);
6392 		} else if (start + num_bytes <= caching_ctl->progress) {
6393 			ret = btrfs_remove_free_space(block_group,
6394 						      start, num_bytes);
6395 		} else {
6396 			num_bytes = caching_ctl->progress - start;
6397 			ret = btrfs_remove_free_space(block_group,
6398 						      start, num_bytes);
6399 			if (ret)
6400 				goto out_lock;
6401 
6402 			num_bytes = (start + num_bytes) -
6403 				caching_ctl->progress;
6404 			start = caching_ctl->progress;
6405 			ret = add_excluded_extent(fs_info, start, num_bytes);
6406 		}
6407 out_lock:
6408 		mutex_unlock(&caching_ctl->mutex);
6409 		put_caching_control(caching_ctl);
6410 	}
6411 	btrfs_put_block_group(block_group);
6412 	return ret;
6413 }
6414 
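/*
 * Worked example (added for clarity, not part of the original file):
 * suppose the caching thread's progress is at 10M and we must exclude
 * [8M, 12M).  The first 2M ([8M, 10M)) is already in the free space cache
 * and is removed with btrfs_remove_free_space(); the remaining 2M
 * ([10M, 12M)) has not been cached yet, so it is recorded with
 * add_excluded_extent() and skipped when caching catches up.
 */
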
6415 int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
6416 				 struct extent_buffer *eb)
6417 {
6418 	struct btrfs_file_extent_item *item;
6419 	struct btrfs_key key;
6420 	int found_type;
6421 	int i;
6422 
6423 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
6424 		return 0;
6425 
6426 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
6427 		btrfs_item_key_to_cpu(eb, &key, i);
6428 		if (key.type != BTRFS_EXTENT_DATA_KEY)
6429 			continue;
6430 		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6431 		found_type = btrfs_file_extent_type(eb, item);
6432 		if (found_type == BTRFS_FILE_EXTENT_INLINE)
6433 			continue;
6434 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6435 			continue;
6436 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6437 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6438 		__exclude_logged_extent(fs_info, key.objectid, key.offset);
6439 	}
6440 
6441 	return 0;
6442 }
6443 
6444 static void
6445 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6446 {
6447 	atomic_inc(&bg->reservations);
6448 }
6449 
6450 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6451 					const u64 start)
6452 {
6453 	struct btrfs_block_group_cache *bg;
6454 
6455 	bg = btrfs_lookup_block_group(fs_info, start);
6456 	ASSERT(bg);
6457 	if (atomic_dec_and_test(&bg->reservations))
6458 		wake_up_atomic_t(&bg->reservations);
6459 	btrfs_put_block_group(bg);
6460 }
6461 
6462 static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
6463 {
6464 	schedule();
6465 	return 0;
6466 }
6467 
6468 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6469 {
6470 	struct btrfs_space_info *space_info = bg->space_info;
6471 
6472 	ASSERT(bg->ro);
6473 
6474 	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6475 		return;
6476 
6477 	/*
6478 	 * Our block group is read only but before we set it to read only,
6479 	 * some task might have allocated an extent from it already, but it
6480 	 * has not yet created a respective ordered extent (and added it to a
6481 	 * root's list of ordered extents).
6482 	 * Therefore wait for any task currently allocating extents, since the
6483 	 * block group's reservations counter is incremented while a read lock
6484 	 * on the groups' semaphore is held and decremented after releasing
6485 	 * the read access on that semaphore and creating the ordered extent.
6486 	 */
6487 	down_write(&space_info->groups_sem);
6488 	up_write(&space_info->groups_sem);
6489 
6490 	wait_on_atomic_t(&bg->reservations,
6491 			 btrfs_wait_bg_reservations_atomic_t,
6492 			 TASK_UNINTERRUPTIBLE);
6493 }
6494 
6495 /**
6496  * btrfs_add_reserved_bytes - update the block_group and space info counters
6497  * @cache:	The cache we are manipulating
6498  * @ram_bytes:  The number of bytes of file content, and will be the same as
6499  *              @num_bytes except for the compress path.
6500  * @num_bytes:	The number of bytes in question
6501  * @delalloc:   The blocks are allocated for the delalloc write
6502  *
6503  * This is called by the allocator when it reserves space. If this is a
6504  * reservation and the block group has become read only we cannot make the
6505  * reservation and return -EAGAIN, otherwise this function always succeeds.
6506  */
6507 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
6508 				    u64 ram_bytes, u64 num_bytes, int delalloc)
6509 {
6510 	struct btrfs_space_info *space_info = cache->space_info;
6511 	int ret = 0;
6512 
6513 	spin_lock(&space_info->lock);
6514 	spin_lock(&cache->lock);
6515 	if (cache->ro) {
6516 		ret = -EAGAIN;
6517 	} else {
6518 		cache->reserved += num_bytes;
6519 		space_info->bytes_reserved += num_bytes;
6520 
6521 		trace_btrfs_space_reservation(cache->fs_info,
6522 				"space_info", space_info->flags,
6523 				ram_bytes, 0);
6524 		space_info->bytes_may_use -= ram_bytes;
6525 		if (delalloc)
6526 			cache->delalloc_bytes += num_bytes;
6527 	}
6528 	spin_unlock(&cache->lock);
6529 	spin_unlock(&space_info->lock);
6530 	return ret;
6531 }
6532 
6533 /**
6534  * btrfs_free_reserved_bytes - update the block_group and space info counters
6535  * @cache:      The cache we are manipulating
6536  * @num_bytes:  The number of bytes in question
6537  * @delalloc:   The blocks are allocated for the delalloc write
6538  *
6539  * This is called by somebody who is freeing space that was never actually used
6540  * on disk.  For example if you reserve some space for a new leaf in transaction
6541  * A and before transaction A commits you free that leaf, you call this to
6542  * clear the reservation.
6543  */
6545 static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
6546 				     u64 num_bytes, int delalloc)
6547 {
6548 	struct btrfs_space_info *space_info = cache->space_info;
6549 	int ret = 0;
6550 
6551 	spin_lock(&space_info->lock);
6552 	spin_lock(&cache->lock);
6553 	if (cache->ro)
6554 		space_info->bytes_readonly += num_bytes;
6555 	cache->reserved -= num_bytes;
6556 	space_info->bytes_reserved -= num_bytes;
6557 
6558 	if (delalloc)
6559 		cache->delalloc_bytes -= num_bytes;
6560 	spin_unlock(&cache->lock);
6561 	spin_unlock(&space_info->lock);
6562 	return ret;
6563 }
6564 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6565 				struct btrfs_fs_info *fs_info)
6566 {
6567 	struct btrfs_caching_control *next;
6568 	struct btrfs_caching_control *caching_ctl;
6569 	struct btrfs_block_group_cache *cache;
6570 
6571 	down_write(&fs_info->commit_root_sem);
6572 
6573 	list_for_each_entry_safe(caching_ctl, next,
6574 				 &fs_info->caching_block_groups, list) {
6575 		cache = caching_ctl->block_group;
6576 		if (block_group_cache_done(cache)) {
6577 			cache->last_byte_to_unpin = (u64)-1;
6578 			list_del_init(&caching_ctl->list);
6579 			put_caching_control(caching_ctl);
6580 		} else {
6581 			cache->last_byte_to_unpin = caching_ctl->progress;
6582 		}
6583 	}
6584 
6585 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6586 		fs_info->pinned_extents = &fs_info->freed_extents[1];
6587 	else
6588 		fs_info->pinned_extents = &fs_info->freed_extents[0];
6589 
6590 	up_write(&fs_info->commit_root_sem);
6591 
6592 	update_global_block_rsv(fs_info);
6593 }
6594 
6595 /*
6596  * Returns the free cluster for the given space info and sets empty_cluster to
6597  * what it should be based on the mount options.
6598  */
6599 static struct btrfs_free_cluster *
6600 fetch_cluster_info(struct btrfs_fs_info *fs_info,
6601 		   struct btrfs_space_info *space_info, u64 *empty_cluster)
6602 {
6603 	struct btrfs_free_cluster *ret = NULL;
6604 	bool ssd = btrfs_test_opt(fs_info, SSD);
6605 
6606 	*empty_cluster = 0;
6607 	if (btrfs_mixed_space_info(space_info))
6608 		return ret;
6609 
6610 	if (ssd)
6611 		*empty_cluster = SZ_2M;
6612 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6613 		ret = &fs_info->meta_alloc_cluster;
6614 		if (!ssd)
6615 			*empty_cluster = SZ_64K;
6616 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6617 		ret = &fs_info->data_alloc_cluster;
6618 	}
6619 
6620 	return ret;
6621 }
6622 
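/*
 * Illustrative summary (added for clarity, not part of the original file)
 * of the values chosen above, before the "empty_cluster <<= 1" applied by
 * unpin_extent_range():
 *
 *	metadata, ssd:     meta_alloc_cluster, empty_cluster = 2M
 *	metadata, no ssd:  meta_alloc_cluster, empty_cluster = 64K
 *	data, ssd:         data_alloc_cluster, empty_cluster = 2M
 *	data, no ssd:      no cluster,         empty_cluster = 0
 *	mixed space info:  no cluster,         empty_cluster = 0
 */
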
6623 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
6624 			      u64 start, u64 end,
6625 			      const bool return_free_space)
6626 {
6627 	struct btrfs_block_group_cache *cache = NULL;
6628 	struct btrfs_space_info *space_info;
6629 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6630 	struct btrfs_free_cluster *cluster = NULL;
6631 	u64 len;
6632 	u64 total_unpinned = 0;
6633 	u64 empty_cluster = 0;
6634 	bool readonly;
6635 
6636 	while (start <= end) {
6637 		readonly = false;
6638 		if (!cache ||
6639 		    start >= cache->key.objectid + cache->key.offset) {
6640 			if (cache)
6641 				btrfs_put_block_group(cache);
6642 			total_unpinned = 0;
6643 			cache = btrfs_lookup_block_group(fs_info, start);
6644 			BUG_ON(!cache); /* Logic error */
6645 
6646 			cluster = fetch_cluster_info(fs_info,
6647 						     cache->space_info,
6648 						     &empty_cluster);
6649 			empty_cluster <<= 1;
6650 		}
6651 
6652 		len = cache->key.objectid + cache->key.offset - start;
6653 		len = min(len, end + 1 - start);
6654 
6655 		if (start < cache->last_byte_to_unpin) {
6656 			len = min(len, cache->last_byte_to_unpin - start);
6657 			if (return_free_space)
6658 				btrfs_add_free_space(cache, start, len);
6659 		}
6660 
6661 		start += len;
6662 		total_unpinned += len;
6663 		space_info = cache->space_info;
6664 
6665 		/*
6666 		 * If this space cluster has been marked as fragmented and we've
6667 		 * unpinned enough in this block group to potentially allow a
6668 		 * cluster to be created inside of it go ahead and clear the
6669 		 * fragmented check.
6670 		 */
6671 		if (cluster && cluster->fragmented &&
6672 		    total_unpinned > empty_cluster) {
6673 			spin_lock(&cluster->lock);
6674 			cluster->fragmented = 0;
6675 			spin_unlock(&cluster->lock);
6676 		}
6677 
6678 		spin_lock(&space_info->lock);
6679 		spin_lock(&cache->lock);
6680 		cache->pinned -= len;
6681 		space_info->bytes_pinned -= len;
6682 
6683 		trace_btrfs_space_reservation(fs_info, "pinned",
6684 					      space_info->flags, len, 0);
6685 		space_info->max_extent_size = 0;
6686 		percpu_counter_add(&space_info->total_bytes_pinned, -len);
6687 		if (cache->ro) {
6688 			space_info->bytes_readonly += len;
6689 			readonly = true;
6690 		}
6691 		spin_unlock(&cache->lock);
6692 		if (!readonly && return_free_space &&
6693 		    global_rsv->space_info == space_info) {
6694 			u64 to_add = len;
6695 			WARN_ON(!return_free_space);
6696 			spin_lock(&global_rsv->lock);
6697 			if (!global_rsv->full) {
6698 				to_add = min(len, global_rsv->size -
6699 					     global_rsv->reserved);
6700 				global_rsv->reserved += to_add;
6701 				space_info->bytes_may_use += to_add;
6702 				if (global_rsv->reserved >= global_rsv->size)
6703 					global_rsv->full = 1;
6704 				trace_btrfs_space_reservation(fs_info,
6705 							      "space_info",
6706 							      space_info->flags,
6707 							      to_add, 1);
6708 				len -= to_add;
6709 			}
6710 			spin_unlock(&global_rsv->lock);
6711 			/* Add to any tickets we may have */
6712 			if (len)
6713 				space_info_add_new_bytes(fs_info, space_info,
6714 							 len);
6715 		}
6716 		spin_unlock(&space_info->lock);
6717 	}
6718 
6719 	if (cache)
6720 		btrfs_put_block_group(cache);
6721 	return 0;
6722 }
6723 
6724 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6725 			       struct btrfs_fs_info *fs_info)
6726 {
6727 	struct btrfs_block_group_cache *block_group, *tmp;
6728 	struct list_head *deleted_bgs;
6729 	struct extent_io_tree *unpin;
6730 	u64 start;
6731 	u64 end;
6732 	int ret;
6733 
6734 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6735 		unpin = &fs_info->freed_extents[1];
6736 	else
6737 		unpin = &fs_info->freed_extents[0];
6738 
6739 	while (!trans->aborted) {
6740 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
6741 		ret = find_first_extent_bit(unpin, 0, &start, &end,
6742 					    EXTENT_DIRTY, NULL);
6743 		if (ret) {
6744 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6745 			break;
6746 		}
6747 
6748 		if (btrfs_test_opt(fs_info, DISCARD))
6749 			ret = btrfs_discard_extent(fs_info, start,
6750 						   end + 1 - start, NULL);
6751 
6752 		clear_extent_dirty(unpin, start, end);
6753 		unpin_extent_range(fs_info, start, end, true);
6754 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6755 		cond_resched();
6756 	}
6757 
6758 	/*
6759 	 * Transaction is finished.  We don't need the lock anymore.  We
6760 	 * do need to clean up the block groups in case of a transaction
6761 	 * abort.
6762 	 */
6763 	deleted_bgs = &trans->transaction->deleted_bgs;
6764 	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6765 		u64 trimmed = 0;
6766 
6767 		ret = -EROFS;
6768 		if (!trans->aborted)
6769 			ret = btrfs_discard_extent(fs_info,
6770 						   block_group->key.objectid,
6771 						   block_group->key.offset,
6772 						   &trimmed);
6773 
6774 		list_del_init(&block_group->bg_list);
6775 		btrfs_put_block_group_trimming(block_group);
6776 		btrfs_put_block_group(block_group);
6777 
6778 		if (ret) {
6779 			const char *errstr = btrfs_decode_error(ret);
6780 			btrfs_warn(fs_info,
6781 				   "Discard failed while removing block group: errno=%d %s",
6782 				   ret, errstr);
6783 		}
6784 	}
6785 
6786 	return 0;
6787 }
6788 
6789 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6790 			     u64 owner, u64 root_objectid)
6791 {
6792 	struct btrfs_space_info *space_info;
6793 	u64 flags;
6794 
6795 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6796 		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6797 			flags = BTRFS_BLOCK_GROUP_SYSTEM;
6798 		else
6799 			flags = BTRFS_BLOCK_GROUP_METADATA;
6800 	} else {
6801 		flags = BTRFS_BLOCK_GROUP_DATA;
6802 	}
6803 
6804 	space_info = __find_space_info(fs_info, flags);
6805 	BUG_ON(!space_info); /* Logic bug */
6806 	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6807 }
6808 
6809 
6810 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6811 				struct btrfs_fs_info *info,
6812 				struct btrfs_delayed_ref_node *node, u64 parent,
6813 				u64 root_objectid, u64 owner_objectid,
6814 				u64 owner_offset, int refs_to_drop,
6815 				struct btrfs_delayed_extent_op *extent_op)
6816 {
6817 	struct btrfs_key key;
6818 	struct btrfs_path *path;
6819 	struct btrfs_root *extent_root = info->extent_root;
6820 	struct extent_buffer *leaf;
6821 	struct btrfs_extent_item *ei;
6822 	struct btrfs_extent_inline_ref *iref;
6823 	int ret;
6824 	int is_data;
6825 	int extent_slot = 0;
6826 	int found_extent = 0;
6827 	int num_to_del = 1;
6828 	u32 item_size;
6829 	u64 refs;
6830 	u64 bytenr = node->bytenr;
6831 	u64 num_bytes = node->num_bytes;
6832 	int last_ref = 0;
6833 	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
6834 
6835 	path = btrfs_alloc_path();
6836 	if (!path)
6837 		return -ENOMEM;
6838 
6839 	path->reada = READA_FORWARD;
6840 	path->leave_spinning = 1;
6841 
6842 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6843 	BUG_ON(!is_data && refs_to_drop != 1);
6844 
6845 	if (is_data)
6846 		skinny_metadata = 0;
6847 
6848 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
6849 				    bytenr, num_bytes, parent,
6850 				    root_objectid, owner_objectid,
6851 				    owner_offset);
6852 	if (ret == 0) {
6853 		extent_slot = path->slots[0];
6854 		while (extent_slot >= 0) {
6855 			btrfs_item_key_to_cpu(path->nodes[0], &key,
6856 					      extent_slot);
6857 			if (key.objectid != bytenr)
6858 				break;
6859 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6860 			    key.offset == num_bytes) {
6861 				found_extent = 1;
6862 				break;
6863 			}
6864 			if (key.type == BTRFS_METADATA_ITEM_KEY &&
6865 			    key.offset == owner_objectid) {
6866 				found_extent = 1;
6867 				break;
6868 			}
6869 			if (path->slots[0] - extent_slot > 5)
6870 				break;
6871 			extent_slot--;
6872 		}
6873 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6874 		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6875 		if (found_extent && item_size < sizeof(*ei))
6876 			found_extent = 0;
6877 #endif
6878 		if (!found_extent) {
6879 			BUG_ON(iref);
6880 			ret = remove_extent_backref(trans, extent_root, path,
6881 						    NULL, refs_to_drop,
6882 						    is_data, &last_ref);
6883 			if (ret) {
6884 				btrfs_abort_transaction(trans, ret);
6885 				goto out;
6886 			}
6887 			btrfs_release_path(path);
6888 			path->leave_spinning = 1;
6889 
6890 			key.objectid = bytenr;
6891 			key.type = BTRFS_EXTENT_ITEM_KEY;
6892 			key.offset = num_bytes;
6893 
6894 			if (!is_data && skinny_metadata) {
6895 				key.type = BTRFS_METADATA_ITEM_KEY;
6896 				key.offset = owner_objectid;
6897 			}
6898 
6899 			ret = btrfs_search_slot(trans, extent_root,
6900 						&key, path, -1, 1);
6901 			if (ret > 0 && skinny_metadata && path->slots[0]) {
6902 				/*
6903 				 * Couldn't find our skinny metadata item,
6904 				 * see if we have ye olde extent item.
6905 				 */
6906 				path->slots[0]--;
6907 				btrfs_item_key_to_cpu(path->nodes[0], &key,
6908 						      path->slots[0]);
6909 				if (key.objectid == bytenr &&
6910 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
6911 				    key.offset == num_bytes)
6912 					ret = 0;
6913 			}
6914 
6915 			if (ret > 0 && skinny_metadata) {
6916 				skinny_metadata = false;
6917 				key.objectid = bytenr;
6918 				key.type = BTRFS_EXTENT_ITEM_KEY;
6919 				key.offset = num_bytes;
6920 				btrfs_release_path(path);
6921 				ret = btrfs_search_slot(trans, extent_root,
6922 							&key, path, -1, 1);
6923 			}
6924 
6925 			if (ret) {
6926 				btrfs_err(info,
6927 					  "umm, got %d back from search, was looking for %llu",
6928 					  ret, bytenr);
6929 				if (ret > 0)
6930 					btrfs_print_leaf(info, path->nodes[0]);
6931 			}
6932 			if (ret < 0) {
6933 				btrfs_abort_transaction(trans, ret);
6934 				goto out;
6935 			}
6936 			extent_slot = path->slots[0];
6937 		}
6938 	} else if (WARN_ON(ret == -ENOENT)) {
6939 		btrfs_print_leaf(info, path->nodes[0]);
6940 		btrfs_err(info,
6941 			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6942 			bytenr, parent, root_objectid, owner_objectid,
6943 			owner_offset);
6944 		btrfs_abort_transaction(trans, ret);
6945 		goto out;
6946 	} else {
6947 		btrfs_abort_transaction(trans, ret);
6948 		goto out;
6949 	}
6950 
6951 	leaf = path->nodes[0];
6952 	item_size = btrfs_item_size_nr(leaf, extent_slot);
6953 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6954 	if (item_size < sizeof(*ei)) {
6955 		BUG_ON(found_extent || extent_slot != path->slots[0]);
6956 		ret = convert_extent_item_v0(trans, extent_root, path,
6957 					     owner_objectid, 0);
6958 		if (ret < 0) {
6959 			btrfs_abort_transaction(trans, ret);
6960 			goto out;
6961 		}
6962 
6963 		btrfs_release_path(path);
6964 		path->leave_spinning = 1;
6965 
6966 		key.objectid = bytenr;
6967 		key.type = BTRFS_EXTENT_ITEM_KEY;
6968 		key.offset = num_bytes;
6969 
6970 		ret = btrfs_search_slot(trans, extent_root, &key, path,
6971 					-1, 1);
6972 		if (ret) {
6973 			btrfs_err(info,
6974 				  "umm, got %d back from search, was looking for %llu",
6975 				ret, bytenr);
6976 			btrfs_print_leaf(info, path->nodes[0]);
6977 		}
6978 		if (ret < 0) {
6979 			btrfs_abort_transaction(trans, ret);
6980 			goto out;
6981 		}
6982 
6983 		extent_slot = path->slots[0];
6984 		leaf = path->nodes[0];
6985 		item_size = btrfs_item_size_nr(leaf, extent_slot);
6986 	}
6987 #endif
6988 	BUG_ON(item_size < sizeof(*ei));
6989 	ei = btrfs_item_ptr(leaf, extent_slot,
6990 			    struct btrfs_extent_item);
6991 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6992 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
6993 		struct btrfs_tree_block_info *bi;
6994 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6995 		bi = (struct btrfs_tree_block_info *)(ei + 1);
6996 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6997 	}
6998 
6999 	refs = btrfs_extent_refs(leaf, ei);
7000 	if (refs < refs_to_drop) {
7001 		btrfs_err(info,
7002 			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
7003 			  refs_to_drop, refs, bytenr);
7004 		ret = -EINVAL;
7005 		btrfs_abort_transaction(trans, ret);
7006 		goto out;
7007 	}
7008 	refs -= refs_to_drop;
7009 
7010 	if (refs > 0) {
7011 		if (extent_op)
7012 			__run_delayed_extent_op(extent_op, leaf, ei);
7013 		/*
7014 		 * In the case of inline back ref, reference count will
7015 		 * be updated by remove_extent_backref
7016 		 */
7017 		if (iref) {
7018 			BUG_ON(!found_extent);
7019 		} else {
7020 			btrfs_set_extent_refs(leaf, ei, refs);
7021 			btrfs_mark_buffer_dirty(leaf);
7022 		}
7023 		if (found_extent) {
7024 			ret = remove_extent_backref(trans, extent_root, path,
7025 						    iref, refs_to_drop,
7026 						    is_data, &last_ref);
7027 			if (ret) {
7028 				btrfs_abort_transaction(trans, ret);
7029 				goto out;
7030 			}
7031 		}
7032 		add_pinned_bytes(info, -num_bytes, owner_objectid,
7033 				 root_objectid);
7034 	} else {
7035 		if (found_extent) {
7036 			BUG_ON(is_data && refs_to_drop !=
7037 			       extent_data_ref_count(path, iref));
7038 			if (iref) {
7039 				BUG_ON(path->slots[0] != extent_slot);
7040 			} else {
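				/*
				 * The keyed backref item sits right after the
				 * extent item in this leaf, so both can be
				 * removed with one deletion below.
				 */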
7041 				BUG_ON(path->slots[0] != extent_slot + 1);
7042 				path->slots[0] = extent_slot;
7043 				num_to_del = 2;
7044 			}
7045 		}
7046 
7047 		last_ref = 1;
7048 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
7049 				      num_to_del);
7050 		if (ret) {
7051 			btrfs_abort_transaction(trans, ret);
7052 			goto out;
7053 		}
7054 		btrfs_release_path(path);
7055 
7056 		if (is_data) {
7057 			ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
7058 			if (ret) {
7059 				btrfs_abort_transaction(trans, ret);
7060 				goto out;
7061 			}
7062 		}
7063 
7064 		ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
7065 		if (ret) {
7066 			btrfs_abort_transaction(trans, ret);
7067 			goto out;
7068 		}
7069 
7070 		ret = update_block_group(trans, info, bytenr, num_bytes, 0);
7071 		if (ret) {
7072 			btrfs_abort_transaction(trans, ret);
7073 			goto out;
7074 		}
7075 	}
7076 	btrfs_release_path(path);
7077 
7078 out:
7079 	btrfs_free_path(path);
7080 	return ret;
7081 }
7082 
7083 /*
7084  * when we free a block, it is possible (and likely) that we free the last
7085  * delayed ref for that extent as well.  This searches the delayed ref tree for
7086  * a given extent, and if there are no other delayed refs to be processed, it
7087  * removes it from the tree.
7088  */
7089 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7090 				      u64 bytenr)
7091 {
7092 	struct btrfs_delayed_ref_head *head;
7093 	struct btrfs_delayed_ref_root *delayed_refs;
7094 	int ret = 0;
7095 
7096 	delayed_refs = &trans->transaction->delayed_refs;
7097 	spin_lock(&delayed_refs->lock);
7098 	head = btrfs_find_delayed_ref_head(trans, bytenr);
7099 	if (!head)
7100 		goto out_delayed_unlock;
7101 
7102 	spin_lock(&head->lock);
7103 	if (!list_empty(&head->ref_list))
7104 		goto out;
7105 
7106 	if (head->extent_op) {
7107 		if (!head->must_insert_reserved)
7108 			goto out;
7109 		btrfs_free_delayed_extent_op(head->extent_op);
7110 		head->extent_op = NULL;
7111 	}
7112 
7113 	/*
7114 	 * waiting for the lock here would deadlock.  If someone else has it
7115 	 * locked they are already in the process of dropping it anyway
7116 	 */
7117 	if (!mutex_trylock(&head->mutex))
7118 		goto out;
7119 
7120 	/*
7121 	 * at this point we have a head with no other entries.  Go
7122 	 * ahead and process it.
7123 	 */
7124 	head->node.in_tree = 0;
7125 	rb_erase(&head->href_node, &delayed_refs->href_root);
7126 
7127 	atomic_dec(&delayed_refs->num_entries);
7128 
7129 	/*
7130 	 * we don't take a ref on the node because we're removing it from the
7131 	 * tree, so we just steal the ref the tree was holding.
7132 	 */
7133 	delayed_refs->num_heads--;
7134 	if (head->processing == 0)
7135 		delayed_refs->num_heads_ready--;
7136 	head->processing = 0;
7137 	spin_unlock(&head->lock);
7138 	spin_unlock(&delayed_refs->lock);
7139 
7140 	BUG_ON(head->extent_op);
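	/*
	 * Returning 1 tells the caller the extent was reserved in this
	 * transaction and never inserted into the extent tree, so its space
	 * can be freed directly instead of being pinned.
	 */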
7141 	if (head->must_insert_reserved)
7142 		ret = 1;
7143 
7144 	mutex_unlock(&head->mutex);
7145 	btrfs_put_delayed_ref(&head->node);
7146 	return ret;
7147 out:
7148 	spin_unlock(&head->lock);
7149 
7150 out_delayed_unlock:
7151 	spin_unlock(&delayed_refs->lock);
7152 	return 0;
7153 }
7154 
7155 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7156 			   struct btrfs_root *root,
7157 			   struct extent_buffer *buf,
7158 			   u64 parent, int last_ref)
7159 {
7160 	struct btrfs_fs_info *fs_info = root->fs_info;
7161 	int pin = 1;
7162 	int ret;
7163 
7164 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7165 		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
7166 						 buf->start, buf->len,
7167 						 parent,
7168 						 root->root_key.objectid,
7169 						 btrfs_header_level(buf),
7170 						 BTRFS_DROP_DELAYED_REF, NULL);
7171 		BUG_ON(ret); /* -ENOMEM */
7172 	}
7173 
7174 	if (!last_ref)
7175 		return;
7176 
7177 	if (btrfs_header_generation(buf) == trans->transid) {
7178 		struct btrfs_block_group_cache *cache;
7179 
7180 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7181 			ret = check_ref_cleanup(trans, buf->start);
7182 			if (!ret)
7183 				goto out;
7184 		}
7185 
7186 		cache = btrfs_lookup_block_group(fs_info, buf->start);
7187 
7188 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
7189 			pin_down_extent(fs_info, cache, buf->start,
7190 					buf->len, 1);
7191 			btrfs_put_block_group(cache);
7192 			goto out;
7193 		}
7194 
7195 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7196 
7197 		btrfs_add_free_space(cache, buf->start, buf->len);
7198 		btrfs_free_reserved_bytes(cache, buf->len, 0);
7199 		btrfs_put_block_group(cache);
7200 		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
7201 		pin = 0;
7202 	}
7203 out:
7204 	if (pin)
7205 		add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
7206 				 root->root_key.objectid);
7207 
7208 	/*
7209 	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
7210 	 * anymore.
7211 	 */
7212 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
7213 }
7214 
7215 /* Can return -ENOMEM */
7216 int btrfs_free_extent(struct btrfs_trans_handle *trans,
7217 		      struct btrfs_fs_info *fs_info,
7218 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
7219 		      u64 owner, u64 offset)
7220 {
7221 	int ret;
7222 
7223 	if (btrfs_is_testing(fs_info))
7224 		return 0;
7225 
7226 	add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
7227 
7228 	/*
7229 	 * tree log blocks never actually go into the extent allocation
7230 	 * tree, just update pinning info and exit early.
7231 	 */
7232 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
7233 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
7234 		/* unlocks the pinned mutex */
7235 		btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
7236 		ret = 0;
7237 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7238 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
7239 					num_bytes,
7240 					parent, root_objectid, (int)owner,
7241 					BTRFS_DROP_DELAYED_REF, NULL);
7242 	} else {
7243 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
7244 						num_bytes,
7245 						parent, root_objectid, owner,
7246 						offset, 0,
7247 						BTRFS_DROP_DELAYED_REF, NULL);
7248 	}
7249 	return ret;
7250 }
7251 
7252 /*
7253  * when we wait for progress in the block group caching, it's because
7254  * our allocation attempt failed at least once.  So, we must sleep
7255  * and let some progress happen before we try again.
7256  *
7257  * This function will sleep at least once waiting for new free space to
7258  * show up, and then it will check the block group free space numbers
7259  * for our min num_bytes.  Another option is to have it go ahead
7260  * and look in the rbtree for a free extent of a given size, but this
7261  * is a good start.
7262  *
7263  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7264  * any of the information in this block group.
7265  */
7266 static noinline void
7267 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7268 				u64 num_bytes)
7269 {
7270 	struct btrfs_caching_control *caching_ctl;
7271 
7272 	caching_ctl = get_caching_control(cache);
7273 	if (!caching_ctl)
7274 		return;
7275 
7276 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7277 		   (cache->free_space_ctl->free_space >= num_bytes));
7278 
7279 	put_caching_control(caching_ctl);
7280 }
7281 
7282 static noinline int
7283 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7284 {
7285 	struct btrfs_caching_control *caching_ctl;
7286 	int ret = 0;
7287 
7288 	caching_ctl = get_caching_control(cache);
7289 	if (!caching_ctl)
7290 		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7291 
7292 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
7293 	if (cache->cached == BTRFS_CACHE_ERROR)
7294 		ret = -EIO;
7295 	put_caching_control(caching_ctl);
7296 	return ret;
7297 }
7298 
7299 int __get_raid_index(u64 flags)
7300 {
7301 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
7302 		return BTRFS_RAID_RAID10;
7303 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
7304 		return BTRFS_RAID_RAID1;
7305 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
7306 		return BTRFS_RAID_DUP;
7307 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
7308 		return BTRFS_RAID_RAID0;
7309 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
7310 		return BTRFS_RAID_RAID5;
7311 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
7312 		return BTRFS_RAID_RAID6;
7313 
7314 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
7315 }
7316 
7317 int get_block_group_index(struct btrfs_block_group_cache *cache)
7318 {
7319 	return __get_raid_index(cache->flags);
7320 }
7321 
7322 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
7323 	[BTRFS_RAID_RAID10]	= "raid10",
7324 	[BTRFS_RAID_RAID1]	= "raid1",
7325 	[BTRFS_RAID_DUP]	= "dup",
7326 	[BTRFS_RAID_RAID0]	= "raid0",
7327 	[BTRFS_RAID_SINGLE]	= "single",
7328 	[BTRFS_RAID_RAID5]	= "raid5",
7329 	[BTRFS_RAID_RAID6]	= "raid6",
7330 };
7331 
7332 static const char *get_raid_name(enum btrfs_raid_types type)
7333 {
7334 	if (type >= BTRFS_NR_RAID_TYPES)
7335 		return NULL;
7336 
7337 	return btrfs_raid_type_names[type];
7338 }
7339 
7340 enum btrfs_loop_type {
7341 	LOOP_CACHING_NOWAIT = 0,
7342 	LOOP_CACHING_WAIT = 1,
7343 	LOOP_ALLOC_CHUNK = 2,
7344 	LOOP_NO_EMPTY_SIZE = 3,
7345 };
7346 
7347 static inline void
7348 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7349 		       int delalloc)
7350 {
7351 	if (delalloc)
7352 		down_read(&cache->data_rwsem);
7353 }
7354 
7355 static inline void
7356 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7357 		       int delalloc)
7358 {
7359 	btrfs_get_block_group(cache);
7360 	if (delalloc)
7361 		down_read(&cache->data_rwsem);
7362 }
7363 
7364 static struct btrfs_block_group_cache *
7365 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7366 		   struct btrfs_free_cluster *cluster,
7367 		   int delalloc)
7368 {
7369 	struct btrfs_block_group_cache *used_bg = NULL;
7370 
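	/*
	 * Grab the cluster's block group under refill_lock.  If we have to
	 * block on its data_rwsem, drop the lock and re-check that the
	 * cluster still points at the same group before returning it.
	 */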
7371 	spin_lock(&cluster->refill_lock);
7372 	while (1) {
7373 		used_bg = cluster->block_group;
7374 		if (!used_bg)
7375 			return NULL;
7376 
7377 		if (used_bg == block_group)
7378 			return used_bg;
7379 
7380 		btrfs_get_block_group(used_bg);
7381 
7382 		if (!delalloc)
7383 			return used_bg;
7384 
7385 		if (down_read_trylock(&used_bg->data_rwsem))
7386 			return used_bg;
7387 
7388 		spin_unlock(&cluster->refill_lock);
7389 
7390 		/* We should only have one level of nesting here. */
7391 		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
7392 
7393 		spin_lock(&cluster->refill_lock);
7394 		if (used_bg == cluster->block_group)
7395 			return used_bg;
7396 
7397 		up_read(&used_bg->data_rwsem);
7398 		btrfs_put_block_group(used_bg);
7399 	}
7400 }
7401 
7402 static inline void
7403 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7404 			 int delalloc)
7405 {
7406 	if (delalloc)
7407 		up_read(&cache->data_rwsem);
7408 	btrfs_put_block_group(cache);
7409 }
7410 
7411 /*
7412  * walks the btree of allocated extents and finds a hole of a given size.
7413  * The key ins is changed to record the hole:
7414  * ins->objectid == start position
7415  * ins->type == BTRFS_EXTENT_ITEM_KEY
7416  * ins->offset == the size of the hole.
7417  * Any available blocks before search_start are skipped.
7418  *
7419  * If there is no suitable free space, we will record the max size of
7420  * the free space extent currently.
7421  */
7422 static noinline int find_free_extent(struct btrfs_root *orig_root,
7423 				u64 ram_bytes, u64 num_bytes, u64 empty_size,
7424 				u64 hint_byte, struct btrfs_key *ins,
7425 				u64 flags, int delalloc)
7426 {
7427 	struct btrfs_fs_info *fs_info = orig_root->fs_info;
7428 	int ret = 0;
7429 	struct btrfs_root *root = fs_info->extent_root;
7430 	struct btrfs_free_cluster *last_ptr = NULL;
7431 	struct btrfs_block_group_cache *block_group = NULL;
7432 	u64 search_start = 0;
7433 	u64 max_extent_size = 0;
7434 	u64 empty_cluster = 0;
7435 	struct btrfs_space_info *space_info;
7436 	int loop = 0;
7437 	int index = __get_raid_index(flags);
7438 	bool failed_cluster_refill = false;
7439 	bool failed_alloc = false;
7440 	bool use_cluster = true;
7441 	bool have_caching_bg = false;
7442 	bool orig_have_caching_bg = false;
7443 	bool full_search = false;
7444 
7445 	WARN_ON(num_bytes < fs_info->sectorsize);
7446 	ins->type = BTRFS_EXTENT_ITEM_KEY;
7447 	ins->objectid = 0;
7448 	ins->offset = 0;
7449 
7450 	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
7451 
7452 	space_info = __find_space_info(fs_info, flags);
7453 	if (!space_info) {
7454 		btrfs_err(fs_info, "No space info for %llu", flags);
7455 		return -ENOSPC;
7456 	}
7457 
7458 	/*
7459 	 * If our free space is heavily fragmented we may not be able to make
7460 	 * big contiguous allocations, so instead of doing the expensive search
7461 	 * for free space, simply return ENOSPC with our max_extent_size so we
7462 	 * can go ahead and search for a more manageable chunk.
7463 	 *
7464 	 * If our max_extent_size is large enough for our allocation simply
7465 	 * disable clustering since we will likely not be able to find enough
7466 	 * space to create a cluster and induce latency trying.
7467 	 */
7468 	if (unlikely(space_info->max_extent_size)) {
7469 		spin_lock(&space_info->lock);
7470 		if (space_info->max_extent_size &&
7471 		    num_bytes > space_info->max_extent_size) {
7472 			ins->offset = space_info->max_extent_size;
7473 			spin_unlock(&space_info->lock);
7474 			return -ENOSPC;
7475 		} else if (space_info->max_extent_size) {
7476 			use_cluster = false;
7477 		}
7478 		spin_unlock(&space_info->lock);
7479 	}
7480 
7481 	last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
7482 	if (last_ptr) {
7483 		spin_lock(&last_ptr->lock);
7484 		if (last_ptr->block_group)
7485 			hint_byte = last_ptr->window_start;
7486 		if (last_ptr->fragmented) {
7487 			/*
7488 			 * We still set window_start so we can keep track of the
7489 			 * last place we found an allocation to try and save
7490 			 * some time.
7491 			 */
7492 			hint_byte = last_ptr->window_start;
7493 			use_cluster = false;
7494 		}
7495 		spin_unlock(&last_ptr->lock);
7496 	}
7497 
7498 	search_start = max(search_start, first_logical_byte(fs_info, 0));
7499 	search_start = max(search_start, hint_byte);
7500 	if (search_start == hint_byte) {
7501 		block_group = btrfs_lookup_block_group(fs_info, search_start);
7502 		/*
7503 		 * we don't want to use the block group if it doesn't match our
7504 		 * allocation bits, or if it's not cached.
7505 		 *
7506 		 * However if we are re-searching with an ideal block group
7507 		 * picked out then we don't care that the block group is cached.
7508 		 */
7509 		if (block_group && block_group_bits(block_group, flags) &&
7510 		    block_group->cached != BTRFS_CACHE_NO) {
7511 			down_read(&space_info->groups_sem);
7512 			if (list_empty(&block_group->list) ||
7513 			    block_group->ro) {
7514 				/*
7515 				 * someone is removing this block group,
7516 				 * we can't jump into the have_block_group
7517 				 * target because our list pointers are not
7518 				 * valid
7519 				 */
7520 				btrfs_put_block_group(block_group);
7521 				up_read(&space_info->groups_sem);
7522 			} else {
7523 				index = get_block_group_index(block_group);
7524 				btrfs_lock_block_group(block_group, delalloc);
7525 				goto have_block_group;
7526 			}
7527 		} else if (block_group) {
7528 			btrfs_put_block_group(block_group);
7529 		}
7530 	}
7531 search:
7532 	have_caching_bg = false;
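	/*
	 * Starting at index 0 (or at the ideal index for these flags) means
	 * this pass will visit every candidate block group.
	 */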
7533 	if (index == 0 || index == __get_raid_index(flags))
7534 		full_search = true;
7535 	down_read(&space_info->groups_sem);
7536 	list_for_each_entry(block_group, &space_info->block_groups[index],
7537 			    list) {
7538 		u64 offset;
7539 		int cached;
7540 
7541 		btrfs_grab_block_group(block_group, delalloc);
7542 		search_start = block_group->key.objectid;
7543 
7544 		/*
7545 		 * this can happen if we end up cycling through all the
7546 		 * raid types, but we want to make sure we only allocate
7547 		 * for the proper type.
7548 		 */
7549 		if (!block_group_bits(block_group, flags)) {
7550 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
7551 				    BTRFS_BLOCK_GROUP_RAID1 |
7552 				    BTRFS_BLOCK_GROUP_RAID5 |
7553 				    BTRFS_BLOCK_GROUP_RAID6 |
7554 				    BTRFS_BLOCK_GROUP_RAID10;
7555 
7556 			/*
7557 			 * if they asked for extra copies and this block group
7558 			 * doesn't provide them, bail.  This does allow us to
7559 			 * fill raid0 from raid1.
7560 			 */
7561 			if ((flags & extra) && !(block_group->flags & extra))
7562 				goto loop;
7563 		}
7564 
7565 have_block_group:
7566 		cached = block_group_cache_done(block_group);
7567 		if (unlikely(!cached)) {
7568 			have_caching_bg = true;
7569 			ret = cache_block_group(block_group, 0);
7570 			BUG_ON(ret < 0);
7571 			ret = 0;
7572 		}
7573 
7574 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7575 			goto loop;
7576 		if (unlikely(block_group->ro))
7577 			goto loop;
7578 
7579 		/*
7580 		 * Ok we want to try and use the cluster allocator, so
7581 		 * lets look there
7582 		 */
7583 		if (last_ptr && use_cluster) {
7584 			struct btrfs_block_group_cache *used_block_group;
7585 			unsigned long aligned_cluster;
7586 			/*
7587 			 * the refill lock keeps out other
7588 			 * people trying to start a new cluster
7589 			 */
7590 			used_block_group = btrfs_lock_cluster(block_group,
7591 							      last_ptr,
7592 							      delalloc);
7593 			if (!used_block_group)
7594 				goto refill_cluster;
7595 
7596 			if (used_block_group != block_group &&
7597 			    (used_block_group->ro ||
7598 			     !block_group_bits(used_block_group, flags)))
7599 				goto release_cluster;
7600 
7601 			offset = btrfs_alloc_from_cluster(used_block_group,
7602 						last_ptr,
7603 						num_bytes,
7604 						used_block_group->key.objectid,
7605 						&max_extent_size);
7606 			if (offset) {
7607 				/* we have a block, we're done */
7608 				spin_unlock(&last_ptr->refill_lock);
7609 				trace_btrfs_reserve_extent_cluster(fs_info,
7610 						used_block_group,
7611 						search_start, num_bytes);
7612 				if (used_block_group != block_group) {
7613 					btrfs_release_block_group(block_group,
7614 								  delalloc);
7615 					block_group = used_block_group;
7616 				}
7617 				goto checks;
7618 			}
7619 
7620 			WARN_ON(last_ptr->block_group != used_block_group);
7621 release_cluster:
7622 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
7623 			 * set up a new cluster, so let's just skip it
7624 			 * and let the allocator find whatever block
7625 			 * it can find.  If we reach this point, we
7626 			 * will have tried the cluster allocator
7627 			 * plenty of times and not have found
7628 			 * anything, so we are likely way too
7629 			 * fragmented for the clustering stuff to find
7630 			 * anything.
7631 			 *
7632 			 * However, if the cluster is taken from the
7633 			 * current block group, release the cluster
7634 			 * first, so that we stand a better chance of
7635 			 * succeeding in the unclustered
7636 			 * allocation.  */
7637 			if (loop >= LOOP_NO_EMPTY_SIZE &&
7638 			    used_block_group != block_group) {
7639 				spin_unlock(&last_ptr->refill_lock);
7640 				btrfs_release_block_group(used_block_group,
7641 							  delalloc);
7642 				goto unclustered_alloc;
7643 			}
7644 
7645 			/*
7646 			 * this cluster didn't work out, free it and
7647 			 * start over
7648 			 */
7649 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
7650 
7651 			if (used_block_group != block_group)
7652 				btrfs_release_block_group(used_block_group,
7653 							  delalloc);
7654 refill_cluster:
7655 			if (loop >= LOOP_NO_EMPTY_SIZE) {
7656 				spin_unlock(&last_ptr->refill_lock);
7657 				goto unclustered_alloc;
7658 			}
7659 
7660 			aligned_cluster = max_t(unsigned long,
7661 						empty_cluster + empty_size,
7662 					      block_group->full_stripe_len);
7663 
7664 			/* allocate a cluster in this block group */
7665 			ret = btrfs_find_space_cluster(fs_info, block_group,
7666 						       last_ptr, search_start,
7667 						       num_bytes,
7668 						       aligned_cluster);
7669 			if (ret == 0) {
7670 				/*
7671 				 * now pull our allocation out of this
7672 				 * cluster
7673 				 */
7674 				offset = btrfs_alloc_from_cluster(block_group,
7675 							last_ptr,
7676 							num_bytes,
7677 							search_start,
7678 							&max_extent_size);
7679 				if (offset) {
7680 					/* we found one, proceed */
7681 					spin_unlock(&last_ptr->refill_lock);
7682 					trace_btrfs_reserve_extent_cluster(fs_info,
7683 						block_group, search_start,
7684 						num_bytes);
7685 					goto checks;
7686 				}
7687 			} else if (!cached && loop > LOOP_CACHING_NOWAIT
7688 				   && !failed_cluster_refill) {
7689 				spin_unlock(&last_ptr->refill_lock);
7690 
7691 				failed_cluster_refill = true;
7692 				wait_block_group_cache_progress(block_group,
7693 				       num_bytes + empty_cluster + empty_size);
7694 				goto have_block_group;
7695 			}
7696 
7697 			/*
7698 			 * at this point we either didn't find a cluster
7699 			 * or we weren't able to allocate a block from our
7700 			 * cluster.  Free the cluster we've been trying
7701 			 * to use, and go to the next block group
7702 			 */
7703 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
7704 			spin_unlock(&last_ptr->refill_lock);
7705 			goto loop;
7706 		}
7707 
7708 unclustered_alloc:
7709 		/*
7710 		 * We are doing an unclustered alloc, set the fragmented flag so
7711 		 * we don't bother trying to setup a cluster again until we get
7712 		 * more space.
7713 		 */
7714 		if (unlikely(last_ptr)) {
7715 			spin_lock(&last_ptr->lock);
7716 			last_ptr->fragmented = 1;
7717 			spin_unlock(&last_ptr->lock);
7718 		}
7719 		spin_lock(&block_group->free_space_ctl->tree_lock);
7720 		if (cached &&
7721 		    block_group->free_space_ctl->free_space <
7722 		    num_bytes + empty_cluster + empty_size) {
7723 			if (block_group->free_space_ctl->free_space >
7724 			    max_extent_size)
7725 				max_extent_size =
7726 					block_group->free_space_ctl->free_space;
7727 			spin_unlock(&block_group->free_space_ctl->tree_lock);
7728 			goto loop;
7729 		}
7730 		spin_unlock(&block_group->free_space_ctl->tree_lock);
7731 
7732 		offset = btrfs_find_space_for_alloc(block_group, search_start,
7733 						    num_bytes, empty_size,
7734 						    &max_extent_size);
7735 		/*
7736 		 * If we didn't find a chunk, and we haven't failed on this
7737 		 * block group before, and this block group is in the middle of
7738 		 * caching and we are ok with waiting, then go ahead and wait
7739 		 * for progress to be made, and set failed_alloc to true.
7740 		 *
7741 		 * If failed_alloc is true then we've already waited on this
7742 		 * block group once and should move on to the next block group.
7743 		 */
7744 		if (!offset && !failed_alloc && !cached &&
7745 		    loop > LOOP_CACHING_NOWAIT) {
7746 			wait_block_group_cache_progress(block_group,
7747 						num_bytes + empty_size);
7748 			failed_alloc = true;
7749 			goto have_block_group;
7750 		} else if (!offset) {
7751 			goto loop;
7752 		}
7753 checks:
7754 		search_start = ALIGN(offset, fs_info->stripesize);
7755 
7756 		/* move on to the next group */
7757 		if (search_start + num_bytes >
7758 		    block_group->key.objectid + block_group->key.offset) {
7759 			btrfs_add_free_space(block_group, offset, num_bytes);
7760 			goto loop;
7761 		}
7762 
7763 		if (offset < search_start)
7764 			btrfs_add_free_space(block_group, offset,
7765 					     search_start - offset);
7766 		BUG_ON(offset > search_start);
7767 
7768 		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
7769 				num_bytes, delalloc);
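		/*
		 * -EAGAIN means the block group went read-only under us;
		 * return the space and move on to the next group.
		 */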
7770 		if (ret == -EAGAIN) {
7771 			btrfs_add_free_space(block_group, offset, num_bytes);
7772 			goto loop;
7773 		}
7774 		btrfs_inc_block_group_reservations(block_group);
7775 
7776 		/* we are all good, lets return */
7777 		ins->objectid = search_start;
7778 		ins->offset = num_bytes;
7779 
7780 		trace_btrfs_reserve_extent(fs_info, block_group,
7781 					   search_start, num_bytes);
7782 		btrfs_release_block_group(block_group, delalloc);
7783 		break;
7784 loop:
7785 		failed_cluster_refill = false;
7786 		failed_alloc = false;
7787 		BUG_ON(index != get_block_group_index(block_group));
7788 		btrfs_release_block_group(block_group, delalloc);
7789 	}
7790 	up_read(&space_info->groups_sem);
7791 
7792 	if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7793 		&& !orig_have_caching_bg)
7794 		orig_have_caching_bg = true;
7795 
7796 	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7797 		goto search;
7798 
7799 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7800 		goto search;
7801 
7802 	/*
7803 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7804 	 *			caching kthreads as we move along
7805 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7806 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7807 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7808 	 *			again
7809 	 */
7810 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7811 		index = 0;
7812 		if (loop == LOOP_CACHING_NOWAIT) {
7813 			/*
7814 			 * We want to skip the LOOP_CACHING_WAIT step if we
7815 			 * don't have any uncached bgs and we've already done a
7816 			 * full search through.
7817 			 */
7818 			if (orig_have_caching_bg || !full_search)
7819 				loop = LOOP_CACHING_WAIT;
7820 			else
7821 				loop = LOOP_ALLOC_CHUNK;
7822 		} else {
7823 			loop++;
7824 		}
7825 
7826 		if (loop == LOOP_ALLOC_CHUNK) {
7827 			struct btrfs_trans_handle *trans;
7828 			int exist = 0;
7829 
7830 			trans = current->journal_info;
7831 			if (trans)
7832 				exist = 1;
7833 			else
7834 				trans = btrfs_join_transaction(root);
7835 
7836 			if (IS_ERR(trans)) {
7837 				ret = PTR_ERR(trans);
7838 				goto out;
7839 			}
7840 
7841 			ret = do_chunk_alloc(trans, fs_info, flags,
7842 					     CHUNK_ALLOC_FORCE);
7843 
7844 			/*
7845 			 * If we can't allocate a new chunk we've already looped
7846 			 * through at least once, move on to the NO_EMPTY_SIZE
7847 			 * case.
7848 			 */
7849 			if (ret == -ENOSPC)
7850 				loop = LOOP_NO_EMPTY_SIZE;
7851 
7852 			/*
7853 			 * Do not bail out on ENOSPC since we
7854 			 * can do more things.
7855 			 */
7856 			if (ret < 0 && ret != -ENOSPC)
7857 				btrfs_abort_transaction(trans, ret);
7858 			else
7859 				ret = 0;
7860 			if (!exist)
7861 				btrfs_end_transaction(trans);
7862 			if (ret)
7863 				goto out;
7864 		}
7865 
7866 		if (loop == LOOP_NO_EMPTY_SIZE) {
7867 			/*
7868 			 * Don't loop again if we already have no empty_size and
7869 			 * no empty_cluster.
7870 			 */
7871 			if (empty_size == 0 &&
7872 			    empty_cluster == 0) {
7873 				ret = -ENOSPC;
7874 				goto out;
7875 			}
7876 			empty_size = 0;
7877 			empty_cluster = 0;
7878 		}
7879 
7880 		goto search;
7881 	} else if (!ins->objectid) {
7882 		ret = -ENOSPC;
7883 	} else if (ins->objectid) {
7884 		if (!use_cluster && last_ptr) {
7885 			spin_lock(&last_ptr->lock);
7886 			last_ptr->window_start = ins->objectid;
7887 			spin_unlock(&last_ptr->lock);
7888 		}
7889 		ret = 0;
7890 	}
7891 out:
7892 	if (ret == -ENOSPC) {
7893 		spin_lock(&space_info->lock);
7894 		space_info->max_extent_size = max_extent_size;
7895 		spin_unlock(&space_info->lock);
7896 		ins->offset = max_extent_size;
7897 	}
7898 	return ret;
7899 }
7900 
7901 static void dump_space_info(struct btrfs_fs_info *fs_info,
7902 			    struct btrfs_space_info *info, u64 bytes,
7903 			    int dump_block_groups)
7904 {
7905 	struct btrfs_block_group_cache *cache;
7906 	int index = 0;
7907 
7908 	spin_lock(&info->lock);
7909 	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
7910 		   info->flags,
7911 		   info->total_bytes - info->bytes_used - info->bytes_pinned -
7912 		   info->bytes_reserved - info->bytes_readonly -
7913 		   info->bytes_may_use, (info->full) ? "" : "not ");
7914 	btrfs_info(fs_info,
7915 		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
7916 		info->total_bytes, info->bytes_used, info->bytes_pinned,
7917 		info->bytes_reserved, info->bytes_may_use,
7918 		info->bytes_readonly);
7919 	spin_unlock(&info->lock);
7920 
7921 	if (!dump_block_groups)
7922 		return;
7923 
7924 	down_read(&info->groups_sem);
7925 again:
7926 	list_for_each_entry(cache, &info->block_groups[index], list) {
7927 		spin_lock(&cache->lock);
7928 		btrfs_info(fs_info,
7929 			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
7930 			cache->key.objectid, cache->key.offset,
7931 			btrfs_block_group_used(&cache->item), cache->pinned,
7932 			cache->reserved, cache->ro ? "[readonly]" : "");
7933 		btrfs_dump_free_space(cache, bytes);
7934 		spin_unlock(&cache->lock);
7935 	}
7936 	if (++index < BTRFS_NR_RAID_TYPES)
7937 		goto again;
7938 	up_read(&info->groups_sem);
7939 }
7940 
7941 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
7942 			 u64 num_bytes, u64 min_alloc_size,
7943 			 u64 empty_size, u64 hint_byte,
7944 			 struct btrfs_key *ins, int is_data, int delalloc)
7945 {
7946 	struct btrfs_fs_info *fs_info = root->fs_info;
7947 	bool final_tried = num_bytes == min_alloc_size;
7948 	u64 flags;
7949 	int ret;
7950 
7951 	flags = btrfs_get_alloc_profile(root, is_data);
7952 again:
7953 	WARN_ON(num_bytes < fs_info->sectorsize);
7954 	ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
7955 			       hint_byte, ins, flags, delalloc);
7956 	if (!ret && !is_data) {
7957 		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
7958 	} else if (ret == -ENOSPC) {
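		/*
		 * ins->offset carries the largest free extent we saw; halve
		 * the request toward min_alloc_size and retry before giving
		 * up.
		 */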
7959 		if (!final_tried && ins->offset) {
7960 			num_bytes = min(num_bytes >> 1, ins->offset);
7961 			num_bytes = round_down(num_bytes,
7962 					       fs_info->sectorsize);
7963 			num_bytes = max(num_bytes, min_alloc_size);
7964 			ram_bytes = num_bytes;
7965 			if (num_bytes == min_alloc_size)
7966 				final_tried = true;
7967 			goto again;
7968 		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
7969 			struct btrfs_space_info *sinfo;
7970 
7971 			sinfo = __find_space_info(fs_info, flags);
7972 			btrfs_err(fs_info,
7973 				  "allocation failed flags %llu, wanted %llu",
7974 				  flags, num_bytes);
7975 			if (sinfo)
7976 				dump_space_info(fs_info, sinfo, num_bytes, 1);
7977 		}
7978 	}
7979 
7980 	return ret;
7981 }
7982 
7983 static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
7984 					u64 start, u64 len,
7985 					int pin, int delalloc)
7986 {
7987 	struct btrfs_block_group_cache *cache;
7988 	int ret = 0;
7989 
7990 	cache = btrfs_lookup_block_group(fs_info, start);
7991 	if (!cache) {
7992 		btrfs_err(fs_info, "Unable to find block group for %llu",
7993 			  start);
7994 		return -ENOSPC;
7995 	}
7996 
7997 	if (pin)
7998 		pin_down_extent(fs_info, cache, start, len, 1);
7999 	else {
8000 		if (btrfs_test_opt(fs_info, DISCARD))
8001 			ret = btrfs_discard_extent(fs_info, start, len, NULL);
8002 		btrfs_add_free_space(cache, start, len);
8003 		btrfs_free_reserved_bytes(cache, len, delalloc);
8004 		trace_btrfs_reserved_extent_free(fs_info, start, len);
8005 	}
8006 
8007 	btrfs_put_block_group(cache);
8008 	return ret;
8009 }
8010 
8011 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
8012 			       u64 start, u64 len, int delalloc)
8013 {
8014 	return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
8015 }
8016 
8017 int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
8018 				       u64 start, u64 len)
8019 {
8020 	return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
8021 }
8022 
8023 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8024 				      struct btrfs_fs_info *fs_info,
8025 				      u64 parent, u64 root_objectid,
8026 				      u64 flags, u64 owner, u64 offset,
8027 				      struct btrfs_key *ins, int ref_mod)
8028 {
8029 	int ret;
8030 	struct btrfs_extent_item *extent_item;
8031 	struct btrfs_extent_inline_ref *iref;
8032 	struct btrfs_path *path;
8033 	struct extent_buffer *leaf;
8034 	int type;
8035 	u32 size;
8036 
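	/*
	 * A non-zero parent means a full backref: the ref is keyed by the
	 * parent tree block.  Otherwise an explicit data ref keyed by
	 * root/owner/offset is used.
	 */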
8037 	if (parent > 0)
8038 		type = BTRFS_SHARED_DATA_REF_KEY;
8039 	else
8040 		type = BTRFS_EXTENT_DATA_REF_KEY;
8041 
8042 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
8043 
8044 	path = btrfs_alloc_path();
8045 	if (!path)
8046 		return -ENOMEM;
8047 
8048 	path->leave_spinning = 1;
8049 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8050 				      ins, size);
8051 	if (ret) {
8052 		btrfs_free_path(path);
8053 		return ret;
8054 	}
8055 
8056 	leaf = path->nodes[0];
8057 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
8058 				     struct btrfs_extent_item);
8059 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
8060 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8061 	btrfs_set_extent_flags(leaf, extent_item,
8062 			       flags | BTRFS_EXTENT_FLAG_DATA);
8063 
8064 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8065 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
8066 	if (parent > 0) {
8067 		struct btrfs_shared_data_ref *ref;
8068 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
8069 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8070 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
8071 	} else {
8072 		struct btrfs_extent_data_ref *ref;
8073 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
8074 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
8075 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
8076 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
8077 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
8078 	}
8079 
8080 	btrfs_mark_buffer_dirty(path->nodes[0]);
8081 	btrfs_free_path(path);
8082 
8083 	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8084 					  ins->offset);
8085 	if (ret)
8086 		return ret;
8087 
8088 	ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
8089 	if (ret) { /* -ENOENT, logic error */
8090 		btrfs_err(fs_info, "update block group failed for %llu %llu",
8091 			ins->objectid, ins->offset);
8092 		BUG();
8093 	}
8094 	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
8095 	return ret;
8096 }
8097 
8098 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
8099 				     struct btrfs_fs_info *fs_info,
8100 				     u64 parent, u64 root_objectid,
8101 				     u64 flags, struct btrfs_disk_key *key,
8102 				     int level, struct btrfs_key *ins)
8103 {
8104 	int ret;
8105 	struct btrfs_extent_item *extent_item;
8106 	struct btrfs_tree_block_info *block_info;
8107 	struct btrfs_extent_inline_ref *iref;
8108 	struct btrfs_path *path;
8109 	struct extent_buffer *leaf;
8110 	u32 size = sizeof(*extent_item) + sizeof(*iref);
8111 	u64 num_bytes = ins->offset;
8112 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
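	/*
	 * Skinny metadata items carry the block level in the key offset, so
	 * no separate btrfs_tree_block_info needs to be stored.
	 */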
8113 
8114 	if (!skinny_metadata)
8115 		size += sizeof(*block_info);
8116 
8117 	path = btrfs_alloc_path();
8118 	if (!path) {
8119 		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
8120 						   fs_info->nodesize);
8121 		return -ENOMEM;
8122 	}
8123 
8124 	path->leave_spinning = 1;
8125 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8126 				      ins, size);
8127 	if (ret) {
8128 		btrfs_free_path(path);
8129 		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
8130 						   fs_info->nodesize);
8131 		return ret;
8132 	}
8133 
8134 	leaf = path->nodes[0];
8135 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
8136 				     struct btrfs_extent_item);
8137 	btrfs_set_extent_refs(leaf, extent_item, 1);
8138 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8139 	btrfs_set_extent_flags(leaf, extent_item,
8140 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
8141 
8142 	if (skinny_metadata) {
8143 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8144 		num_bytes = fs_info->nodesize;
8145 	} else {
8146 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
8147 		btrfs_set_tree_block_key(leaf, block_info, key);
8148 		btrfs_set_tree_block_level(leaf, block_info, level);
8149 		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
8150 	}
8151 
8152 	if (parent > 0) {
8153 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
8154 		btrfs_set_extent_inline_ref_type(leaf, iref,
8155 						 BTRFS_SHARED_BLOCK_REF_KEY);
8156 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8157 	} else {
8158 		btrfs_set_extent_inline_ref_type(leaf, iref,
8159 						 BTRFS_TREE_BLOCK_REF_KEY);
8160 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
8161 	}
8162 
8163 	btrfs_mark_buffer_dirty(leaf);
8164 	btrfs_free_path(path);
8165 
8166 	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8167 					  num_bytes);
8168 	if (ret)
8169 		return ret;
8170 
8171 	ret = update_block_group(trans, fs_info, ins->objectid,
8172 				 fs_info->nodesize, 1);
8173 	if (ret) { /* -ENOENT, logic error */
8174 		btrfs_err(fs_info, "update block group failed for %llu %llu",
8175 			ins->objectid, ins->offset);
8176 		BUG();
8177 	}
8178 
8179 	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid,
8180 					  fs_info->nodesize);
8181 	return ret;
8182 }
8183 
8184 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8185 				     u64 root_objectid, u64 owner,
8186 				     u64 offset, u64 ram_bytes,
8187 				     struct btrfs_key *ins)
8188 {
8189 	struct btrfs_fs_info *fs_info = trans->fs_info;
8190 	int ret;
8191 
8192 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
8193 
8194 	ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
8195 					 ins->offset, 0,
8196 					 root_objectid, owner, offset,
8197 					 ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
8198 					 NULL);
8199 	return ret;
8200 }
8201 
8202 /*
8203  * this is used by the tree logging recovery code.  It records that
8204  * an extent has been allocated and makes sure to clear the free
8205  * space cache bits as well
8206  */
8207 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8208 				   struct btrfs_fs_info *fs_info,
8209 				   u64 root_objectid, u64 owner, u64 offset,
8210 				   struct btrfs_key *ins)
8211 {
8212 	int ret;
8213 	struct btrfs_block_group_cache *block_group;
8214 	struct btrfs_space_info *space_info;
8215 
8216 	/*
8217 	 * On a mixed-block-group fs the logged extents are excluded before the log
8218 	 * is processed, so we only need the exclude dance here if this fs isn't mixed.
8219 	 */
8220 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
8221 		ret = __exclude_logged_extent(fs_info, ins->objectid,
8222 					      ins->offset);
8223 		if (ret)
8224 			return ret;
8225 	}
8226 
8227 	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
8228 	if (!block_group)
8229 		return -EINVAL;
8230 
8231 	space_info = block_group->space_info;
8232 	spin_lock(&space_info->lock);
8233 	spin_lock(&block_group->lock);
8234 	space_info->bytes_reserved += ins->offset;
8235 	block_group->reserved += ins->offset;
8236 	spin_unlock(&block_group->lock);
8237 	spin_unlock(&space_info->lock);
8238 
8239 	ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
8240 					 0, owner, offset, ins, 1);
8241 	btrfs_put_block_group(block_group);
8242 	return ret;
8243 }
8244 
8245 static struct extent_buffer *
8246 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8247 		      u64 bytenr, int level)
8248 {
8249 	struct btrfs_fs_info *fs_info = root->fs_info;
8250 	struct extent_buffer *buf;
8251 
8252 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
8253 	if (IS_ERR(buf))
8254 		return buf;
8255 
8256 	btrfs_set_header_generation(buf, trans->transid);
8257 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8258 	btrfs_tree_lock(buf);
8259 	clean_tree_block(trans, fs_info, buf);
8260 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8261 
8262 	btrfs_set_lock_blocking(buf);
8263 	set_extent_buffer_uptodate(buf);
8264 
8265 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8266 		buf->log_index = root->log_transid % 2;
8267 		/*
8268 		 * we allow two log transactions at a time, use different
8269 		 * EXTENT bit to differentiate dirty pages.
8270 		 */
8271 		if (buf->log_index == 0)
8272 			set_extent_dirty(&root->dirty_log_pages, buf->start,
8273 					buf->start + buf->len - 1, GFP_NOFS);
8274 		else
8275 			set_extent_new(&root->dirty_log_pages, buf->start,
8276 					buf->start + buf->len - 1);
8277 	} else {
8278 		buf->log_index = -1;
8279 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8280 			 buf->start + buf->len - 1, GFP_NOFS);
8281 	}
8282 	trans->dirty = true;
8283 	/* this returns a buffer locked for blocking */
8284 	return buf;
8285 }
8286 
8287 static struct btrfs_block_rsv *
8288 use_block_rsv(struct btrfs_trans_handle *trans,
8289 	      struct btrfs_root *root, u32 blocksize)
8290 {
8291 	struct btrfs_fs_info *fs_info = root->fs_info;
8292 	struct btrfs_block_rsv *block_rsv;
8293 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
8294 	int ret;
8295 	bool global_updated = false;
8296 
8297 	block_rsv = get_block_rsv(trans, root);
8298 
8299 	if (unlikely(block_rsv->size == 0))
8300 		goto try_reserve;
8301 again:
8302 	ret = block_rsv_use_bytes(block_rsv, blocksize);
8303 	if (!ret)
8304 		return block_rsv;
8305 
8306 	if (block_rsv->failfast)
8307 		return ERR_PTR(ret);
8308 
8309 	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8310 		global_updated = true;
8311 		update_global_block_rsv(fs_info);
8312 		goto again;
8313 	}
8314 
8315 	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
8316 		static DEFINE_RATELIMIT_STATE(_rs,
8317 				DEFAULT_RATELIMIT_INTERVAL * 10,
8318 				/*DEFAULT_RATELIMIT_BURST*/ 1);
8319 		if (__ratelimit(&_rs))
8320 			WARN(1, KERN_DEBUG
8321 				"BTRFS: block rsv returned %d\n", ret);
8322 	}
8323 try_reserve:
8324 	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8325 				     BTRFS_RESERVE_NO_FLUSH);
8326 	if (!ret)
8327 		return block_rsv;
8328 	/*
8329 	 * If we couldn't reserve metadata bytes, try to use some from the
8330 	 * global reserve if our block rsv shares its space_info with the
8331 	 * global reserve.
8332 	 */
8333 	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8334 	    block_rsv->space_info == global_rsv->space_info) {
8335 		ret = block_rsv_use_bytes(global_rsv, blocksize);
8336 		if (!ret)
8337 			return global_rsv;
8338 	}
8339 	return ERR_PTR(ret);
8340 }
8341 
8342 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8343 			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
8344 {
8345 	block_rsv_add_bytes(block_rsv, blocksize, 0);
8346 	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
8347 }
8348 
8349 /*
8350  * finds a free extent and does all the dirty work required for allocation;
8351  * returns the tree buffer or an ERR_PTR on error.
8352  */
8353 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8354 					struct btrfs_root *root,
8355 					u64 parent, u64 root_objectid,
8356 					struct btrfs_disk_key *key, int level,
8357 					u64 hint, u64 empty_size)
8358 {
8359 	struct btrfs_fs_info *fs_info = root->fs_info;
8360 	struct btrfs_key ins;
8361 	struct btrfs_block_rsv *block_rsv;
8362 	struct extent_buffer *buf;
8363 	struct btrfs_delayed_extent_op *extent_op;
8364 	u64 flags = 0;
8365 	int ret;
8366 	u32 blocksize = fs_info->nodesize;
8367 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
8368 
8369 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8370 	if (btrfs_is_testing(fs_info)) {
8371 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8372 					    level);
8373 		if (!IS_ERR(buf))
8374 			root->alloc_bytenr += blocksize;
8375 		return buf;
8376 	}
8377 #endif
8378 
8379 	block_rsv = use_block_rsv(trans, root, blocksize);
8380 	if (IS_ERR(block_rsv))
8381 		return ERR_CAST(block_rsv);
8382 
8383 	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
8384 				   empty_size, hint, &ins, 0, 0);
8385 	if (ret)
8386 		goto out_unuse;
8387 
8388 	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
8389 	if (IS_ERR(buf)) {
8390 		ret = PTR_ERR(buf);
8391 		goto out_free_reserved;
8392 	}
8393 
8394 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8395 		if (parent == 0)
8396 			parent = ins.objectid;
8397 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8398 	} else
8399 		BUG_ON(parent > 0);
8400 
8401 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8402 		extent_op = btrfs_alloc_delayed_extent_op();
8403 		if (!extent_op) {
8404 			ret = -ENOMEM;
8405 			goto out_free_buf;
8406 		}
8407 		if (key)
8408 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
8409 		else
8410 			memset(&extent_op->key, 0, sizeof(extent_op->key));
8411 		extent_op->flags_to_set = flags;
8412 		extent_op->update_key = skinny_metadata ? false : true;
8413 		extent_op->update_flags = true;
8414 		extent_op->is_data = false;
8415 		extent_op->level = level;
8416 
8417 		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
8418 						 ins.objectid, ins.offset,
8419 						 parent, root_objectid, level,
8420 						 BTRFS_ADD_DELAYED_EXTENT,
8421 						 extent_op);
8422 		if (ret)
8423 			goto out_free_delayed;
8424 	}
8425 	return buf;
8426 
8427 out_free_delayed:
8428 	btrfs_free_delayed_extent_op(extent_op);
8429 out_free_buf:
8430 	free_extent_buffer(buf);
8431 out_free_reserved:
8432 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
8433 out_unuse:
8434 	unuse_block_rsv(fs_info, block_rsv, blocksize);
8435 	return ERR_PTR(ret);
8436 }
8437 
8438 struct walk_control {
8439 	u64 refs[BTRFS_MAX_LEVEL];
8440 	u64 flags[BTRFS_MAX_LEVEL];
8441 	struct btrfs_key update_progress;
8442 	int stage;
8443 	int level;
8444 	int shared_level;
8445 	int update_ref;
8446 	int keep_locks;
8447 	int reada_slot;
8448 	int reada_count;
8449 	int for_reloc;
8450 };
8451 
8452 #define DROP_REFERENCE	1
8453 #define UPDATE_BACKREF	2
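/*
 * Walk stages: DROP_REFERENCE drops references as we walk down the tree;
 * UPDATE_BACKREF converts the back refs of a shared subtree to full back
 * refs before dropping continues.
 */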
8454 
8455 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8456 				     struct btrfs_root *root,
8457 				     struct walk_control *wc,
8458 				     struct btrfs_path *path)
8459 {
8460 	struct btrfs_fs_info *fs_info = root->fs_info;
8461 	u64 bytenr;
8462 	u64 generation;
8463 	u64 refs;
8464 	u64 flags;
8465 	u32 nritems;
8466 	struct btrfs_key key;
8467 	struct extent_buffer *eb;
8468 	int ret;
8469 	int slot;
8470 	int nread = 0;
8471 
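	/*
	 * If processing hasn't caught up to the last readahead slot yet,
	 * shrink the readahead window; otherwise grow it, capped at the
	 * number of pointers per node.
	 */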
8472 	if (path->slots[wc->level] < wc->reada_slot) {
8473 		wc->reada_count = wc->reada_count * 2 / 3;
8474 		wc->reada_count = max(wc->reada_count, 2);
8475 	} else {
8476 		wc->reada_count = wc->reada_count * 3 / 2;
8477 		wc->reada_count = min_t(int, wc->reada_count,
8478 					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
8479 	}
8480 
8481 	eb = path->nodes[wc->level];
8482 	nritems = btrfs_header_nritems(eb);
8483 
8484 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8485 		if (nread >= wc->reada_count)
8486 			break;
8487 
8488 		cond_resched();
8489 		bytenr = btrfs_node_blockptr(eb, slot);
8490 		generation = btrfs_node_ptr_generation(eb, slot);
8491 
8492 		if (slot == path->slots[wc->level])
8493 			goto reada;
8494 
8495 		if (wc->stage == UPDATE_BACKREF &&
8496 		    generation <= root->root_key.offset)
8497 			continue;
8498 
8499 		/* We don't lock the tree block, it's OK to be racy here */
8500 		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
8501 					       wc->level - 1, 1, &refs,
8502 					       &flags);
8503 		/* We don't care about errors in readahead. */
8504 		if (ret < 0)
8505 			continue;
8506 		BUG_ON(refs == 0);
8507 
8508 		if (wc->stage == DROP_REFERENCE) {
8509 			if (refs == 1)
8510 				goto reada;
8511 
8512 			if (wc->level == 1 &&
8513 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8514 				continue;
8515 			if (!wc->update_ref ||
8516 			    generation <= root->root_key.offset)
8517 				continue;
8518 			btrfs_node_key_to_cpu(eb, &key, slot);
8519 			ret = btrfs_comp_cpu_keys(&key,
8520 						  &wc->update_progress);
8521 			if (ret < 0)
8522 				continue;
8523 		} else {
8524 			if (wc->level == 1 &&
8525 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8526 				continue;
8527 		}
8528 reada:
8529 		readahead_tree_block(fs_info, bytenr);
8530 		nread++;
8531 	}
8532 	wc->reada_slot = slot;
8533 }
8534 
8535 /*
8536  * helper to process tree block while walking down the tree.
8537  *
8538  * when wc->stage == UPDATE_BACKREF, this function updates
8539  * back refs for pointers in the block.
8540  *
8541  * NOTE: return value 1 means we should stop walking down.
8542  */
8543 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8544 				   struct btrfs_root *root,
8545 				   struct btrfs_path *path,
8546 				   struct walk_control *wc, int lookup_info)
8547 {
8548 	struct btrfs_fs_info *fs_info = root->fs_info;
8549 	int level = wc->level;
8550 	struct extent_buffer *eb = path->nodes[level];
8551 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8552 	int ret;
8553 
8554 	if (wc->stage == UPDATE_BACKREF &&
8555 	    btrfs_header_owner(eb) != root->root_key.objectid)
8556 		return 1;
8557 
8558 	/*
8559 	 * when the reference count of a tree block is 1, it won't increase
8560 	 * again. once the full backref flag is set, we never clear it.
8561 	 */
8562 	if (lookup_info &&
8563 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8564 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8565 		BUG_ON(!path->locks[level]);
8566 		ret = btrfs_lookup_extent_info(trans, fs_info,
8567 					       eb->start, level, 1,
8568 					       &wc->refs[level],
8569 					       &wc->flags[level]);
8570 		BUG_ON(ret == -ENOMEM);
8571 		if (ret)
8572 			return ret;
8573 		BUG_ON(wc->refs[level] == 0);
8574 	}
8575 
8576 	if (wc->stage == DROP_REFERENCE) {
8577 		if (wc->refs[level] > 1)
8578 			return 1;
8579 
8580 		if (path->locks[level] && !wc->keep_locks) {
8581 			btrfs_tree_unlock_rw(eb, path->locks[level]);
8582 			path->locks[level] = 0;
8583 		}
8584 		return 0;
8585 	}
8586 
8587 	/* wc->stage == UPDATE_BACKREF */
8588 	if (!(wc->flags[level] & flag)) {
8589 		BUG_ON(!path->locks[level]);
8590 		ret = btrfs_inc_ref(trans, root, eb, 1);
8591 		BUG_ON(ret); /* -ENOMEM */
8592 		ret = btrfs_dec_ref(trans, root, eb, 0);
8593 		BUG_ON(ret); /* -ENOMEM */
8594 		ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
8595 						  eb->len, flag,
8596 						  btrfs_header_level(eb), 0);
8597 		BUG_ON(ret); /* -ENOMEM */
8598 		wc->flags[level] |= flag;
8599 	}
8600 
8601 	/*
8602 	 * the block is shared by multiple trees, so it's not good to
8603 	 * keep the tree lock
8604 	 */
8605 	if (path->locks[level] && level > 0) {
8606 		btrfs_tree_unlock_rw(eb, path->locks[level]);
8607 		path->locks[level] = 0;
8608 	}
8609 	return 0;
8610 }
8611 
8612 /*
8613  * helper to process tree block pointer.
8614  *
8615  * when wc->stage == DROP_REFERENCE, this function checks
8616  * reference count of the block pointed to. if the block
8617  * is shared and we need update back refs for the subtree
8618  * rooted at the block, this function changes wc->stage to
8619  * UPDATE_BACKREF. if the block is shared and there is no
8620  * need to update back refs, this function drops the reference
8621  * to the block.
8622  *
8623  * NOTE: return value 1 means we should stop walking down.
8624  */
8625 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8626 				 struct btrfs_root *root,
8627 				 struct btrfs_path *path,
8628 				 struct walk_control *wc, int *lookup_info)
8629 {
8630 	struct btrfs_fs_info *fs_info = root->fs_info;
8631 	u64 bytenr;
8632 	u64 generation;
8633 	u64 parent;
8634 	u32 blocksize;
8635 	struct btrfs_key key;
8636 	struct extent_buffer *next;
8637 	int level = wc->level;
8638 	int reada = 0;
8639 	int ret = 0;
8640 	bool need_account = false;
8641 
8642 	generation = btrfs_node_ptr_generation(path->nodes[level],
8643 					       path->slots[level]);
8644 	/*
8645 	 * if the lower level block was created before the snapshot
8646 	 * was created, we know there is no need to update back refs
8647 	 * for the subtree
8648 	 */
8649 	if (wc->stage == UPDATE_BACKREF &&
8650 	    generation <= root->root_key.offset) {
8651 		*lookup_info = 1;
8652 		return 1;
8653 	}
8654 
8655 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8656 	blocksize = fs_info->nodesize;
8657 
8658 	next = find_extent_buffer(fs_info, bytenr);
8659 	if (!next) {
8660 		next = btrfs_find_create_tree_block(fs_info, bytenr);
8661 		if (IS_ERR(next))
8662 			return PTR_ERR(next);
8663 
8664 		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8665 					       level - 1);
8666 		reada = 1;
8667 	}
8668 	btrfs_tree_lock(next);
8669 	btrfs_set_lock_blocking(next);
8670 
8671 	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
8672 				       &wc->refs[level - 1],
8673 				       &wc->flags[level - 1]);
8674 	if (ret < 0)
8675 		goto out_unlock;
8676 
8677 	if (unlikely(wc->refs[level - 1] == 0)) {
8678 		btrfs_err(fs_info, "Missing references.");
8679 		ret = -EIO;
8680 		goto out_unlock;
8681 	}
8682 	*lookup_info = 0;
8683 
8684 	if (wc->stage == DROP_REFERENCE) {
8685 		if (wc->refs[level - 1] > 1) {
8686 			need_account = true;
8687 			if (level == 1 &&
8688 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8689 				goto skip;
8690 
8691 			if (!wc->update_ref ||
8692 			    generation <= root->root_key.offset)
8693 				goto skip;
8694 
8695 			btrfs_node_key_to_cpu(path->nodes[level], &key,
8696 					      path->slots[level]);
8697 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8698 			if (ret < 0)
8699 				goto skip;
8700 
8701 			wc->stage = UPDATE_BACKREF;
8702 			wc->shared_level = level - 1;
8703 		}
8704 	} else {
8705 		if (level == 1 &&
8706 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8707 			goto skip;
8708 	}
8709 
8710 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
8711 		btrfs_tree_unlock(next);
8712 		free_extent_buffer(next);
8713 		next = NULL;
8714 		*lookup_info = 1;
8715 	}
8716 
8717 	if (!next) {
8718 		if (reada && level == 1)
8719 			reada_walk_down(trans, root, wc, path);
8720 		next = read_tree_block(fs_info, bytenr, generation);
8721 		if (IS_ERR(next)) {
8722 			return PTR_ERR(next);
8723 		} else if (!extent_buffer_uptodate(next)) {
8724 			free_extent_buffer(next);
8725 			return -EIO;
8726 		}
8727 		btrfs_tree_lock(next);
8728 		btrfs_set_lock_blocking(next);
8729 	}
8730 
8731 	level--;
8732 	ASSERT(level == btrfs_header_level(next));
8733 	if (level != btrfs_header_level(next)) {
8734 		btrfs_err(root->fs_info, "mismatched level");
8735 		ret = -EIO;
8736 		goto out_unlock;
8737 	}
8738 	path->nodes[level] = next;
8739 	path->slots[level] = 0;
8740 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8741 	wc->level = level;
8742 	if (wc->level == 1)
8743 		wc->reada_slot = 0;
8744 	return 0;
8745 skip:
8746 	wc->refs[level - 1] = 0;
8747 	wc->flags[level - 1] = 0;
8748 	if (wc->stage == DROP_REFERENCE) {
8749 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8750 			parent = path->nodes[level]->start;
8751 		} else {
8752 			ASSERT(root->root_key.objectid ==
8753 			       btrfs_header_owner(path->nodes[level]));
8754 			if (root->root_key.objectid !=
8755 			    btrfs_header_owner(path->nodes[level])) {
8756 				btrfs_err(root->fs_info,
8757 						"mismatched block owner");
8758 				ret = -EIO;
8759 				goto out_unlock;
8760 			}
8761 			parent = 0;
8762 		}
8763 
8764 		if (need_account) {
8765 			ret = btrfs_qgroup_trace_subtree(trans, root, next,
8766 							 generation, level - 1);
8767 			if (ret) {
8768 				btrfs_err_rl(fs_info,
8769 					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
8770 					     ret);
8771 			}
8772 		}
8773 		ret = btrfs_free_extent(trans, fs_info, bytenr, blocksize,
8774 					parent, root->root_key.objectid,
8775 					level - 1, 0);
8776 		if (ret)
8777 			goto out_unlock;
8778 	}
8779 
8780 	*lookup_info = 1;
8781 	ret = 1;
8782 
8783 out_unlock:
8784 	btrfs_tree_unlock(next);
8785 	free_extent_buffer(next);
8786 
8787 	return ret;
8788 }
8789 
8790 /*
8791  * helper to process tree block while walking up the tree.
8792  *
8793  * when wc->stage == DROP_REFERENCE, this function drops the
8794  * reference count on the block.
8795  *
8796  * when wc->stage == UPDATE_BACKREF, this function changes
8797  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8798  * to UPDATE_BACKREF previously while processing the block.
8799  *
8800  * NOTE: return value 1 means we should stop walking up.
8801  */
8802 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8803 				 struct btrfs_root *root,
8804 				 struct btrfs_path *path,
8805 				 struct walk_control *wc)
8806 {
8807 	struct btrfs_fs_info *fs_info = root->fs_info;
8808 	int ret;
8809 	int level = wc->level;
8810 	struct extent_buffer *eb = path->nodes[level];
8811 	u64 parent = 0;
8812 
8813 	if (wc->stage == UPDATE_BACKREF) {
8814 		BUG_ON(wc->shared_level < level);
8815 		if (level < wc->shared_level)
8816 			goto out;
8817 
8818 		ret = find_next_key(path, level + 1, &wc->update_progress);
8819 		if (ret > 0)
8820 			wc->update_ref = 0;
8821 
8822 		wc->stage = DROP_REFERENCE;
8823 		wc->shared_level = -1;
8824 		path->slots[level] = 0;
8825 
8826 		/*
8827 		 * check reference count again if the block isn't locked.
8828 		 * we should start walking down the tree again if reference
8829 		 * count is one.
8830 		 */
8831 		if (!path->locks[level]) {
8832 			BUG_ON(level == 0);
8833 			btrfs_tree_lock(eb);
8834 			btrfs_set_lock_blocking(eb);
8835 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8836 
8837 			ret = btrfs_lookup_extent_info(trans, fs_info,
8838 						       eb->start, level, 1,
8839 						       &wc->refs[level],
8840 						       &wc->flags[level]);
8841 			if (ret < 0) {
8842 				btrfs_tree_unlock_rw(eb, path->locks[level]);
8843 				path->locks[level] = 0;
8844 				return ret;
8845 			}
8846 			BUG_ON(wc->refs[level] == 0);
8847 			if (wc->refs[level] == 1) {
8848 				btrfs_tree_unlock_rw(eb, path->locks[level]);
8849 				path->locks[level] = 0;
8850 				return 1;
8851 			}
8852 		}
8853 	}
8854 
8855 	/* wc->stage == DROP_REFERENCE */
8856 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8857 
8858 	if (wc->refs[level] == 1) {
8859 		if (level == 0) {
8860 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8861 				ret = btrfs_dec_ref(trans, root, eb, 1);
8862 			else
8863 				ret = btrfs_dec_ref(trans, root, eb, 0);
8864 			BUG_ON(ret); /* -ENOMEM */
8865 			ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
8866 			if (ret) {
8867 				btrfs_err_rl(fs_info,
8868 					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
8869 					     ret);
8870 			}
8871 		}
8872 		/* make block locked assertion in clean_tree_block happy */
8873 		if (!path->locks[level] &&
8874 		    btrfs_header_generation(eb) == trans->transid) {
8875 			btrfs_tree_lock(eb);
8876 			btrfs_set_lock_blocking(eb);
8877 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8878 		}
8879 		clean_tree_block(trans, fs_info, eb);
8880 	}
8881 
8882 	if (eb == root->node) {
8883 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8884 			parent = eb->start;
8885 		else
8886 			BUG_ON(root->root_key.objectid !=
8887 			       btrfs_header_owner(eb));
8888 	} else {
8889 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8890 			parent = path->nodes[level + 1]->start;
8891 		else
8892 			BUG_ON(root->root_key.objectid !=
8893 			       btrfs_header_owner(path->nodes[level + 1]));
8894 	}
8895 
8896 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8897 out:
8898 	wc->refs[level] = 0;
8899 	wc->flags[level] = 0;
8900 	return 0;
8901 }
8902 
8903 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8904 				   struct btrfs_root *root,
8905 				   struct btrfs_path *path,
8906 				   struct walk_control *wc)
8907 {
8908 	int level = wc->level;
8909 	int lookup_info = 1;
8910 	int ret;
8911 
8912 	while (level >= 0) {
8913 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
8914 		if (ret > 0)
8915 			break;
8916 
8917 		if (level == 0)
8918 			break;
8919 
8920 		if (path->slots[level] >=
8921 		    btrfs_header_nritems(path->nodes[level]))
8922 			break;
8923 
8924 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
8925 		if (ret > 0) {
8926 			path->slots[level]++;
8927 			continue;
8928 		} else if (ret < 0)
8929 			return ret;
8930 		level = wc->level;
8931 	}
8932 	return 0;
8933 }
8934 
8935 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8936 				 struct btrfs_root *root,
8937 				 struct btrfs_path *path,
8938 				 struct walk_control *wc, int max_level)
8939 {
8940 	int level = wc->level;
8941 	int ret;
8942 
8943 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8944 	while (level < max_level && path->nodes[level]) {
8945 		wc->level = level;
8946 		if (path->slots[level] + 1 <
8947 		    btrfs_header_nritems(path->nodes[level])) {
8948 			path->slots[level]++;
8949 			return 0;
8950 		} else {
8951 			ret = walk_up_proc(trans, root, path, wc);
8952 			if (ret > 0)
8953 				return 0;
8954 
8955 			if (path->locks[level]) {
8956 				btrfs_tree_unlock_rw(path->nodes[level],
8957 						     path->locks[level]);
8958 				path->locks[level] = 0;
8959 			}
8960 			free_extent_buffer(path->nodes[level]);
8961 			path->nodes[level] = NULL;
8962 			level++;
8963 		}
8964 	}
8965 	return 1;
8966 }
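
/*
 * walk_down_tree() and walk_up_tree() are combined by the callers below
 * into a resumable post-order traversal; roughly (illustrative sketch
 * only, the real callers also record drop_progress and may restart the
 * transaction between iterations):
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;		(error)
 *		ret = walk_up_tree(trans, root, path, wc, max_level);
 *		if (ret < 0)
 *			break;		(error)
 *		if (ret > 0)
 *			break;		(whole tree processed)
 *	}
 */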
8967 
8968 /*
8969  * drop a subvolume tree.
8970  *
8971  * this function traverses the tree freeing any blocks that are only
8972  * referenced by the tree.
8973  *
8974  * when a shared tree block is found, this function decreases its
8975  * reference count by one. if update_ref is true, this function
8976  * also makes sure backrefs for the shared block and all lower level
8977  * blocks are properly updated.
8978  *
8979  * If called with for_reloc == 0, may exit early with -EAGAIN
8980  */
8981 int btrfs_drop_snapshot(struct btrfs_root *root,
8982 			 struct btrfs_block_rsv *block_rsv, int update_ref,
8983 			 int for_reloc)
8984 {
8985 	struct btrfs_fs_info *fs_info = root->fs_info;
8986 	struct btrfs_path *path;
8987 	struct btrfs_trans_handle *trans;
8988 	struct btrfs_root *tree_root = fs_info->tree_root;
8989 	struct btrfs_root_item *root_item = &root->root_item;
8990 	struct walk_control *wc;
8991 	struct btrfs_key key;
8992 	int err = 0;
8993 	int ret;
8994 	int level;
8995 	bool root_dropped = false;
8996 
8997 	btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
8998 
8999 	path = btrfs_alloc_path();
9000 	if (!path) {
9001 		err = -ENOMEM;
9002 		goto out;
9003 	}
9004 
9005 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
9006 	if (!wc) {
9007 		btrfs_free_path(path);
9008 		err = -ENOMEM;
9009 		goto out;
9010 	}
9011 
9012 	trans = btrfs_start_transaction(tree_root, 0);
9013 	if (IS_ERR(trans)) {
9014 		err = PTR_ERR(trans);
9015 		goto out_free;
9016 	}
9017 
9018 	if (block_rsv)
9019 		trans->block_rsv = block_rsv;
9020 
9021 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
9022 		level = btrfs_header_level(root->node);
9023 		path->nodes[level] = btrfs_lock_root_node(root);
9024 		btrfs_set_lock_blocking(path->nodes[level]);
9025 		path->slots[level] = 0;
9026 		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9027 		memset(&wc->update_progress, 0,
9028 		       sizeof(wc->update_progress));
9029 	} else {
9030 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
9031 		memcpy(&wc->update_progress, &key,
9032 		       sizeof(wc->update_progress));
9033 
9034 		level = root_item->drop_level;
9035 		BUG_ON(level == 0);
9036 		path->lowest_level = level;
9037 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9038 		path->lowest_level = 0;
9039 		if (ret < 0) {
9040 			err = ret;
9041 			goto out_end_trans;
9042 		}
9043 		WARN_ON(ret > 0);
9044 
9045 		/*
9046 		 * unlock our path, this is safe because only this
9047 		 * function is allowed to delete this snapshot
9048 		 */
9049 		btrfs_unlock_up_safe(path, 0);
9050 
9051 		level = btrfs_header_level(root->node);
9052 		while (1) {
9053 			btrfs_tree_lock(path->nodes[level]);
9054 			btrfs_set_lock_blocking(path->nodes[level]);
9055 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9056 
9057 			ret = btrfs_lookup_extent_info(trans, fs_info,
9058 						path->nodes[level]->start,
9059 						level, 1, &wc->refs[level],
9060 						&wc->flags[level]);
9061 			if (ret < 0) {
9062 				err = ret;
9063 				goto out_end_trans;
9064 			}
9065 			BUG_ON(wc->refs[level] == 0);
9066 
9067 			if (level == root_item->drop_level)
9068 				break;
9069 
9070 			btrfs_tree_unlock(path->nodes[level]);
9071 			path->locks[level] = 0;
9072 			WARN_ON(wc->refs[level] != 1);
9073 			level--;
9074 		}
9075 	}
9076 
9077 	wc->level = level;
9078 	wc->shared_level = -1;
9079 	wc->stage = DROP_REFERENCE;
9080 	wc->update_ref = update_ref;
9081 	wc->keep_locks = 0;
9082 	wc->for_reloc = for_reloc;
9083 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
9084 
9085 	while (1) {
9086 
9087 		ret = walk_down_tree(trans, root, path, wc);
9088 		if (ret < 0) {
9089 			err = ret;
9090 			break;
9091 		}
9092 
9093 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
9094 		if (ret < 0) {
9095 			err = ret;
9096 			break;
9097 		}
9098 
9099 		if (ret > 0) {
9100 			BUG_ON(wc->stage != DROP_REFERENCE);
9101 			break;
9102 		}
9103 
9104 		if (wc->stage == DROP_REFERENCE) {
9105 			level = wc->level;
9106 			btrfs_node_key(path->nodes[level],
9107 				       &root_item->drop_progress,
9108 				       path->slots[level]);
9109 			root_item->drop_level = level;
9110 		}
9111 
9112 		BUG_ON(wc->level == 0);
9113 		if (btrfs_should_end_transaction(trans) ||
9114 		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
9115 			ret = btrfs_update_root(trans, tree_root,
9116 						&root->root_key,
9117 						root_item);
9118 			if (ret) {
9119 				btrfs_abort_transaction(trans, ret);
9120 				err = ret;
9121 				goto out_end_trans;
9122 			}
9123 
9124 			btrfs_end_transaction_throttle(trans);
9125 			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
9126 				btrfs_debug(fs_info,
9127 					    "drop snapshot early exit");
9128 				err = -EAGAIN;
9129 				goto out_free;
9130 			}
9131 
9132 			trans = btrfs_start_transaction(tree_root, 0);
9133 			if (IS_ERR(trans)) {
9134 				err = PTR_ERR(trans);
9135 				goto out_free;
9136 			}
9137 			if (block_rsv)
9138 				trans->block_rsv = block_rsv;
9139 		}
9140 	}
9141 	btrfs_release_path(path);
9142 	if (err)
9143 		goto out_end_trans;
9144 
9145 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
9146 	if (ret) {
9147 		btrfs_abort_transaction(trans, ret);
9148 		goto out_end_trans;
9149 	}
9150 
9151 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9152 		ret = btrfs_find_root(tree_root, &root->root_key, path,
9153 				      NULL, NULL);
9154 		if (ret < 0) {
9155 			btrfs_abort_transaction(trans, ret);
9156 			err = ret;
9157 			goto out_end_trans;
9158 		} else if (ret > 0) {
9159 			/* if we fail to delete the orphan item this time
9160 			 * around, it'll get picked up the next time.
9161 			 *
9162 			 * The most common failure here is just -ENOENT.
9163 			 */
9164 			btrfs_del_orphan_item(trans, tree_root,
9165 					      root->root_key.objectid);
9166 		}
9167 	}
9168 
9169 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9170 		btrfs_add_dropped_root(trans, root);
9171 	} else {
9172 		free_extent_buffer(root->node);
9173 		free_extent_buffer(root->commit_root);
9174 		btrfs_put_fs_root(root);
9175 	}
9176 	root_dropped = true;
9177 out_end_trans:
9178 	btrfs_end_transaction_throttle(trans);
9179 out_free:
9180 	kfree(wc);
9181 	btrfs_free_path(path);
9182 out:
9183 	/*
9184 	 * So if we need to stop dropping the snapshot for whatever reason, we
9185 	 * need to make sure to add it back to the dead root list so that we
9186 	 * keep trying to do the work later.  This also cleans up roots we don't
9187 	 * have in the radix tree (like when we recover after a power failure
9188 	 * or unmount) so we don't leak memory.
9189 	 */
9190 	if (!for_reloc && !root_dropped)
9191 		btrfs_add_dead_root(root);
9192 	if (err && err != -EAGAIN)
9193 		btrfs_handle_fs_error(fs_info, err, NULL);
9194 	return err;
9195 }
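
/*
 * Typical users of btrfs_drop_snapshot() live outside this file: the
 * cleaner thread drops deleted subvolumes (update_ref depending on the
 * backref format of the tree) and the relocation code drops merged
 * reloc trees with for_reloc == 1.  A minimal invocation might look
 * like (illustrative only):
 *
 *	ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	if (ret == -EAGAIN)
 *		... the cleaner was asked to stop; the root stays on the
 *		    dead roots list and the drop is retried later ...
 */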
9196 
9197 /*
9198  * drop subtree rooted at tree block 'node'.
9199  *
9200  * NOTE: this function will unlock and release tree block 'node'.
9201  * it is only used by the relocation code.
9202  */
9203 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9204 			struct btrfs_root *root,
9205 			struct extent_buffer *node,
9206 			struct extent_buffer *parent)
9207 {
9208 	struct btrfs_fs_info *fs_info = root->fs_info;
9209 	struct btrfs_path *path;
9210 	struct walk_control *wc;
9211 	int level;
9212 	int parent_level;
9213 	int ret = 0;
9214 	int wret;
9215 
9216 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9217 
9218 	path = btrfs_alloc_path();
9219 	if (!path)
9220 		return -ENOMEM;
9221 
9222 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
9223 	if (!wc) {
9224 		btrfs_free_path(path);
9225 		return -ENOMEM;
9226 	}
9227 
9228 	btrfs_assert_tree_locked(parent);
9229 	parent_level = btrfs_header_level(parent);
9230 	extent_buffer_get(parent);
9231 	path->nodes[parent_level] = parent;
9232 	path->slots[parent_level] = btrfs_header_nritems(parent);
9233 
9234 	btrfs_assert_tree_locked(node);
9235 	level = btrfs_header_level(node);
9236 	path->nodes[level] = node;
9237 	path->slots[level] = 0;
9238 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9239 
9240 	wc->refs[parent_level] = 1;
9241 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9242 	wc->level = level;
9243 	wc->shared_level = -1;
9244 	wc->stage = DROP_REFERENCE;
9245 	wc->update_ref = 0;
9246 	wc->keep_locks = 1;
9247 	wc->for_reloc = 1;
9248 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
9249 
9250 	while (1) {
9251 		wret = walk_down_tree(trans, root, path, wc);
9252 		if (wret < 0) {
9253 			ret = wret;
9254 			break;
9255 		}
9256 
9257 		wret = walk_up_tree(trans, root, path, wc, parent_level);
9258 		if (wret < 0)
9259 			ret = wret;
9260 		if (wret != 0)
9261 			break;
9262 	}
9263 
9264 	kfree(wc);
9265 	btrfs_free_path(path);
9266 	return ret;
9267 }
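
/*
 * Compared with btrfs_drop_snapshot(): the parent is assumed to carry a
 * full backref (wc->flags[parent_level] is pre-set), locks are kept
 * across the walk (wc->keep_locks == 1) and no drop_progress is
 * recorded, because the relocation caller keeps a single transaction
 * handle open for the whole subtree.
 */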
9268 
9269 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
9270 {
9271 	u64 num_devices;
9272 	u64 stripped;
9273 
9274 	/*
9275 	 * if restripe for this chunk_type is on, pick the target profile and
9276 	 * return; otherwise do the usual balance
9277 	 */
9278 	stripped = get_restripe_target(fs_info, flags);
9279 	if (stripped)
9280 		return extended_to_chunk(stripped);
9281 
9282 	num_devices = fs_info->fs_devices->rw_devices;
9283 
9284 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
9285 		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9286 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9287 
9288 	if (num_devices == 1) {
9289 		stripped |= BTRFS_BLOCK_GROUP_DUP;
9290 		stripped = flags & ~stripped;
9291 
9292 		/* turn raid0 into single device chunks */
9293 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
9294 			return stripped;
9295 
9296 		/* turn mirroring into duplication */
9297 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9298 			     BTRFS_BLOCK_GROUP_RAID10))
9299 			return stripped | BTRFS_BLOCK_GROUP_DUP;
9300 	} else {
9301 		/* they already had raid on here, just return */
9302 		if (flags & stripped)
9303 			return flags;
9304 
9305 		stripped |= BTRFS_BLOCK_GROUP_DUP;
9306 		stripped = flags & ~stripped;
9307 
9308 		/* switch duplicated blocks with raid1 */
9309 		if (flags & BTRFS_BLOCK_GROUP_DUP)
9310 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
9311 
9312 		/* this is drive concat, leave it alone */
9313 	}
9314 
9315 	return flags;
9316 }
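
/*
 * Example of the conversion above: with only one writeable device left,
 * a RAID1 or RAID10 block group is rewritten as DUP and a RAID0 block
 * group falls back to the single profile; with two or more devices, DUP
 * is promoted to RAID1 and block groups that already use a RAID profile
 * are left untouched.
 */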
9317 
9318 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9319 {
9320 	struct btrfs_space_info *sinfo = cache->space_info;
9321 	u64 num_bytes;
9322 	u64 min_allocable_bytes;
9323 	int ret = -ENOSPC;
9324 
9325 	/*
9326 	 * We need some metadata space and system metadata space for
9327 	 * allocating chunks in some corner cases, so keep a minimum amount
9328 	 * of allocatable space unless we are forcing the group read-only.
9329 	 */
9330 	if ((sinfo->flags &
9331 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9332 	    !force)
9333 		min_allocable_bytes = SZ_1M;
9334 	else
9335 		min_allocable_bytes = 0;
9336 
9337 	spin_lock(&sinfo->lock);
9338 	spin_lock(&cache->lock);
9339 
9340 	if (cache->ro) {
9341 		cache->ro++;
9342 		ret = 0;
9343 		goto out;
9344 	}
9345 
9346 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9347 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
9348 
9349 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9350 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9351 	    min_allocable_bytes <= sinfo->total_bytes) {
9352 		sinfo->bytes_readonly += num_bytes;
9353 		cache->ro++;
9354 		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9355 		ret = 0;
9356 	}
9357 out:
9358 	spin_unlock(&cache->lock);
9359 	spin_unlock(&sinfo->lock);
9360 	return ret;
9361 }
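
/*
 * Note that cache->ro is a counter, not a flag: scrub and relocation can
 * both mark the same block group read-only at the same time, and it only
 * becomes writeable again once every holder has dropped its count via
 * btrfs_dec_block_group_ro().
 */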
9362 
9363 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9364 			     struct btrfs_block_group_cache *cache)
9365 
9366 {
9367 	struct btrfs_fs_info *fs_info = root->fs_info;
9368 	struct btrfs_trans_handle *trans;
9369 	u64 alloc_flags;
9370 	int ret;
9371 
9372 again:
9373 	trans = btrfs_join_transaction(root);
9374 	if (IS_ERR(trans))
9375 		return PTR_ERR(trans);
9376 
9377 	/*
9378 	 * we're not allowed to set block groups readonly after the dirty
9379 	 * block groups cache has started writing.  If it already started,
9380 	 * back off and let this transaction commit
9381 	 */
9382 	mutex_lock(&fs_info->ro_block_group_mutex);
9383 	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9384 		u64 transid = trans->transid;
9385 
9386 		mutex_unlock(&fs_info->ro_block_group_mutex);
9387 		btrfs_end_transaction(trans);
9388 
9389 		ret = btrfs_wait_for_commit(fs_info, transid);
9390 		if (ret)
9391 			return ret;
9392 		goto again;
9393 	}
9394 
9395 	/*
9396 	 * if we are changing raid levels, try to allocate a corresponding
9397 	 * block group with the new raid level.
9398 	 */
9399 	alloc_flags = update_block_group_flags(fs_info, cache->flags);
9400 	if (alloc_flags != cache->flags) {
9401 		ret = do_chunk_alloc(trans, fs_info, alloc_flags,
9402 				     CHUNK_ALLOC_FORCE);
9403 		/*
9404 		 * ENOSPC is allowed here, we may have enough space
9405 		 * already allocated at the new raid level to
9406 		 * carry on
9407 		 */
9408 		if (ret == -ENOSPC)
9409 			ret = 0;
9410 		if (ret < 0)
9411 			goto out;
9412 	}
9413 
9414 	ret = inc_block_group_ro(cache, 0);
9415 	if (!ret)
9416 		goto out;
9417 	alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
9418 	ret = do_chunk_alloc(trans, fs_info, alloc_flags,
9419 			     CHUNK_ALLOC_FORCE);
9420 	if (ret < 0)
9421 		goto out;
9422 	ret = inc_block_group_ro(cache, 0);
9423 out:
9424 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9425 		alloc_flags = update_block_group_flags(fs_info, cache->flags);
9426 		mutex_lock(&fs_info->chunk_mutex);
9427 		check_system_chunk(trans, fs_info, alloc_flags);
9428 		mutex_unlock(&fs_info->chunk_mutex);
9429 	}
9430 	mutex_unlock(&fs_info->ro_block_group_mutex);
9431 
9432 	btrfs_end_transaction(trans);
9433 	return ret;
9434 }
9435 
9436 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9437 			    struct btrfs_fs_info *fs_info, u64 type)
9438 {
9439 	u64 alloc_flags = get_alloc_profile(fs_info, type);
9440 
9441 	return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
9442 }
9443 
9444 /*
9445  * helper to account the unused space of all the readonly block groups in the
9446  * space_info. takes mirrors into account.
9447  */
9448 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9449 {
9450 	struct btrfs_block_group_cache *block_group;
9451 	u64 free_bytes = 0;
9452 	int factor;
9453 
9454 	/* It's df, we don't care if it's racy */
9455 	if (list_empty(&sinfo->ro_bgs))
9456 		return 0;
9457 
9458 	spin_lock(&sinfo->lock);
9459 	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9460 		spin_lock(&block_group->lock);
9461 
9462 		if (!block_group->ro) {
9463 			spin_unlock(&block_group->lock);
9464 			continue;
9465 		}
9466 
9467 		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9468 					  BTRFS_BLOCK_GROUP_RAID10 |
9469 					  BTRFS_BLOCK_GROUP_DUP))
9470 			factor = 2;
9471 		else
9472 			factor = 1;
9473 
9474 		free_bytes += (block_group->key.offset -
9475 			       btrfs_block_group_used(&block_group->item)) *
9476 			       factor;
9477 
9478 		spin_unlock(&block_group->lock);
9479 	}
9480 	spin_unlock(&sinfo->lock);
9481 
9482 	return free_bytes;
9483 }
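
/*
 * Worked example: a read-only RAID1 block group of 1GiB with 256MiB used
 * contributes (1GiB - 256MiB) * 2 = 1.5GiB of raw free space to the
 * total.  This feeds the df/statfs style reporting; as noted above, a
 * racy value is acceptable there.
 */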
9484 
9485 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
9486 {
9487 	struct btrfs_space_info *sinfo = cache->space_info;
9488 	u64 num_bytes;
9489 
9490 	BUG_ON(!cache->ro);
9491 
9492 	spin_lock(&sinfo->lock);
9493 	spin_lock(&cache->lock);
9494 	if (!--cache->ro) {
9495 		num_bytes = cache->key.offset - cache->reserved -
9496 			    cache->pinned - cache->bytes_super -
9497 			    btrfs_block_group_used(&cache->item);
9498 		sinfo->bytes_readonly -= num_bytes;
9499 		list_del_init(&cache->ro_list);
9500 	}
9501 	spin_unlock(&cache->lock);
9502 	spin_unlock(&sinfo->lock);
9503 }
9504 
9505 /*
9506  * checks to see if it's even possible to relocate this block group.
9507  *
9508  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9509  * ok to go ahead and try.
9510  */
9511 int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
9512 {
9513 	struct btrfs_root *root = fs_info->extent_root;
9514 	struct btrfs_block_group_cache *block_group;
9515 	struct btrfs_space_info *space_info;
9516 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
9517 	struct btrfs_device *device;
9518 	struct btrfs_trans_handle *trans;
9519 	u64 min_free;
9520 	u64 dev_min = 1;
9521 	u64 dev_nr = 0;
9522 	u64 target;
9523 	int debug;
9524 	int index;
9525 	int full = 0;
9526 	int ret = 0;
9527 
9528 	debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
9529 
9530 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
9531 
9532 	/* odd, couldn't find the block group, leave it alone */
9533 	if (!block_group) {
9534 		if (debug)
9535 			btrfs_warn(fs_info,
9536 				   "can't find block group for bytenr %llu",
9537 				   bytenr);
9538 		return -1;
9539 	}
9540 
9541 	min_free = btrfs_block_group_used(&block_group->item);
9542 
9543 	/* no bytes used, we're good */
9544 	if (!min_free)
9545 		goto out;
9546 
9547 	space_info = block_group->space_info;
9548 	spin_lock(&space_info->lock);
9549 
9550 	full = space_info->full;
9551 
9552 	/*
9553 	 * if this is the last block group we have in this space, we can't
9554 	 * relocate it unless we're able to allocate a new chunk below.
9555 	 *
9556 	 * Otherwise, we need to make sure we have room in the space to handle
9557 	 * all of the extents from this block group.  If we can, we're good
9558 	 */
9559 	if ((space_info->total_bytes != block_group->key.offset) &&
9560 	    (space_info->bytes_used + space_info->bytes_reserved +
9561 	     space_info->bytes_pinned + space_info->bytes_readonly +
9562 	     min_free < space_info->total_bytes)) {
9563 		spin_unlock(&space_info->lock);
9564 		goto out;
9565 	}
9566 	spin_unlock(&space_info->lock);
9567 
9568 	/*
9569 	 * ok we don't have enough space, but maybe we have free space on our
9570 	 * devices to allocate new chunks for relocation, so loop through our
9571 	 * alloc devices and guess if we have enough space.  if this block
9572 	 * group is going to be restriped, run checks against the target
9573 	 * profile instead of the current one.
9574 	 */
9575 	ret = -1;
9576 
9577 	/*
9578 	 * index:
9579 	 *      0: raid10
9580 	 *      1: raid1
9581 	 *      2: dup
9582 	 *      3: raid0
9583 	 *      4: single
9584 	 */
9585 	target = get_restripe_target(fs_info, block_group->flags);
9586 	if (target) {
9587 		index = __get_raid_index(extended_to_chunk(target));
9588 	} else {
9589 		/*
9590 		 * this is just a balance, so if we were marked as full
9591 		 * we know there is no space for a new chunk
9592 		 */
9593 		if (full) {
9594 			if (debug)
9595 				btrfs_warn(fs_info,
9596 					   "no space to alloc new chunk for block group %llu",
9597 					   block_group->key.objectid);
9598 			goto out;
9599 		}
9600 
9601 		index = get_block_group_index(block_group);
9602 	}
9603 
9604 	if (index == BTRFS_RAID_RAID10) {
9605 		dev_min = 4;
9606 		/* Divide by 2 */
9607 		min_free >>= 1;
9608 	} else if (index == BTRFS_RAID_RAID1) {
9609 		dev_min = 2;
9610 	} else if (index == BTRFS_RAID_DUP) {
9611 		/* Multiply by 2 */
9612 		min_free <<= 1;
9613 	} else if (index == BTRFS_RAID_RAID0) {
9614 		dev_min = fs_devices->rw_devices;
9615 		min_free = div64_u64(min_free, dev_min);
9616 	}
9617 
9618 	/* We need to do this so that we can look at pending chunks */
9619 	trans = btrfs_join_transaction(root);
9620 	if (IS_ERR(trans)) {
9621 		ret = PTR_ERR(trans);
9622 		goto out;
9623 	}
9624 
9625 	mutex_lock(&fs_info->chunk_mutex);
9626 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9627 		u64 dev_offset;
9628 
9629 		/*
9630 		 * check to make sure we can actually find a chunk with enough
9631 		 * space to fit our block group in.
9632 		 */
9633 		if (device->total_bytes > device->bytes_used + min_free &&
9634 		    !device->is_tgtdev_for_dev_replace) {
9635 			ret = find_free_dev_extent(trans, device, min_free,
9636 						   &dev_offset, NULL);
9637 			if (!ret)
9638 				dev_nr++;
9639 
9640 			if (dev_nr >= dev_min)
9641 				break;
9642 
9643 			ret = -1;
9644 		}
9645 	}
9646 	if (debug && ret == -1)
9647 		btrfs_warn(fs_info,
9648 			   "no space to allocate a new chunk for block group %llu",
9649 			   block_group->key.objectid);
9650 	mutex_unlock(&fs_info->chunk_mutex);
9651 	btrfs_end_transaction(trans);
9652 out:
9653 	btrfs_put_block_group(block_group);
9654 	return ret;
9655 }
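
/*
 * A 0 return here is only an educated guess that relocation can succeed:
 * either the remaining space in this space_info can absorb the used
 * bytes, or enough devices have room for a replacement chunk.  The
 * actual relocation can still fail with ENOSPC later.
 */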
9656 
9657 static int find_first_block_group(struct btrfs_fs_info *fs_info,
9658 				  struct btrfs_path *path,
9659 				  struct btrfs_key *key)
9660 {
9661 	struct btrfs_root *root = fs_info->extent_root;
9662 	int ret = 0;
9663 	struct btrfs_key found_key;
9664 	struct extent_buffer *leaf;
9665 	int slot;
9666 
9667 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9668 	if (ret < 0)
9669 		goto out;
9670 
9671 	while (1) {
9672 		slot = path->slots[0];
9673 		leaf = path->nodes[0];
9674 		if (slot >= btrfs_header_nritems(leaf)) {
9675 			ret = btrfs_next_leaf(root, path);
9676 			if (ret == 0)
9677 				continue;
9678 			if (ret < 0)
9679 				goto out;
9680 			break;
9681 		}
9682 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
9683 
9684 		if (found_key.objectid >= key->objectid &&
9685 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9686 			struct extent_map_tree *em_tree;
9687 			struct extent_map *em;
9688 
9689 			em_tree = &root->fs_info->mapping_tree.map_tree;
9690 			read_lock(&em_tree->lock);
9691 			em = lookup_extent_mapping(em_tree, found_key.objectid,
9692 						   found_key.offset);
9693 			read_unlock(&em_tree->lock);
9694 			if (!em) {
9695 				btrfs_err(fs_info,
9696 			"logical %llu len %llu found bg but no related chunk",
9697 					  found_key.objectid, found_key.offset);
9698 				ret = -ENOENT;
9699 			} else {
9700 				ret = 0;
9701 			}
9702 			free_extent_map(em);
9703 			goto out;
9704 		}
9705 		path->slots[0]++;
9706 	}
9707 out:
9708 	return ret;
9709 }
9710 
9711 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9712 {
9713 	struct btrfs_block_group_cache *block_group;
9714 	u64 last = 0;
9715 
9716 	while (1) {
9717 		struct inode *inode;
9718 
9719 		block_group = btrfs_lookup_first_block_group(info, last);
9720 		while (block_group) {
9721 			spin_lock(&block_group->lock);
9722 			if (block_group->iref)
9723 				break;
9724 			spin_unlock(&block_group->lock);
9725 			block_group = next_block_group(info, block_group);
9726 		}
9727 		if (!block_group) {
9728 			if (last == 0)
9729 				break;
9730 			last = 0;
9731 			continue;
9732 		}
9733 
9734 		inode = block_group->inode;
9735 		block_group->iref = 0;
9736 		block_group->inode = NULL;
9737 		spin_unlock(&block_group->lock);
9738 		ASSERT(block_group->io_ctl.inode == NULL);
9739 		iput(inode);
9740 		last = block_group->key.objectid + block_group->key.offset;
9741 		btrfs_put_block_group(block_group);
9742 	}
9743 }
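
/*
 * btrfs_put_block_group_cache() is called while tearing down the
 * filesystem: it walks every block group that still holds a reference on
 * its free space cache inode (->iref) and drops it so the inode can be
 * evicted before the unmount completes.
 */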
9744 
9745 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9746 {
9747 	struct btrfs_block_group_cache *block_group;
9748 	struct btrfs_space_info *space_info;
9749 	struct btrfs_caching_control *caching_ctl;
9750 	struct rb_node *n;
9751 
9752 	down_write(&info->commit_root_sem);
9753 	while (!list_empty(&info->caching_block_groups)) {
9754 		caching_ctl = list_entry(info->caching_block_groups.next,
9755 					 struct btrfs_caching_control, list);
9756 		list_del(&caching_ctl->list);
9757 		put_caching_control(caching_ctl);
9758 	}
9759 	up_write(&info->commit_root_sem);
9760 
9761 	spin_lock(&info->unused_bgs_lock);
9762 	while (!list_empty(&info->unused_bgs)) {
9763 		block_group = list_first_entry(&info->unused_bgs,
9764 					       struct btrfs_block_group_cache,
9765 					       bg_list);
9766 		list_del_init(&block_group->bg_list);
9767 		btrfs_put_block_group(block_group);
9768 	}
9769 	spin_unlock(&info->unused_bgs_lock);
9770 
9771 	spin_lock(&info->block_group_cache_lock);
9772 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9773 		block_group = rb_entry(n, struct btrfs_block_group_cache,
9774 				       cache_node);
9775 		rb_erase(&block_group->cache_node,
9776 			 &info->block_group_cache_tree);
9777 		RB_CLEAR_NODE(&block_group->cache_node);
9778 		spin_unlock(&info->block_group_cache_lock);
9779 
9780 		down_write(&block_group->space_info->groups_sem);
9781 		list_del(&block_group->list);
9782 		up_write(&block_group->space_info->groups_sem);
9783 
9784 		if (block_group->cached == BTRFS_CACHE_STARTED)
9785 			wait_block_group_cache_done(block_group);
9786 
9787 		/*
9788 		 * We haven't cached this block group, which means we could
9789 		 * possibly have excluded extents on this block group.
9790 		 */
9791 		if (block_group->cached == BTRFS_CACHE_NO ||
9792 		    block_group->cached == BTRFS_CACHE_ERROR)
9793 			free_excluded_extents(info, block_group);
9794 
9795 		btrfs_remove_free_space_cache(block_group);
9796 		ASSERT(list_empty(&block_group->dirty_list));
9797 		ASSERT(list_empty(&block_group->io_list));
9798 		ASSERT(list_empty(&block_group->bg_list));
9799 		ASSERT(atomic_read(&block_group->count) == 1);
9800 		btrfs_put_block_group(block_group);
9801 
9802 		spin_lock(&info->block_group_cache_lock);
9803 	}
9804 	spin_unlock(&info->block_group_cache_lock);
9805 
9806 	/* now that all the block groups are freed, go through and
9807 	 * free all the space_info structs.  This is only called during
9808 	 * the final stages of unmount, and so we know nobody is
9809 	 * using them.  We call synchronize_rcu() once before we start,
9810 	 * just to be on the safe side.
9811 	 */
9812 	synchronize_rcu();
9813 
9814 	release_global_block_rsv(info);
9815 
9816 	while (!list_empty(&info->space_info)) {
9817 		int i;
9818 
9819 		space_info = list_entry(info->space_info.next,
9820 					struct btrfs_space_info,
9821 					list);
9822 
9823 		/*
9824 		 * Do not hide this behind enospc_debug, this is actually
9825 		 * important and indicates a real bug if this happens.
9826 		 */
9827 		if (WARN_ON(space_info->bytes_pinned > 0 ||
9828 			    space_info->bytes_reserved > 0 ||
9829 			    space_info->bytes_may_use > 0))
9830 			dump_space_info(info, space_info, 0, 0);
9831 		list_del(&space_info->list);
9832 		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9833 			struct kobject *kobj;
9834 			kobj = space_info->block_group_kobjs[i];
9835 			space_info->block_group_kobjs[i] = NULL;
9836 			if (kobj) {
9837 				kobject_del(kobj);
9838 				kobject_put(kobj);
9839 			}
9840 		}
9841 		kobject_del(&space_info->kobj);
9842 		kobject_put(&space_info->kobj);
9843 	}
9844 	return 0;
9845 }
9846 
9847 static void __link_block_group(struct btrfs_space_info *space_info,
9848 			       struct btrfs_block_group_cache *cache)
9849 {
9850 	int index = get_block_group_index(cache);
9851 	bool first = false;
9852 
9853 	down_write(&space_info->groups_sem);
9854 	if (list_empty(&space_info->block_groups[index]))
9855 		first = true;
9856 	list_add_tail(&cache->list, &space_info->block_groups[index]);
9857 	up_write(&space_info->groups_sem);
9858 
9859 	if (first) {
9860 		struct raid_kobject *rkobj;
9861 		int ret;
9862 
9863 		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9864 		if (!rkobj)
9865 			goto out_err;
9866 		rkobj->raid_type = index;
9867 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9868 		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9869 				  "%s", get_raid_name(index));
9870 		if (ret) {
9871 			kobject_put(&rkobj->kobj);
9872 			goto out_err;
9873 		}
9874 		space_info->block_group_kobjs[index] = &rkobj->kobj;
9875 	}
9876 
9877 	return;
9878 out_err:
9879 	btrfs_warn(cache->fs_info,
9880 		   "failed to add kobject for block cache, ignoring");
9881 }
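
/*
 * The kobject added above backs the per-profile directories under sysfs,
 * e.g. /sys/fs/btrfs/<fsid>/allocation/metadata/raid1/, created the
 * first time a block group with that RAID index appears in the
 * space_info.
 */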
9882 
9883 static struct btrfs_block_group_cache *
9884 btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
9885 			       u64 start, u64 size)
9886 {
9887 	struct btrfs_block_group_cache *cache;
9888 
9889 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
9890 	if (!cache)
9891 		return NULL;
9892 
9893 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9894 					GFP_NOFS);
9895 	if (!cache->free_space_ctl) {
9896 		kfree(cache);
9897 		return NULL;
9898 	}
9899 
9900 	cache->key.objectid = start;
9901 	cache->key.offset = size;
9902 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9903 
9904 	cache->sectorsize = fs_info->sectorsize;
9905 	cache->fs_info = fs_info;
9906 	cache->full_stripe_len = btrfs_full_stripe_len(fs_info,
9907 						       &fs_info->mapping_tree,
9908 						       start);
9909 	set_free_space_tree_thresholds(cache);
9910 
9911 	atomic_set(&cache->count, 1);
9912 	spin_lock_init(&cache->lock);
9913 	init_rwsem(&cache->data_rwsem);
9914 	INIT_LIST_HEAD(&cache->list);
9915 	INIT_LIST_HEAD(&cache->cluster_list);
9916 	INIT_LIST_HEAD(&cache->bg_list);
9917 	INIT_LIST_HEAD(&cache->ro_list);
9918 	INIT_LIST_HEAD(&cache->dirty_list);
9919 	INIT_LIST_HEAD(&cache->io_list);
9920 	btrfs_init_free_space_ctl(cache);
9921 	atomic_set(&cache->trimming, 0);
9922 	mutex_init(&cache->free_space_lock);
9923 
9924 	return cache;
9925 }
9926 
9927 int btrfs_read_block_groups(struct btrfs_fs_info *info)
9928 {
9929 	struct btrfs_path *path;
9930 	int ret;
9931 	struct btrfs_block_group_cache *cache;
9932 	struct btrfs_space_info *space_info;
9933 	struct btrfs_key key;
9934 	struct btrfs_key found_key;
9935 	struct extent_buffer *leaf;
9936 	int need_clear = 0;
9937 	u64 cache_gen;
9938 	u64 feature;
9939 	int mixed;
9940 
9941 	feature = btrfs_super_incompat_flags(info->super_copy);
9942 	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
9943 
9944 	key.objectid = 0;
9945 	key.offset = 0;
9946 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9947 	path = btrfs_alloc_path();
9948 	if (!path)
9949 		return -ENOMEM;
9950 	path->reada = READA_FORWARD;
9951 
9952 	cache_gen = btrfs_super_cache_generation(info->super_copy);
9953 	if (btrfs_test_opt(info, SPACE_CACHE) &&
9954 	    btrfs_super_generation(info->super_copy) != cache_gen)
9955 		need_clear = 1;
9956 	if (btrfs_test_opt(info, CLEAR_CACHE))
9957 		need_clear = 1;
9958 
9959 	while (1) {
9960 		ret = find_first_block_group(info, path, &key);
9961 		if (ret > 0)
9962 			break;
9963 		if (ret != 0)
9964 			goto error;
9965 
9966 		leaf = path->nodes[0];
9967 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9968 
9969 		cache = btrfs_create_block_group_cache(info, found_key.objectid,
9970 						       found_key.offset);
9971 		if (!cache) {
9972 			ret = -ENOMEM;
9973 			goto error;
9974 		}
9975 
9976 		if (need_clear) {
9977 			/*
9978 			 * When we mount with an old space cache, we need to
9979 			 * set BTRFS_DC_CLEAR and set the dirty flag.
9980 			 *
9981 			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9982 			 *    truncate the old free space cache inode and
9983 			 *    set up a new one.
9984 			 * b) Setting the 'dirty' flag makes sure that we flush
9985 			 *    the new space cache info onto disk.
9986 			 */
9987 			if (btrfs_test_opt(info, SPACE_CACHE))
9988 				cache->disk_cache_state = BTRFS_DC_CLEAR;
9989 		}
9990 
9991 		read_extent_buffer(leaf, &cache->item,
9992 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
9993 				   sizeof(cache->item));
9994 		cache->flags = btrfs_block_group_flags(&cache->item);
9995 		if (!mixed &&
9996 		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
9997 		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
9998 			btrfs_err(info,
9999 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
10000 				  cache->key.objectid);
10001 			ret = -EINVAL;
10002 			goto error;
10003 		}
10004 
10005 		key.objectid = found_key.objectid + found_key.offset;
10006 		btrfs_release_path(path);
10007 
10008 		/*
10009 		 * We need to exclude the super stripes now so that the space
10010 		 * info has super bytes accounted for, otherwise we'll think
10011 		 * we have more space than we actually do.
10012 		 */
10013 		ret = exclude_super_stripes(info, cache);
10014 		if (ret) {
10015 			/*
10016 			 * We may have excluded something, so call this just in
10017 			 * case.
10018 			 */
10019 			free_excluded_extents(info, cache);
10020 			btrfs_put_block_group(cache);
10021 			goto error;
10022 		}
10023 
10024 		/*
10025 		 * check for two cases, either we are full, and therefore
10026 		 * don't need to bother with the caching work since we won't
10027 		 * find any space, or we are empty, and we can just add all
10028 		 * the space in and be done with it.  This saves us a lot of
10029 		 * time, particularly in the full case.
10030 		 */
10031 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
10032 			cache->last_byte_to_unpin = (u64)-1;
10033 			cache->cached = BTRFS_CACHE_FINISHED;
10034 			free_excluded_extents(info, cache);
10035 		} else if (btrfs_block_group_used(&cache->item) == 0) {
10036 			cache->last_byte_to_unpin = (u64)-1;
10037 			cache->cached = BTRFS_CACHE_FINISHED;
10038 			add_new_free_space(cache, info,
10039 					   found_key.objectid,
10040 					   found_key.objectid +
10041 					   found_key.offset);
10042 			free_excluded_extents(info, cache);
10043 		}
10044 
10045 		ret = btrfs_add_block_group_cache(info, cache);
10046 		if (ret) {
10047 			btrfs_remove_free_space_cache(cache);
10048 			btrfs_put_block_group(cache);
10049 			goto error;
10050 		}
10051 
10052 		trace_btrfs_add_block_group(info, cache, 0);
10053 		ret = update_space_info(info, cache->flags, found_key.offset,
10054 					btrfs_block_group_used(&cache->item),
10055 					cache->bytes_super, &space_info);
10056 		if (ret) {
10057 			btrfs_remove_free_space_cache(cache);
10058 			spin_lock(&info->block_group_cache_lock);
10059 			rb_erase(&cache->cache_node,
10060 				 &info->block_group_cache_tree);
10061 			RB_CLEAR_NODE(&cache->cache_node);
10062 			spin_unlock(&info->block_group_cache_lock);
10063 			btrfs_put_block_group(cache);
10064 			goto error;
10065 		}
10066 
10067 		cache->space_info = space_info;
10068 
10069 		__link_block_group(space_info, cache);
10070 
10071 		set_avail_alloc_bits(info, cache->flags);
10072 		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
10073 			inc_block_group_ro(cache, 1);
10074 		} else if (btrfs_block_group_used(&cache->item) == 0) {
10075 			spin_lock(&info->unused_bgs_lock);
10076 			/* Should always be true but just in case. */
10077 			if (list_empty(&cache->bg_list)) {
10078 				btrfs_get_block_group(cache);
10079 				list_add_tail(&cache->bg_list,
10080 					      &info->unused_bgs);
10081 			}
10082 			spin_unlock(&info->unused_bgs_lock);
10083 		}
10084 	}
10085 
10086 	list_for_each_entry_rcu(space_info, &info->space_info, list) {
10087 		if (!(get_alloc_profile(info, space_info->flags) &
10088 		      (BTRFS_BLOCK_GROUP_RAID10 |
10089 		       BTRFS_BLOCK_GROUP_RAID1 |
10090 		       BTRFS_BLOCK_GROUP_RAID5 |
10091 		       BTRFS_BLOCK_GROUP_RAID6 |
10092 		       BTRFS_BLOCK_GROUP_DUP)))
10093 			continue;
10094 		/*
10095 		 * avoid allocating from un-mirrored block groups if there are
10096 		 * mirrored block groups.
10097 		 */
10098 		list_for_each_entry(cache,
10099 				&space_info->block_groups[BTRFS_RAID_RAID0],
10100 				list)
10101 			inc_block_group_ro(cache, 1);
10102 		list_for_each_entry(cache,
10103 				&space_info->block_groups[BTRFS_RAID_SINGLE],
10104 				list)
10105 			inc_block_group_ro(cache, 1);
10106 	}
10107 
10108 	init_global_block_rsv(info);
10109 	ret = 0;
10110 error:
10111 	btrfs_free_path(path);
10112 	return ret;
10113 }
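
/*
 * btrfs_read_block_groups() runs once at mount time; when it returns,
 * the block group rbtree mirrors the on-disk BLOCK_GROUP_ITEMs and the
 * space_info counters already account for the used bytes and super
 * stripe reservations of every group.
 */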
10114 
10115 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
10116 				       struct btrfs_fs_info *fs_info)
10117 {
10118 	struct btrfs_block_group_cache *block_group, *tmp;
10119 	struct btrfs_root *extent_root = fs_info->extent_root;
10120 	struct btrfs_block_group_item item;
10121 	struct btrfs_key key;
10122 	int ret = 0;
10123 	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
10124 
10125 	trans->can_flush_pending_bgs = false;
10126 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
10127 		if (ret)
10128 			goto next;
10129 
10130 		spin_lock(&block_group->lock);
10131 		memcpy(&item, &block_group->item, sizeof(item));
10132 		memcpy(&key, &block_group->key, sizeof(key));
10133 		spin_unlock(&block_group->lock);
10134 
10135 		ret = btrfs_insert_item(trans, extent_root, &key, &item,
10136 					sizeof(item));
10137 		if (ret)
10138 			btrfs_abort_transaction(trans, ret);
10139 		ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
10140 					       key.offset);
10141 		if (ret)
10142 			btrfs_abort_transaction(trans, ret);
10143 		add_block_group_free_space(trans, fs_info, block_group);
10144 		/* already aborted the transaction if it failed. */
10145 next:
10146 		list_del_init(&block_group->bg_list);
10147 	}
10148 	trans->can_flush_pending_bgs = can_flush_pending_bgs;
10149 }
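
/*
 * btrfs_create_pending_block_groups() is run while committing (or
 * ending) a transaction, to convert the block groups queued on
 * trans->new_bgs by btrfs_make_block_group() below into on-disk block
 * group and chunk items.  Failures abort the transaction instead of
 * being returned.
 */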
10150 
10151 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
10152 			   struct btrfs_fs_info *fs_info, u64 bytes_used,
10153 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
10154 			   u64 size)
10155 {
10156 	struct btrfs_block_group_cache *cache;
10157 	int ret;
10158 
10159 	btrfs_set_log_full_commit(fs_info, trans);
10160 
10161 	cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
10162 	if (!cache)
10163 		return -ENOMEM;
10164 
10165 	btrfs_set_block_group_used(&cache->item, bytes_used);
10166 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
10167 	btrfs_set_block_group_flags(&cache->item, type);
10168 
10169 	cache->flags = type;
10170 	cache->last_byte_to_unpin = (u64)-1;
10171 	cache->cached = BTRFS_CACHE_FINISHED;
10172 	cache->needs_free_space = 1;
10173 	ret = exclude_super_stripes(fs_info, cache);
10174 	if (ret) {
10175 		/*
10176 		 * We may have excluded something, so call this just in
10177 		 * case.
10178 		 */
10179 		free_excluded_extents(fs_info, cache);
10180 		btrfs_put_block_group(cache);
10181 		return ret;
10182 	}
10183 
10184 	add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);
10185 
10186 	free_excluded_extents(fs_info, cache);
10187 
10188 #ifdef CONFIG_BTRFS_DEBUG
10189 	if (btrfs_should_fragment_free_space(cache)) {
10190 		u64 new_bytes_used = size - bytes_used;
10191 
10192 		bytes_used += new_bytes_used >> 1;
10193 		fragment_free_space(cache);
10194 	}
10195 #endif
10196 	/*
10197 	 * Call to ensure the corresponding space_info object is created and
10198 	 * assigned to our block group, but don't update its counters just yet.
10199 	 * We want our bg to be added to the rbtree with its ->space_info set.
10200 	 */
10201 	ret = update_space_info(fs_info, cache->flags, 0, 0, 0,
10202 				&cache->space_info);
10203 	if (ret) {
10204 		btrfs_remove_free_space_cache(cache);
10205 		btrfs_put_block_group(cache);
10206 		return ret;
10207 	}
10208 
10209 	ret = btrfs_add_block_group_cache(fs_info, cache);
10210 	if (ret) {
10211 		btrfs_remove_free_space_cache(cache);
10212 		btrfs_put_block_group(cache);
10213 		return ret;
10214 	}
10215 
10216 	/*
10217 	 * Now that our block group has its ->space_info set and is inserted in
10218 	 * the rbtree, update the space info's counters.
10219 	 */
10220 	trace_btrfs_add_block_group(fs_info, cache, 1);
10221 	ret = update_space_info(fs_info, cache->flags, size, bytes_used,
10222 				cache->bytes_super, &cache->space_info);
10223 	if (ret) {
10224 		btrfs_remove_free_space_cache(cache);
10225 		spin_lock(&fs_info->block_group_cache_lock);
10226 		rb_erase(&cache->cache_node,
10227 			 &fs_info->block_group_cache_tree);
10228 		RB_CLEAR_NODE(&cache->cache_node);
10229 		spin_unlock(&fs_info->block_group_cache_lock);
10230 		btrfs_put_block_group(cache);
10231 		return ret;
10232 	}
10233 	update_global_block_rsv(fs_info);
10234 
10235 	__link_block_group(cache->space_info, cache);
10236 
10237 	list_add_tail(&cache->bg_list, &trans->new_bgs);
10238 
10239 	set_avail_alloc_bits(fs_info, type);
10240 	return 0;
10241 }
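
/*
 * btrfs_make_block_group() is called by the chunk allocator once the
 * device extents for a new chunk have been reserved.  The new group is
 * usable for allocations immediately; its block group item is only
 * written out later by btrfs_create_pending_block_groups() above.
 */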
10242 
10243 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10244 {
10245 	u64 extra_flags = chunk_to_extended(flags) &
10246 				BTRFS_EXTENDED_PROFILE_MASK;
10247 
10248 	write_seqlock(&fs_info->profiles_lock);
10249 	if (flags & BTRFS_BLOCK_GROUP_DATA)
10250 		fs_info->avail_data_alloc_bits &= ~extra_flags;
10251 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
10252 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10253 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10254 		fs_info->avail_system_alloc_bits &= ~extra_flags;
10255 	write_sequnlock(&fs_info->profiles_lock);
10256 }
10257 
10258 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10259 			     struct btrfs_fs_info *fs_info, u64 group_start,
10260 			     struct extent_map *em)
10261 {
10262 	struct btrfs_root *root = fs_info->extent_root;
10263 	struct btrfs_path *path;
10264 	struct btrfs_block_group_cache *block_group;
10265 	struct btrfs_free_cluster *cluster;
10266 	struct btrfs_root *tree_root = fs_info->tree_root;
10267 	struct btrfs_key key;
10268 	struct inode *inode;
10269 	struct kobject *kobj = NULL;
10270 	int ret;
10271 	int index;
10272 	int factor;
10273 	struct btrfs_caching_control *caching_ctl = NULL;
10274 	bool remove_em;
10275 
10276 	block_group = btrfs_lookup_block_group(fs_info, group_start);
10277 	BUG_ON(!block_group);
10278 	BUG_ON(!block_group->ro);
10279 
10280 	/*
10281 	 * Free the reserved super bytes from this block group before
10282 	 * removing it.
10283 	 */
10284 	free_excluded_extents(fs_info, block_group);
10285 
10286 	memcpy(&key, &block_group->key, sizeof(key));
10287 	index = get_block_group_index(block_group);
10288 	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10289 				  BTRFS_BLOCK_GROUP_RAID1 |
10290 				  BTRFS_BLOCK_GROUP_RAID10))
10291 		factor = 2;
10292 	else
10293 		factor = 1;
10294 
10295 	/* make sure this block group isn't part of an allocation cluster */
10296 	cluster = &fs_info->data_alloc_cluster;
10297 	spin_lock(&cluster->refill_lock);
10298 	btrfs_return_cluster_to_free_space(block_group, cluster);
10299 	spin_unlock(&cluster->refill_lock);
10300 
10301 	/*
10302 	 * make sure this block group isn't part of a metadata
10303 	 * allocation cluster
10304 	 */
10305 	cluster = &fs_info->meta_alloc_cluster;
10306 	spin_lock(&cluster->refill_lock);
10307 	btrfs_return_cluster_to_free_space(block_group, cluster);
10308 	spin_unlock(&cluster->refill_lock);
10309 
10310 	path = btrfs_alloc_path();
10311 	if (!path) {
10312 		ret = -ENOMEM;
10313 		goto out;
10314 	}
10315 
10316 	/*
10317 	 * get the inode first so any iput calls done for the io_list
10318 	 * aren't the final iput (no unlinks allowed now)
10319 	 */
10320 	inode = lookup_free_space_inode(tree_root, block_group, path);
10321 
10322 	mutex_lock(&trans->transaction->cache_write_mutex);
10323 	/*
10324 	 * make sure our free space cache IO is done before removing the
10325 	 * free space inode
10326 	 */
10327 	spin_lock(&trans->transaction->dirty_bgs_lock);
10328 	if (!list_empty(&block_group->io_list)) {
10329 		list_del_init(&block_group->io_list);
10330 
10331 		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10332 
10333 		spin_unlock(&trans->transaction->dirty_bgs_lock);
10334 		btrfs_wait_cache_io(trans, block_group, path);
10335 		btrfs_put_block_group(block_group);
10336 		spin_lock(&trans->transaction->dirty_bgs_lock);
10337 	}
10338 
10339 	if (!list_empty(&block_group->dirty_list)) {
10340 		list_del_init(&block_group->dirty_list);
10341 		btrfs_put_block_group(block_group);
10342 	}
10343 	spin_unlock(&trans->transaction->dirty_bgs_lock);
10344 	mutex_unlock(&trans->transaction->cache_write_mutex);
10345 
10346 	if (!IS_ERR(inode)) {
10347 		ret = btrfs_orphan_add(trans, inode);
10348 		if (ret) {
10349 			btrfs_add_delayed_iput(inode);
10350 			goto out;
10351 		}
10352 		clear_nlink(inode);
10353 		/* One for the block group's ref */
10354 		spin_lock(&block_group->lock);
10355 		if (block_group->iref) {
10356 			block_group->iref = 0;
10357 			block_group->inode = NULL;
10358 			spin_unlock(&block_group->lock);
10359 			iput(inode);
10360 		} else {
10361 			spin_unlock(&block_group->lock);
10362 		}
10363 		/* One for our lookup ref */
10364 		btrfs_add_delayed_iput(inode);
10365 	}
10366 
10367 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10368 	key.offset = block_group->key.objectid;
10369 	key.type = 0;
10370 
10371 	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10372 	if (ret < 0)
10373 		goto out;
10374 	if (ret > 0)
10375 		btrfs_release_path(path);
10376 	if (ret == 0) {
10377 		ret = btrfs_del_item(trans, tree_root, path);
10378 		if (ret)
10379 			goto out;
10380 		btrfs_release_path(path);
10381 	}
10382 
10383 	spin_lock(&fs_info->block_group_cache_lock);
10384 	rb_erase(&block_group->cache_node,
10385 		 &fs_info->block_group_cache_tree);
10386 	RB_CLEAR_NODE(&block_group->cache_node);
10387 
10388 	if (fs_info->first_logical_byte == block_group->key.objectid)
10389 		fs_info->first_logical_byte = (u64)-1;
10390 	spin_unlock(&fs_info->block_group_cache_lock);
10391 
10392 	down_write(&block_group->space_info->groups_sem);
10393 	/*
10394 	 * we must use list_del_init so people can check to see if they
10395 	 * are still on the list after taking the semaphore
10396 	 */
10397 	list_del_init(&block_group->list);
10398 	if (list_empty(&block_group->space_info->block_groups[index])) {
10399 		kobj = block_group->space_info->block_group_kobjs[index];
10400 		block_group->space_info->block_group_kobjs[index] = NULL;
10401 		clear_avail_alloc_bits(fs_info, block_group->flags);
10402 	}
10403 	up_write(&block_group->space_info->groups_sem);
10404 	if (kobj) {
10405 		kobject_del(kobj);
10406 		kobject_put(kobj);
10407 	}
10408 
10409 	if (block_group->has_caching_ctl)
10410 		caching_ctl = get_caching_control(block_group);
10411 	if (block_group->cached == BTRFS_CACHE_STARTED)
10412 		wait_block_group_cache_done(block_group);
10413 	if (block_group->has_caching_ctl) {
10414 		down_write(&fs_info->commit_root_sem);
10415 		if (!caching_ctl) {
10416 			struct btrfs_caching_control *ctl;
10417 
10418 			list_for_each_entry(ctl,
10419 				    &fs_info->caching_block_groups, list)
10420 				if (ctl->block_group == block_group) {
10421 					caching_ctl = ctl;
10422 					atomic_inc(&caching_ctl->count);
10423 					break;
10424 				}
10425 		}
10426 		if (caching_ctl)
10427 			list_del_init(&caching_ctl->list);
10428 		up_write(&fs_info->commit_root_sem);
10429 		if (caching_ctl) {
10430 			/* Once for the caching bgs list and once for us. */
10431 			put_caching_control(caching_ctl);
10432 			put_caching_control(caching_ctl);
10433 		}
10434 	}
10435 
10436 	spin_lock(&trans->transaction->dirty_bgs_lock);
10437 	WARN_ON(!list_empty(&block_group->dirty_list));
10438 	WARN_ON(!list_empty(&block_group->io_list));
10443 	spin_unlock(&trans->transaction->dirty_bgs_lock);
10444 	btrfs_remove_free_space_cache(block_group);
10445 
10446 	spin_lock(&block_group->space_info->lock);
10447 	list_del_init(&block_group->ro_list);
10448 
10449 	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
10450 		WARN_ON(block_group->space_info->total_bytes
10451 			< block_group->key.offset);
10452 		WARN_ON(block_group->space_info->bytes_readonly
10453 			< block_group->key.offset);
10454 		WARN_ON(block_group->space_info->disk_total
10455 			< block_group->key.offset * factor);
10456 	}
10457 	block_group->space_info->total_bytes -= block_group->key.offset;
10458 	block_group->space_info->bytes_readonly -= block_group->key.offset;
10459 	block_group->space_info->disk_total -= block_group->key.offset * factor;
10460 
10461 	spin_unlock(&block_group->space_info->lock);
10462 
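	/*
	 * Save a copy of the block group's key, we need it further below to
	 * delete the block group's item from the extent tree.
	 */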
10463 	memcpy(&key, &block_group->key, sizeof(key));
10464 
10465 	mutex_lock(&fs_info->chunk_mutex);
10466 	if (!list_empty(&em->list)) {
10467 		/* We're in the transaction->pending_chunks list. */
10468 		free_extent_map(em);
10469 	}
10470 	spin_lock(&block_group->lock);
10471 	block_group->removed = 1;
10472 	/*
10473 	 * At this point trimming can't start on this block group, because we
10474 	 * removed the block group from the fs_info->block_group_cache_tree rbtree,
10475 	 * so no one can find it anymore. And even if someone already got this
10476 	 * block group before we removed it from the rbtree, they have already
10477 	 * incremented block_group->trimming - if they didn't, they won't find
10478 	 * any free space entries because we already removed them all when we
10479 	 * called btrfs_remove_free_space_cache().
10480 	 *
10481 	 * And we must not remove the extent map from the fs_info->mapping_tree
10482 	 * Also, we must not remove the extent map from the fs_info->mapping_tree,
10483 	 * so that the same logical address range and physical device space
10484 	 * ranges are not reused for a new block group. This is because our
10485 	 * completely transactionless, so while it is trimming a range the
10486 	 * currently running transaction might finish and a new one start,
10487 	 * allowing for new block groups to be created that can reuse the same
10488 	 * physical device locations unless we take this special care.
10489 	 *
10490 	 * There may also be an implicit trim operation if the file system
10491 	 * is mounted with -odiscard. The same protections must remain
10492 	 * in place until the extents have been discarded completely when
10493 	 * the transaction commit has completed.
10494 	 */
10495 	remove_em = (atomic_read(&block_group->trimming) == 0);
10496 	/*
10497 	 * Make sure a trimmer task always sees the em in the pinned_chunks list
10498 	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
10499 	 * before checking block_group->removed).
10500 	 */
10501 	if (!remove_em) {
10502 		/*
10503 		 * Our em might be in trans->transaction->pending_chunks which
10504 		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10505 		 * and so is the fs_info->pinned_chunks list.
10506 		 *
10507 		 * So at this point we must be holding the chunk_mutex to avoid
10508 		 * any races with chunk allocation (more specifically at
10509 		 * volumes.c:contains_pending_extent()), to ensure it always
10510 		 * sees the em, either in the pending_chunks list or in the
10511 		 * pinned_chunks list.
10512 		 */
10513 		list_move_tail(&em->list, &fs_info->pinned_chunks);
10514 	}
10515 	spin_unlock(&block_group->lock);
10516 
10517 	if (remove_em) {
10518 		struct extent_map_tree *em_tree;
10519 
10520 		em_tree = &fs_info->mapping_tree.map_tree;
10521 		write_lock(&em_tree->lock);
10522 		/*
10523 		 * The em might be in the pending_chunks list, so make sure the
10524 		 * chunk mutex is locked, since remove_extent_mapping() will
10525 		 * delete us from that list.
10526 		 */
10527 		remove_extent_mapping(em_tree, em);
10528 		write_unlock(&em_tree->lock);
10529 		/* once for the tree */
10530 		free_extent_map(em);
10531 	}
10532 
10533 	mutex_unlock(&fs_info->chunk_mutex);
10534 
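	/*
	 * Remove the block group's entries from the free space tree. This is
	 * a no-op if the free space tree feature is not enabled.
	 */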
10535 	ret = remove_block_group_free_space(trans, fs_info, block_group);
10536 	if (ret)
10537 		goto out;
10538 
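	/*
	 * Drop two references: one for the lookup done at the start of this
	 * function and one for the block group cache rbtree entry that was
	 * erased above.
	 */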
10539 	btrfs_put_block_group(block_group);
10540 	btrfs_put_block_group(block_group);
10541 
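	/* Finally, delete the block group's item from the extent tree. */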
10542 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10543 	if (ret > 0)
10544 		ret = -EIO;
10545 	if (ret < 0)
10546 		goto out;
10547 
10548 	ret = btrfs_del_item(trans, root, path);
10549 out:
10550 	btrfs_free_path(path);
10551 	return ret;
10552 }
10553 
10554 struct btrfs_trans_handle *
10555 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10556 				     const u64 chunk_offset)
10557 {
10558 	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10559 	struct extent_map *em;
10560 	struct map_lookup *map;
10561 	unsigned int num_items;
10562 
10563 	read_lock(&em_tree->lock);
10564 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10565 	read_unlock(&em_tree->lock);
10566 	ASSERT(em && em->start == chunk_offset);
10567 
10568 	/*
10569 	 * We need to reserve 3 + N units from the metadata space info in order
10570 	 * to remove a block group (done at btrfs_remove_chunk() and at
10571 	 * btrfs_remove_block_group()), which are used for:
10572 	 *
10573 	 * 1 unit for adding the free space inode's orphan (located in the tree
10574 	 * of tree roots).
10575 	 * 1 unit for deleting the block group item (located in the extent
10576 	 * tree).
10577 	 * 1 unit for deleting the free space item (located in tree of tree
10578 	 * roots).
10579 	 * N units for deleting N device extent items corresponding to each
10580 	 * stripe (located in the device tree).
10581 	 *
10582 	 * In order to remove a block group we also need to reserve units in the
10583 	 * system space info in order to update the chunk tree (update one or
10584 	 * more device items and remove one chunk item), but this is done at
10585 	 * btrfs_remove_chunk() through a call to check_system_chunk().
10586 	 */
10587 	map = em->map_lookup;
10588 	num_items = 3 + map->num_stripes;
10589 	free_extent_map(em);
10590 
10591 	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10592 							   num_items, 1);
10593 }
10594 
10595 /*
10596  * Process the unused_bgs list and remove any block groups that don't have
10597  * any allocated space in them.
10598  */
10599 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10600 {
10601 	struct btrfs_block_group_cache *block_group;
10602 	struct btrfs_space_info *space_info;
10603 	struct btrfs_trans_handle *trans;
10604 	int ret = 0;
10605 
10606 	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
10607 		return;
10608 
10609 	spin_lock(&fs_info->unused_bgs_lock);
10610 	while (!list_empty(&fs_info->unused_bgs)) {
10611 		u64 start, end;
10612 		int trimming;
10613 
10614 		block_group = list_first_entry(&fs_info->unused_bgs,
10615 					       struct btrfs_block_group_cache,
10616 					       bg_list);
10617 		list_del_init(&block_group->bg_list);
10618 
10619 		space_info = block_group->space_info;
10620 
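		/*
		 * If a previous iteration failed, or if this block group is
		 * part of a mixed (data+metadata) space_info, don't try to
		 * delete it; just drop our reference and move on.
		 */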
10621 		if (ret || btrfs_mixed_space_info(space_info)) {
10622 			btrfs_put_block_group(block_group);
10623 			continue;
10624 		}
10625 		spin_unlock(&fs_info->unused_bgs_lock);
10626 
10627 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
10628 
10629 		/* Don't want to race with allocators so take the groups_sem */
10630 		down_write(&space_info->groups_sem);
10631 		spin_lock(&block_group->lock);
10632 		if (block_group->reserved ||
10633 		    btrfs_block_group_used(&block_group->item) ||
10634 		    block_group->ro ||
10635 		    list_is_singular(&block_group->list)) {
10636 			/*
10637 			 * We want to bail if we made new allocations or have
10638 			 * outstanding allocations in this block group.  We do
10639 			 * the ro check in case balance is currently acting on
10640 			 * this block group.
10641 			 */
10642 			spin_unlock(&block_group->lock);
10643 			up_write(&space_info->groups_sem);
10644 			goto next;
10645 		}
10646 		spin_unlock(&block_group->lock);
10647 
10648 		/* We don't want to force the issue, only flip if it's ok. */
10649 		ret = inc_block_group_ro(block_group, 0);
10650 		up_write(&space_info->groups_sem);
10651 		if (ret < 0) {
10652 			ret = 0;
10653 			goto next;
10654 		}
10655 
10656 		/*
10657 		 * Want to do this before we do anything else so we can recover
10658 		 * properly if we fail to join the transaction.
10659 		 */
10660 		trans = btrfs_start_trans_remove_block_group(fs_info,
10661 						     block_group->key.objectid);
10662 		if (IS_ERR(trans)) {
10663 			btrfs_dec_block_group_ro(block_group);
10664 			ret = PTR_ERR(trans);
10665 			goto next;
10666 		}
10667 
10668 		/*
10669 		 * We could have pending pinned extents for this block group,
10670 		 * just delete them, we don't care about them anymore.
10671 		 */
10672 		start = block_group->key.objectid;
10673 		end = start + block_group->key.offset - 1;
10674 		/*
10675 		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
10676 		 * btrfs_finish_extent_commit(). If we are at transaction N,
10677 		 * another task might be running finish_extent_commit() for the
10678 		 * previous transaction N - 1, and have seen a range belonging
10679 		 * to the block group in freed_extents[] before we were able to
10680 		 * clear the whole block group range from freed_extents[]. This
10681 		 * means that task can lookup for the block group after we
10682 	 * means that task can look up the block group after we
10683 		 * a BUG_ON() at btrfs_unpin_extent_range().
10684 		 */
10685 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
10686 		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10687 				  EXTENT_DIRTY);
10688 		if (ret) {
10689 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10690 			btrfs_dec_block_group_ro(block_group);
10691 			goto end_trans;
10692 		}
10693 		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10694 				  EXTENT_DIRTY);
10695 		if (ret) {
10696 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10697 			btrfs_dec_block_group_ro(block_group);
10698 			goto end_trans;
10699 		}
10700 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10701 
10702 		/* Reset pinned so btrfs_put_block_group doesn't complain */
10703 		spin_lock(&space_info->lock);
10704 		spin_lock(&block_group->lock);
10705 
10706 		space_info->bytes_pinned -= block_group->pinned;
10707 		space_info->bytes_readonly += block_group->pinned;
10708 		percpu_counter_add(&space_info->total_bytes_pinned,
10709 				   -block_group->pinned);
10710 		block_group->pinned = 0;
10711 
10712 		spin_unlock(&block_group->lock);
10713 		spin_unlock(&space_info->lock);
10714 
10715 		/* DISCARD can flip during remount */
10716 		trimming = btrfs_test_opt(fs_info, DISCARD);
10717 
10718 		/* Implicit trim during transaction commit. */
10719 		if (trimming)
10720 			btrfs_get_block_group_trimming(block_group);
10721 
10722 		/*
10723 		 * btrfs_remove_chunk() will abort the transaction if things go
10724 		 * horribly wrong.
10725 		 */
10726 		ret = btrfs_remove_chunk(trans, fs_info,
10727 					 block_group->key.objectid);
10728 
10729 		if (ret) {
10730 			if (trimming)
10731 				btrfs_put_block_group_trimming(block_group);
10732 			goto end_trans;
10733 		}
10734 
10735 		/*
10736 		 * If we're not mounted with -odiscard, we can just forget
10737 		 * about this block group. Otherwise we'll need to wait
10738 		 * until transaction commit to do the actual discard.
10739 		 */
10740 		if (trimming) {
10741 			spin_lock(&fs_info->unused_bgs_lock);
10742 			/*
10743 			 * A concurrent scrub might have added us to the
10744 			 * fs_info->unused_bgs list, so use a list_move operation
10745 			 * to add the block group to the deleted_bgs list.
10746 			 */
10747 			list_move(&block_group->bg_list,
10748 				  &trans->transaction->deleted_bgs);
10749 			spin_unlock(&fs_info->unused_bgs_lock);
10750 			btrfs_get_block_group(block_group);
10751 		}
10752 end_trans:
10753 		btrfs_end_transaction(trans);
10754 next:
10755 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10756 		btrfs_put_block_group(block_group);
10757 		spin_lock(&fs_info->unused_bgs_lock);
10758 	}
10759 	spin_unlock(&fs_info->unused_bgs_lock);
10760 }
10761 
10762 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10763 {
10764 	struct btrfs_space_info *space_info;
10765 	struct btrfs_super_block *disk_super;
10766 	u64 features;
10767 	u64 flags;
10768 	int mixed = 0;
10769 	int ret;
10770 
10771 	disk_super = fs_info->super_copy;
10772 	if (!btrfs_super_root(disk_super))
10773 		return -EINVAL;
10774 
10775 	features = btrfs_super_incompat_flags(disk_super);
10776 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10777 		mixed = 1;
10778 
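	/*
	 * Pre-create the space_infos: one for SYSTEM chunks, then either a
	 * single mixed DATA|METADATA space_info or separate METADATA and
	 * DATA ones, depending on the MIXED_GROUPS incompat feature.
	 */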
10779 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
10780 	ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10781 	if (ret)
10782 		goto out;
10783 
10784 	if (mixed) {
10785 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10786 		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10787 	} else {
10788 		flags = BTRFS_BLOCK_GROUP_METADATA;
10789 		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10790 		if (ret)
10791 			goto out;
10792 
10793 		flags = BTRFS_BLOCK_GROUP_DATA;
10794 		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
10795 	}
10796 out:
10797 	return ret;
10798 }
10799 
10800 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
10801 				   u64 start, u64 end)
10802 {
10803 	return unpin_extent_range(fs_info, start, end, false);
10804 }
10805 
10806 /*
10807  * It used to be that old block groups would be left around forever.
10808  * Iterating over them would be enough to trim unused space.  Since we
10809  * now automatically remove them, we also need to iterate over unallocated
10810  * space.
10811  *
10812  * We don't want a transaction for this since the discard may take a
10813  * substantial amount of time.  We don't require that a transaction be
10814  * running, but we do need to take a running transaction into account
10815  * to ensure that we're not discarding chunks that were released in
10816  * the current transaction.
10817  *
10818  * Holding the chunks lock will prevent other threads from allocating
10819  * or releasing chunks, but it won't prevent a running transaction
10820  * from committing and releasing the memory that the pending chunks
10821  * list head uses.  For that, we need to take a reference to the
10822  * transaction.
10823  */
10824 static int btrfs_trim_free_extents(struct btrfs_device *device,
10825 				   u64 minlen, u64 *trimmed)
10826 {
10827 	u64 start = 0, len = 0;
10828 	int ret;
10829 
10830 	*trimmed = 0;
10831 
10832 	/* Not writeable = nothing to do. */
10833 	if (!device->writeable)
10834 		return 0;
10835 
10836 	/* No free space = nothing to do. */
10837 	if (device->total_bytes <= device->bytes_used)
10838 		return 0;
10839 
10840 	ret = 0;
10841 
10842 	while (1) {
10843 		struct btrfs_fs_info *fs_info = device->fs_info;
10844 		struct btrfs_transaction *trans;
10845 		u64 bytes;
10846 
10847 		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10848 		if (ret)
10849 			return ret;
10850 
10851 		down_read(&fs_info->commit_root_sem);
10852 
10853 		spin_lock(&fs_info->trans_lock);
10854 		trans = fs_info->running_transaction;
10855 		if (trans)
10856 			atomic_inc(&trans->use_count);
10857 		spin_unlock(&fs_info->trans_lock);
10858 
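		/*
		 * Find the next region of at least minlen bytes on this
		 * device that is not allocated to any chunk, starting the
		 * search at 'start'.
		 */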
10859 		ret = find_free_dev_extent_start(trans, device, minlen, start,
10860 						 &start, &len);
10861 		if (trans)
10862 			btrfs_put_transaction(trans);
10863 
10864 		if (ret) {
10865 			up_read(&fs_info->commit_root_sem);
10866 			mutex_unlock(&fs_info->chunk_mutex);
10867 			if (ret == -ENOSPC)
10868 				ret = 0;
10869 			break;
10870 		}
10871 
10872 		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10873 		up_read(&fs_info->commit_root_sem);
10874 		mutex_unlock(&fs_info->chunk_mutex);
10875 
10876 		if (ret)
10877 			break;
10878 
10879 		start += len;
10880 		*trimmed += bytes;
10881 
10882 		if (fatal_signal_pending(current)) {
10883 			ret = -ERESTARTSYS;
10884 			break;
10885 		}
10886 
10887 		cond_resched();
10888 	}
10889 
10890 	return ret;
10891 }
10892 
10893 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
10894 {
10895 	struct btrfs_block_group_cache *cache = NULL;
10896 	struct btrfs_device *device;
10897 	struct list_head *devices;
10898 	u64 group_trimmed;
10899 	u64 start;
10900 	u64 end;
10901 	u64 trimmed = 0;
10902 	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10903 	int ret = 0;
10904 
10905 	/*
10906 	 * Try to trim all FS space; our first block group may start at a non-zero offset.
10907 	 */
10908 	if (range->len == total_bytes)
10909 		cache = btrfs_lookup_first_block_group(fs_info, range->start);
10910 	else
10911 		cache = btrfs_lookup_block_group(fs_info, range->start);
10912 
10913 	while (cache) {
10914 		if (cache->key.objectid >= (range->start + range->len)) {
10915 			btrfs_put_block_group(cache);
10916 			break;
10917 		}
10918 
10919 		start = max(range->start, cache->key.objectid);
10920 		end = min(range->start + range->len,
10921 				cache->key.objectid + cache->key.offset);
10922 
10923 		if (end - start >= range->minlen) {
10924 			if (!block_group_cache_done(cache)) {
10925 				ret = cache_block_group(cache, 0);
10926 				if (ret) {
10927 					btrfs_put_block_group(cache);
10928 					break;
10929 				}
10930 				ret = wait_block_group_cache_done(cache);
10931 				if (ret) {
10932 					btrfs_put_block_group(cache);
10933 					break;
10934 				}
10935 			}
10936 			ret = btrfs_trim_block_group(cache,
10937 						     &group_trimmed,
10938 						     start,
10939 						     end,
10940 						     range->minlen);
10941 
10942 			trimmed += group_trimmed;
10943 			if (ret) {
10944 				btrfs_put_block_group(cache);
10945 				break;
10946 			}
10947 		}
10948 
10949 		cache = next_block_group(fs_info, cache);
10950 	}
10951 
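	/*
	 * Also trim the space on each writeable device that is not allocated
	 * to any chunk, see btrfs_trim_free_extents() above.
	 */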
10952 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
10953 	devices = &fs_info->fs_devices->alloc_list;
10954 	list_for_each_entry(device, devices, dev_alloc_list) {
10955 		ret = btrfs_trim_free_extents(device, range->minlen,
10956 					      &group_trimmed);
10957 		if (ret)
10958 			break;
10959 
10960 		trimmed += group_trimmed;
10961 	}
10962 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
10963 
10964 	range->len = trimmed;
10965 	return ret;
10966 }
10967 
10968 /*
10969  * btrfs_{start,end}_write_no_snapshoting() are similar to
10970  * mnt_{want,drop}_write(). They are used to prevent some tasks from writing
10971  * data into the page cache through nocow before the subvolume is snapshotted
10972  * (that data is flushed to disk only after the snapshot creation), and to
10973  * prevent operations while snapshotting is ongoing that would make the
10974  * snapshot inconsistent (writes followed by expanding truncates, for example).
10975  */
10976 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10977 {
10978 	percpu_counter_dec(&root->subv_writers->counter);
10979 	/*
10980 	 * Make sure counter is updated before we wake up waiters.
10981 	 */
10982 	smp_mb();
10983 	if (waitqueue_active(&root->subv_writers->wait))
10984 		wake_up(&root->subv_writers->wait);
10985 }
10986 
10987 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10988 {
10989 	if (atomic_read(&root->will_be_snapshoted))
10990 		return 0;
10991 
10992 	percpu_counter_inc(&root->subv_writers->counter);
10993 	/*
10994 	 * Make sure counter is updated before we check for snapshot creation.
10995 	 */
10996 	smp_mb();
10997 	if (atomic_read(&root->will_be_snapshoted)) {
10998 		btrfs_end_write_no_snapshoting(root);
10999 		return 0;
11000 	}
11001 	return 1;
11002 }
11003 
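/*
 * Action callback for wait_on_atomic_t(): reschedule and keep waiting until
 * the atomic counter drops to zero.
 */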
11004 static int wait_snapshoting_atomic_t(atomic_t *a)
11005 {
11006 	schedule();
11007 	return 0;
11008 }
11009 
11010 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
11011 {
11012 	while (true) {
11013 		int ret;
11014 
11015 		ret = btrfs_start_write_no_snapshoting(root);
11016 		if (ret)
11017 			break;
11018 		wait_on_atomic_t(&root->will_be_snapshoted,
11019 				 wait_snapshoting_atomic_t,
11020 				 TASK_UNINTERRUPTIBLE);
11021 	}
11022 }
11023