xref: /openbmc/linux/fs/btrfs/extent-tree.c (revision b1a3e75e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "misc.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"
#include "rcu-string.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&fs_info->excluded_extents, start, end,
			EXTENT_UPTODATE);
	return 0;
}

void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 start, end;

	start = cache->start;
	end = start + cache->length - 1;

	clear_extent_bits(&fs_info->excluded_extents, start, end,
			  EXTENT_UPTODATE);
}

static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
{
	if (ref->type == BTRFS_REF_METADATA) {
		if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
			return BTRFS_BLOCK_GROUP_SYSTEM;
		else
			return BTRFS_BLOCK_GROUP_METADATA;
	}
	return BTRFS_BLOCK_GROUP_DATA;
}

static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_ref *ref)
{
	struct btrfs_space_info *space_info;
	u64 flags = generic_ref_to_space_flags(ref);

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add_batch(&space_info->total_bytes_pinned, ref->len,
		    BTRFS_TOTAL_BYTES_PINNED_BATCH);
}

static void sub_pinned_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_ref *ref)
{
	struct btrfs_space_info *space_info;
	u64 flags = generic_ref_to_space_flags(ref);

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add_batch(&space_info->total_bytes_pinned, -ref->len,
		    BTRFS_TOTAL_BYTES_PINNED_BATCH);
}

/* Simple helper to search for an existing data extent at a given offset. */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

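/*
 * Example usage (illustrative sketch, not part of the original file and not
 * called anywhere below): btrfs_lookup_data_extent() simply forwards the
 * btrfs_search_slot() result, so 0 means an EXTENT_ITEM with the exact
 * (start, len) key exists, a positive value means it does not, and a
 * negative errno signals a failure.  The caller context is assumed.
 */
static inline bool example_data_extent_exists(struct btrfs_fs_info *fs_info,
					      u64 bytenr, u64 num_bytes)
{
	int ret = btrfs_lookup_data_extent(fs_info, bytenr, num_bytes);

	/* Treat lookup errors as "not found" for this sketch. */
	return ret == 0;
}
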
/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head node may
 * also store the extent flags to set. This way you can check to see what
 * the reference count and extent flags would be if all of the delayed refs
 * were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

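/*
 * Example usage (illustrative sketch, not called anywhere in this file):
 * asking for the current reference count and flags of a tree block, with
 * queued delayed refs taken into account.  Passing the block's level as the
 * offset follows the skinny metadata convention used above; error handling
 * is collapsed into a bool for brevity.
 */
static inline bool example_tree_block_is_shared(struct btrfs_trans_handle *trans,
						struct extent_buffer *eb)
{
	u64 refs = 0;
	u64 flags = 0;
	int ret;

	ret = btrfs_lookup_extent_info(trans, trans->fs_info, eb->start,
				       btrfs_header_level(eb), 1, &refs,
				       &flags);
	return ret == 0 && refs > 1;
}
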
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) Differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic, and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree block
 * gets COWed, we have to update the back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist of only a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */

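/*
 * Illustrative sketch of the key composition described above (the example
 * function is an assumption, not used by the code below): for an implicit
 * data back ref the key offset is the hash of (root, inode, offset), while
 * for a full/shared back ref it is the bytenr of the parent block.
 * hash_extent_data_ref() is defined further down in this file, hence the
 * forward declaration.
 */
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);

static inline void example_data_backref_keys(u64 extent_bytenr,
					     u64 root_objectid, u64 ino,
					     u64 file_offset, u64 parent,
					     struct btrfs_key *implicit,
					     struct btrfs_key *shared)
{
	/* Implicit back ref: identifies (root, inode, offset). */
	implicit->objectid = extent_bytenr;
	implicit->type = BTRFS_EXTENT_DATA_REF_KEY;
	implicit->offset = hash_extent_data_ref(root_objectid, ino,
						file_offset);

	/* Full back ref: identifies the parent block holding the pointer. */
	shared->objectid = extent_bytenr;
	shared->type = BTRFS_SHARED_DATA_REF_KEY;
	shared->offset = parent;
}
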
/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared ref has a parent tree
				 * block, whose bytenr must be aligned
				 * to the nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared ref has a parent tree
				 * block, whose bytenr must be aligned
				 * to the nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
		  eb->start, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}

u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * Look for an inline back ref. If the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * For metadata, @owner holds the level of the block, which is what
	 * the skinny metadata key stores in its offset.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

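/*
 * Illustrative sketch of the contract documented above (an assumed example;
 * insert_inline_extent_backref() below does this for real): 0 means the
 * inline ref was found, -ENOENT means *iref points at the insert position,
 * and -EAGAIN means the extent item has no room for another inline ref so a
 * separate keyed back ref item is needed.
 */
static inline int example_inline_backref_lookup(struct btrfs_trans_handle *trans,
						struct btrfs_path *path,
						u64 bytenr, u64 num_bytes,
						u64 root_objectid, u64 owner,
						u64 offset)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, 0, root_objectid,
					   owner, offset, 1);
	if (ret == 0)		/* *iref points at the existing inline ref */
		return 0;
	if (ret == -ENOENT)	/* *iref points at the insert position */
		return 0;
	if (ret == -EAGAIN)	/* no room inline, add a keyed ref item */
		return 0;
	return ret;		/* real error, e.g. -EIO or -EUCLEAN */
}
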
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(path, iref, refs_to_add,
					     extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
					     last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
	}
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}

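/*
 * Illustrative sketch (an assumed example, not used below) of the overlap
 * test that btrfs_issue_discard() performs per superblock mirror: a discard
 * range has to be split around any mirror it touches.
 */
static inline bool example_range_hits_super(u64 start, u64 len, int mirror)
{
	u64 sb_start = btrfs_sb_offset(mirror);
	u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;

	/* Overlap if either superblock edge, or the range start, is inside. */
	return in_range(sb_start, start, len) ||
	       in_range(sb_end, start, len) ||
	       in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE);
}
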
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;
	struct btrfs_bio *bbio = NULL;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_bio_stripe *stripe;
		int i;

		num_bytes = end - cur;
		/* Tell the block device(s) that the sectors can be discarded */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
				      &num_bytes, &bbio, 0);
		/*
		 * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
		 * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
		 * thus we can't continue anyway.
		 */
		if (ret < 0)
			goto out;

		stripe = bbio->stripes;
		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;
			struct request_queue *req_q;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}
			req_q = bdev_get_queue(stripe->dev->bdev);
			if (!blk_queue_discard(req_q))
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret) {
				discarded_bytes += bytes;
			} else if (ret != -EOPNOTSUPP) {
				/*
				 * Logic errors, -ENOMEM or -EIO, all
				 * unlikely to happen.
				 *
				 * Since there are two loops, explicitly
				 * jump to out to avoid confusion.
				 */
				btrfs_put_bbio(bbio);
				goto out;
			}

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
		cur += num_bytes;
	}
out:
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}

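/*
 * Example usage (illustrative sketch, not called anywhere in this file):
 * trimming a freed range and reporting how much was actually discarded.
 * @fs_info, @bytenr and @num_bytes are assumed to come from the caller.
 */
static inline void example_trim_freed_range(struct btrfs_fs_info *fs_info,
					    u64 bytenr, u64 num_bytes)
{
	u64 trimmed = 0;
	int ret;

	ret = btrfs_discard_extent(fs_info, bytenr, num_bytes, &trimmed);
	if (ret)
		btrfs_debug(fs_info, "discard of %llu/%llu failed: %d",
			    bytenr, num_bytes, ret);
}
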
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref,
				NULL, &old_ref_mod, &new_ref_mod);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0,
						 &old_ref_mod, &new_ref_mod);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
		sub_pinned_bytes(fs_info, generic_ref);

	return ret;
}

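/*
 * Example usage (illustrative sketch): queueing a reference bump for a data
 * extent with the btrfs_init_generic_ref()/btrfs_init_data_ref() helpers
 * from delayed-ref.h, the same pattern real callers use.  The @bytenr,
 * @num_bytes, @root, @ino and @file_offset values are assumptions supplied
 * by the caller's context.
 */
static inline int example_add_data_ref(struct btrfs_trans_handle *trans,
				       u64 bytenr, u64 num_bytes, u64 root,
				       u64 ino, u64 file_offset)
{
	struct btrfs_ref ref = { 0 };

	/* parent == 0: a new, indirect (implicit) reference. */
	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root, ino, file_offset);
	return btrfs_inc_extent_ref(trans, &ref);
}
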
/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the case
 *		    when relocating a shared extent. In that case, root_objectid
 *		    will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
 *		    be 0.
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the file offset
 *		    this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:      Pointer to a structure, holding information necessary when
 *                  updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	/* This will set up the path even if it fails to insert the back ref. */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->leave_spinning = 1;
	/* now insert the actual backref */
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (TRANS_ABORTED(trans))
		return 0;

	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (TRANS_ABORTED(trans)) {
		if (insert_reserved)
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();
	if (ret && insert_reserved)
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
	return ret;
}

static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This prevents a ref count from going down to zero, which would
	 * delete the extent item from the extent tree while there are still
	 * references to add; those additions would then fail because they
	 * would not find the extent item.
	 */
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
				struct btrfs_delayed_ref_node, add_list);

	ref = rb_entry(rb_first_cached(&head->ref_tree),
		       struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&ref->add_list));
	return ref;
}

static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}

static struct btrfs_delayed_extent_op *cleanup_extent_op(
				struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;

	if (!extent_op)
		return NULL;

	if (head->must_insert_reserved) {
		head->extent_op = NULL;
		btrfs_free_delayed_extent_op(extent_op);
		return NULL;
	}
	return extent_op;
}

static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = cleanup_extent_op(head);
	if (!extent_op)
		return 0;
	head->extent_op = NULL;
	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}

1776 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
1777 				  struct btrfs_delayed_ref_root *delayed_refs,
1778 				  struct btrfs_delayed_ref_head *head)
1779 {
1780 	int nr_items = 1;	/* Dropping this ref head update. */
1781 
1782 	if (head->total_ref_mod < 0) {
1783 		struct btrfs_space_info *space_info;
1784 		u64 flags;
1785 
1786 		if (head->is_data)
1787 			flags = BTRFS_BLOCK_GROUP_DATA;
1788 		else if (head->is_system)
1789 			flags = BTRFS_BLOCK_GROUP_SYSTEM;
1790 		else
1791 			flags = BTRFS_BLOCK_GROUP_METADATA;
1792 		space_info = btrfs_find_space_info(fs_info, flags);
1793 		ASSERT(space_info);
1794 		percpu_counter_add_batch(&space_info->total_bytes_pinned,
1795 				   -head->num_bytes,
1796 				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
1797 
1798 		/*
1799 		 * We had csum deletions accounted for in our delayed refs rsv,
1800 		 * we need to drop the csum leaves for this update from our
1801 		 * delayed_refs_rsv.
1802 		 */
1803 		if (head->is_data) {
1804 			spin_lock(&delayed_refs->lock);
1805 			delayed_refs->pending_csums -= head->num_bytes;
1806 			spin_unlock(&delayed_refs->lock);
1807 			nr_items += btrfs_csum_bytes_to_leaves(fs_info,
1808 				head->num_bytes);
1809 		}
1810 	}
1811 
1812 	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
1813 }
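
/*
 * Worked example of the accounting above (a sketch): for a data head
 * with total_ref_mod < 0, nr_items starts at 1 for the head update
 * itself and grows by btrfs_csum_bytes_to_leaves(fs_info, num_bytes)
 * for the csum leaves whose deletion was reserved, and all of it is
 * handed back via btrfs_delayed_refs_rsv_release().
 */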
1814 
1815 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
1816 			    struct btrfs_delayed_ref_head *head)
1817 {
1819 	struct btrfs_fs_info *fs_info = trans->fs_info;
1820 	struct btrfs_delayed_ref_root *delayed_refs;
1821 	int ret;
1822 
1823 	delayed_refs = &trans->transaction->delayed_refs;
1824 
1825 	ret = run_and_cleanup_extent_op(trans, head);
1826 	if (ret < 0) {
1827 		unselect_delayed_ref_head(delayed_refs, head);
1828 		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
1829 		return ret;
1830 	} else if (ret) {
1831 		return ret;
1832 	}
1833 
1834 	/*
1835 	 * Need to drop our head ref lock and re-acquire the delayed ref lock
1836 	 * and then re-check to make sure nobody got added.
1837 	 */
1838 	spin_unlock(&head->lock);
1839 	spin_lock(&delayed_refs->lock);
1840 	spin_lock(&head->lock);
1841 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
1842 		spin_unlock(&head->lock);
1843 		spin_unlock(&delayed_refs->lock);
1844 		return 1;
1845 	}
1846 	btrfs_delete_ref_head(delayed_refs, head);
1847 	spin_unlock(&head->lock);
1848 	spin_unlock(&delayed_refs->lock);
1849 
1850 	if (head->must_insert_reserved) {
1851 		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
1852 		if (head->is_data) {
1853 			ret = btrfs_del_csums(trans, fs_info->csum_root,
1854 					      head->bytenr, head->num_bytes);
1855 		}
1856 	}
1857 
1858 	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1859 
1860 	trace_run_delayed_ref_head(fs_info, head, 0);
1861 	btrfs_delayed_ref_unlock(head);
1862 	btrfs_put_delayed_ref_head(head);
1863 	return 0;
1864 }
1865 
1866 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
1867 					struct btrfs_trans_handle *trans)
1868 {
1869 	struct btrfs_delayed_ref_root *delayed_refs =
1870 		&trans->transaction->delayed_refs;
1871 	struct btrfs_delayed_ref_head *head = NULL;
1872 	int ret;
1873 
1874 	spin_lock(&delayed_refs->lock);
1875 	head = btrfs_select_ref_head(delayed_refs);
1876 	if (!head) {
1877 		spin_unlock(&delayed_refs->lock);
1878 		return head;
1879 	}
1880 
1881 	/*
1882 	 * Grab the lock that says we are going to process all the refs for
1883 	 * this head
1884 	 */
1885 	ret = btrfs_delayed_ref_lock(delayed_refs, head);
1886 	spin_unlock(&delayed_refs->lock);
1887 
1888 	/*
1889 	 * We may have dropped the spin lock to get the head mutex lock, and
1890 	 * that might have given someone else time to free the head.  If that's
1891 	 * true, it has been removed from our list and we can move on.
1892 	 */
1893 	if (ret == -EAGAIN)
1894 		head = ERR_PTR(-EAGAIN);
1895 
1896 	return head;
1897 }
1898 
1899 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
1900 				    struct btrfs_delayed_ref_head *locked_ref,
1901 				    unsigned long *run_refs)
1902 {
1903 	struct btrfs_fs_info *fs_info = trans->fs_info;
1904 	struct btrfs_delayed_ref_root *delayed_refs;
1905 	struct btrfs_delayed_extent_op *extent_op;
1906 	struct btrfs_delayed_ref_node *ref;
1907 	int must_insert_reserved = 0;
1908 	int ret;
1909 
1910 	delayed_refs = &trans->transaction->delayed_refs;
1911 
1912 	lockdep_assert_held(&locked_ref->mutex);
1913 	lockdep_assert_held(&locked_ref->lock);
1914 
1915 	while ((ref = select_delayed_ref(locked_ref))) {
1916 		if (ref->seq &&
1917 		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
1918 			spin_unlock(&locked_ref->lock);
1919 			unselect_delayed_ref_head(delayed_refs, locked_ref);
1920 			return -EAGAIN;
1921 		}
1922 
1923 		(*run_refs)++;
1924 		ref->in_tree = 0;
1925 		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
1926 		RB_CLEAR_NODE(&ref->ref_node);
1927 		if (!list_empty(&ref->add_list))
1928 			list_del(&ref->add_list);
1929 		/*
1930 		 * When we play the delayed ref, also correct the ref_mod on
1931 		 * head
1932 		 */
1933 		switch (ref->action) {
1934 		case BTRFS_ADD_DELAYED_REF:
1935 		case BTRFS_ADD_DELAYED_EXTENT:
1936 			locked_ref->ref_mod -= ref->ref_mod;
1937 			break;
1938 		case BTRFS_DROP_DELAYED_REF:
1939 			locked_ref->ref_mod += ref->ref_mod;
1940 			break;
1941 		default:
1942 			WARN_ON(1);
1943 		}
1944 		atomic_dec(&delayed_refs->num_entries);
1945 
1946 		/*
1947 		 * Record the must_insert_reserved flag before we drop the
1948 		 * spin lock.
1949 		 */
1950 		must_insert_reserved = locked_ref->must_insert_reserved;
1951 		locked_ref->must_insert_reserved = 0;
1952 
1953 		extent_op = locked_ref->extent_op;
1954 		locked_ref->extent_op = NULL;
1955 		spin_unlock(&locked_ref->lock);
1956 
1957 		ret = run_one_delayed_ref(trans, ref, extent_op,
1958 					  must_insert_reserved);
1959 
1960 		btrfs_free_delayed_extent_op(extent_op);
1961 		if (ret) {
1962 			unselect_delayed_ref_head(delayed_refs, locked_ref);
1963 			btrfs_put_delayed_ref(ref);
1964 			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
1965 				    ret);
1966 			return ret;
1967 		}
1968 
1969 		btrfs_put_delayed_ref(ref);
1970 		cond_resched();
1971 
1972 		spin_lock(&locked_ref->lock);
1973 		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
1974 	}
1975 
1976 	return 0;
1977 }
1978 
1979 /*
1980  * Returns 0 on success or if called with an already aborted transaction.
1981  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
1982  */
1983 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1984 					     unsigned long nr)
1985 {
1986 	struct btrfs_fs_info *fs_info = trans->fs_info;
1987 	struct btrfs_delayed_ref_root *delayed_refs;
1988 	struct btrfs_delayed_ref_head *locked_ref = NULL;
1989 	ktime_t start = ktime_get();
1990 	int ret;
1991 	unsigned long count = 0;
1992 	unsigned long actual_count = 0;
1993 
1994 	delayed_refs = &trans->transaction->delayed_refs;
1995 	do {
1996 		if (!locked_ref) {
1997 			locked_ref = btrfs_obtain_ref_head(trans);
1998 			if (IS_ERR_OR_NULL(locked_ref)) {
1999 				if (PTR_ERR(locked_ref) == -EAGAIN) {
2000 					continue;
2001 				} else {
2002 					break;
2003 				}
2004 			}
2005 			count++;
2006 		}
2007 		/*
2008 		 * We need to try and merge add/drops of the same ref since we
2009 		 * can run into issues with relocate dropping the implicit ref
2010 		 * and then it being added back again before the drop can
2011 		 * finish.  If we merged anything we need to re-loop so we can
2012 		 * get a good ref.
2013 		 * Or we can get node references of the same type that weren't
2014 		 * merged when created due to bumps in the tree mod seq, and
2015 		 * we need to merge them to prevent adding an inline extent
2016 		 * backref before dropping it (triggering a BUG_ON at
2017 		 * insert_inline_extent_backref()).
2018 		 */
2019 		spin_lock(&locked_ref->lock);
2020 		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
2021 
2022 		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
2023 						      &actual_count);
2024 		if (ret < 0 && ret != -EAGAIN) {
2025 			/*
2026 			 * Error, btrfs_run_delayed_refs_for_head already
2027 			 * unlocked everything so just bail out
2028 			 */
2029 			return ret;
2030 		} else if (!ret) {
2031 			/*
2032 			 * Success, perform the usual cleanup of a processed
2033 			 * head
2034 			 */
2035 			ret = cleanup_ref_head(trans, locked_ref);
2036 			if (ret > 0) {
2037 				/* We dropped our lock, we need to loop. */
2038 				ret = 0;
2039 				continue;
2040 			} else if (ret) {
2041 				return ret;
2042 			}
2043 		}
2044 
2045 		/*
2046 		 * Either success case or btrfs_run_delayed_refs_for_head
2047 		 * returned -EAGAIN, meaning we need to select another head
2048 		 */
2049 
2050 		locked_ref = NULL;
2051 		cond_resched();
2052 	} while ((nr != -1 && count < nr) || locked_ref);
2053 
2054 	/*
2055 	 * We don't want to include ref heads since we can have empty ref heads,
2056 	 * and those would drastically skew our runtime down because they only
2057 	 * involve accounting, not actual extent tree updates.
2058 	 */
2059 	if (actual_count > 0) {
2060 		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2061 		u64 avg;
2062 
2063 		/*
2064 		 * We weigh the current average higher than our current runtime
2065 		 * to avoid large swings in the average.
2066 		 */
2067 		spin_lock(&delayed_refs->lock);
2068 		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2069 		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
2070 		spin_unlock(&delayed_refs->lock);
2071 	}
2072 	return 0;
2073 }
2074 
2075 #ifdef SCRAMBLE_DELAYED_REFS
2076 /*
2077  * Normally delayed refs get processed in ascending bytenr order. This
2078  * correlates in most cases to the order added. To expose dependencies on this
2079  * order, we start to process the tree in the middle instead of the beginning
2080  */
2081 static u64 find_middle(struct rb_root *root)
2082 {
2083 	struct rb_node *n = root->rb_node;
2084 	struct btrfs_delayed_ref_node *entry;
2085 	int alt = 1;
2086 	u64 middle;
2087 	u64 first = 0, last = 0;
2088 
2089 	n = rb_first(root);
2090 	if (n) {
2091 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2092 		first = entry->bytenr;
2093 	}
2094 	n = rb_last(root);
2095 	if (n) {
2096 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2097 		last = entry->bytenr;
2098 	}
2099 	n = root->rb_node;
2100 
2101 	while (n) {
2102 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2103 		WARN_ON(!entry->in_tree);
2104 
2105 		middle = entry->bytenr;
2106 
2107 		if (alt)
2108 			n = n->rb_left;
2109 		else
2110 			n = n->rb_right;
2111 
2112 		alt = 1 - alt;
2113 	}
2114 	return middle;
2115 }
2116 #endif
2117 
2118 /*
2119  * Takes the number of bytes to be checksummed and figures out how many
2120  * leaves it would require to store the csums for that many bytes.
2121  */
2122 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
2123 {
2124 	u64 csum_size;
2125 	u64 num_csums_per_leaf;
2126 	u64 num_csums;
2127 
2128 	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
2129 	num_csums_per_leaf = div64_u64(csum_size,
2130 			(u64)btrfs_super_csum_size(fs_info->super_copy));
2131 	num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
2132 	num_csums += num_csums_per_leaf - 1;
2133 	num_csums = div64_u64(num_csums, num_csums_per_leaf);
2134 	return num_csums;
2135 }
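
/*
 * Worked example (a sketch, assuming a 4KiB sectorsize, crc32c
 * checksums of 4 bytes each, and a max item size of roughly 16KiB):
 * one leaf can hold about 16384 / 4 = ~4096 csums, i.e. it covers
 * ~16MiB of data.  Checksumming 1GiB then needs 1GiB / 4KiB = 262144
 * csums, which the round-up division above turns into
 * ceil(262144 / ~4096) = ~64 leaves.
 */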
2136 
2137 /*
2138  * This starts processing the delayed reference count updates and
2139  * extent insertions we have queued up so far.  count can be 0, which
2140  * means to process everything in the tree at the start of the run
2141  * (but not newly added entries), or it can be some target number you'd
2142  * like to process.
2143  *
2144  * Returns 0 on success or if called with an aborted transaction
2145  * Returns <0 on error and aborts the transaction
2146  */
2147 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2148 			   unsigned long count)
2149 {
2150 	struct btrfs_fs_info *fs_info = trans->fs_info;
2151 	struct rb_node *node;
2152 	struct btrfs_delayed_ref_root *delayed_refs;
2153 	struct btrfs_delayed_ref_head *head;
2154 	int ret;
2155 	int run_all = count == (unsigned long)-1;
2156 
2157 	/* We'll clean this up in btrfs_cleanup_transaction */
2158 	if (TRANS_ABORTED(trans))
2159 		return 0;
2160 
2161 	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2162 		return 0;
2163 
2164 	delayed_refs = &trans->transaction->delayed_refs;
2165 	if (count == 0)
2166 		count = atomic_read(&delayed_refs->num_entries) * 2;
2167 
2168 again:
2169 #ifdef SCRAMBLE_DELAYED_REFS
2170 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2171 #endif
2172 	ret = __btrfs_run_delayed_refs(trans, count);
2173 	if (ret < 0) {
2174 		btrfs_abort_transaction(trans, ret);
2175 		return ret;
2176 	}
2177 
2178 	if (run_all) {
2179 		btrfs_create_pending_block_groups(trans);
2180 
2181 		spin_lock(&delayed_refs->lock);
2182 		node = rb_first_cached(&delayed_refs->href_root);
2183 		if (!node) {
2184 			spin_unlock(&delayed_refs->lock);
2185 			goto out;
2186 		}
2187 		head = rb_entry(node, struct btrfs_delayed_ref_head,
2188 				href_node);
2189 		refcount_inc(&head->refs);
2190 		spin_unlock(&delayed_refs->lock);
2191 
2192 		/* Mutex was contended, block until it's released and retry. */
2193 		mutex_lock(&head->mutex);
2194 		mutex_unlock(&head->mutex);
2195 
2196 		btrfs_put_delayed_ref_head(head);
2197 		cond_resched();
2198 		goto again;
2199 	}
2200 out:
2201 	return 0;
2202 }
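
/*
 * Usage sketch for the count semantics above (hypothetical caller that
 * already holds a transaction handle):
 *
 *	btrfs_run_delayed_refs(trans, 32);		   at most ~32 heads
 *	btrfs_run_delayed_refs(trans, 0);		   what is queued now
 *	btrfs_run_delayed_refs(trans, (unsigned long)-1);  everything, even
 *							   newly added heads
 */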
2203 
2204 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2205 				struct extent_buffer *eb, u64 flags,
2206 				int level, int is_data)
2207 {
2208 	struct btrfs_delayed_extent_op *extent_op;
2209 	int ret;
2210 
2211 	extent_op = btrfs_alloc_delayed_extent_op();
2212 	if (!extent_op)
2213 		return -ENOMEM;
2214 
2215 	extent_op->flags_to_set = flags;
2216 	extent_op->update_flags = true;
2217 	extent_op->update_key = false;
2218 	extent_op->is_data = is_data ? true : false;
2219 	extent_op->level = level;
2220 
2221 	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
2222 	if (ret)
2223 		btrfs_free_delayed_extent_op(extent_op);
2224 	return ret;
2225 }
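
/*
 * Usage sketch (hedged, mirroring how the COW path flips a tree block
 * to full backref mode; "buf" is a hypothetical extent buffer):
 *
 *	ret = btrfs_set_disk_extent_flags(trans, buf,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *					  btrfs_header_level(buf), 0);
 *
 * The flag update is queued as a delayed extent op rather than applied
 * to the extent item immediately.
 */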
2226 
2227 static noinline int check_delayed_ref(struct btrfs_root *root,
2228 				      struct btrfs_path *path,
2229 				      u64 objectid, u64 offset, u64 bytenr)
2230 {
2231 	struct btrfs_delayed_ref_head *head;
2232 	struct btrfs_delayed_ref_node *ref;
2233 	struct btrfs_delayed_data_ref *data_ref;
2234 	struct btrfs_delayed_ref_root *delayed_refs;
2235 	struct btrfs_transaction *cur_trans;
2236 	struct rb_node *node;
2237 	int ret = 0;
2238 
2239 	spin_lock(&root->fs_info->trans_lock);
2240 	cur_trans = root->fs_info->running_transaction;
2241 	if (cur_trans)
2242 		refcount_inc(&cur_trans->use_count);
2243 	spin_unlock(&root->fs_info->trans_lock);
2244 	if (!cur_trans)
2245 		return 0;
2246 
2247 	delayed_refs = &cur_trans->delayed_refs;
2248 	spin_lock(&delayed_refs->lock);
2249 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
2250 	if (!head) {
2251 		spin_unlock(&delayed_refs->lock);
2252 		btrfs_put_transaction(cur_trans);
2253 		return 0;
2254 	}
2255 
2256 	if (!mutex_trylock(&head->mutex)) {
2257 		refcount_inc(&head->refs);
2258 		spin_unlock(&delayed_refs->lock);
2259 
2260 		btrfs_release_path(path);
2261 
2262 		/*
2263 		 * Mutex was contended, block until it's released and let
2264 		 * caller try again
2265 		 */
2266 		mutex_lock(&head->mutex);
2267 		mutex_unlock(&head->mutex);
2268 		btrfs_put_delayed_ref_head(head);
2269 		btrfs_put_transaction(cur_trans);
2270 		return -EAGAIN;
2271 	}
2272 	spin_unlock(&delayed_refs->lock);
2273 
2274 	spin_lock(&head->lock);
2275 	/*
2276 	 * XXX: We should replace this with a proper search function in the
2277 	 * future.
2278 	 */
2279 	for (node = rb_first_cached(&head->ref_tree); node;
2280 	     node = rb_next(node)) {
2281 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
2282 		/* If it's a shared ref we know a cross reference exists */
2283 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2284 			ret = 1;
2285 			break;
2286 		}
2287 
2288 		data_ref = btrfs_delayed_node_to_data_ref(ref);
2289 
2290 		/*
2291 		 * If our ref doesn't match the one we're currently looking at
2292 		 * then we have a cross reference.
2293 		 */
2294 		if (data_ref->root != root->root_key.objectid ||
2295 		    data_ref->objectid != objectid ||
2296 		    data_ref->offset != offset) {
2297 			ret = 1;
2298 			break;
2299 		}
2300 	}
2301 	spin_unlock(&head->lock);
2302 	mutex_unlock(&head->mutex);
2303 	btrfs_put_transaction(cur_trans);
2304 	return ret;
2305 }
2306 
2307 static noinline int check_committed_ref(struct btrfs_root *root,
2308 					struct btrfs_path *path,
2309 					u64 objectid, u64 offset, u64 bytenr)
2310 {
2311 	struct btrfs_fs_info *fs_info = root->fs_info;
2312 	struct btrfs_root *extent_root = fs_info->extent_root;
2313 	struct extent_buffer *leaf;
2314 	struct btrfs_extent_data_ref *ref;
2315 	struct btrfs_extent_inline_ref *iref;
2316 	struct btrfs_extent_item *ei;
2317 	struct btrfs_key key;
2318 	u32 item_size;
2319 	int type;
2320 	int ret;
2321 
2322 	key.objectid = bytenr;
2323 	key.offset = (u64)-1;
2324 	key.type = BTRFS_EXTENT_ITEM_KEY;
2325 
2326 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2327 	if (ret < 0)
2328 		goto out;
2329 	BUG_ON(ret == 0); /* Corruption */
2330 
2331 	ret = -ENOENT;
2332 	if (path->slots[0] == 0)
2333 		goto out;
2334 
2335 	path->slots[0]--;
2336 	leaf = path->nodes[0];
2337 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2338 
2339 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2340 		goto out;
2341 
2342 	ret = 1;
2343 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2344 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2345 
2346 	/* If extent item has more than 1 inline ref then it's shared */
2347 	if (item_size != sizeof(*ei) +
2348 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2349 		goto out;
2350 
2351 	/* If extent created before last snapshot => it's definitely shared */
2352 	if (btrfs_extent_generation(leaf, ei) <=
2353 	    btrfs_root_last_snapshot(&root->root_item))
2354 		goto out;
2355 
2356 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2357 
2358 	/* If this extent has SHARED_DATA_REF then it's shared */
2359 	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2360 	if (type != BTRFS_EXTENT_DATA_REF_KEY)
2361 		goto out;
2362 
2363 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2364 	if (btrfs_extent_refs(leaf, ei) !=
2365 	    btrfs_extent_data_ref_count(leaf, ref) ||
2366 	    btrfs_extent_data_ref_root(leaf, ref) !=
2367 	    root->root_key.objectid ||
2368 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2369 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2370 		goto out;
2371 
2372 	ret = 0;
2373 out:
2374 	return ret;
2375 }
2376 
2377 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
2378 			  u64 bytenr)
2379 {
2380 	struct btrfs_path *path;
2381 	int ret;
2382 
2383 	path = btrfs_alloc_path();
2384 	if (!path)
2385 		return -ENOMEM;
2386 
2387 	do {
2388 		ret = check_committed_ref(root, path, objectid,
2389 					  offset, bytenr);
2390 		if (ret && ret != -ENOENT)
2391 			goto out;
2392 
2393 		ret = check_delayed_ref(root, path, objectid, offset, bytenr);
2394 	} while (ret == -EAGAIN);
2395 
2396 out:
2397 	btrfs_free_path(path);
2398 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2399 		WARN_ON(ret > 0);
2400 	return ret;
2401 }
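
/*
 * Return contract sketch for the helper above: 0 means no cross
 * reference was found (the extent is exclusively owned by the given
 * root/objectid/offset), > 0 means a cross reference exists, and < 0
 * is an error.  A NOCOW-style writer would only overwrite in place on
 * a 0 return and fall back to COW otherwise.
 */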
2402 
2403 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2404 			   struct btrfs_root *root,
2405 			   struct extent_buffer *buf,
2406 			   int full_backref, int inc)
2407 {
2408 	struct btrfs_fs_info *fs_info = root->fs_info;
2409 	u64 bytenr;
2410 	u64 num_bytes;
2411 	u64 parent;
2412 	u64 ref_root;
2413 	u32 nritems;
2414 	struct btrfs_key key;
2415 	struct btrfs_file_extent_item *fi;
2416 	struct btrfs_ref generic_ref = { 0 };
2417 	bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
2418 	int i;
2419 	int action;
2420 	int level;
2421 	int ret = 0;
2422 
2423 	if (btrfs_is_testing(fs_info))
2424 		return 0;
2425 
2426 	ref_root = btrfs_header_owner(buf);
2427 	nritems = btrfs_header_nritems(buf);
2428 	level = btrfs_header_level(buf);
2429 
2430 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0)
2431 		return 0;
2432 
2433 	if (full_backref)
2434 		parent = buf->start;
2435 	else
2436 		parent = 0;
2437 	if (inc)
2438 		action = BTRFS_ADD_DELAYED_REF;
2439 	else
2440 		action = BTRFS_DROP_DELAYED_REF;
2441 
2442 	for (i = 0; i < nritems; i++) {
2443 		if (level == 0) {
2444 			btrfs_item_key_to_cpu(buf, &key, i);
2445 			if (key.type != BTRFS_EXTENT_DATA_KEY)
2446 				continue;
2447 			fi = btrfs_item_ptr(buf, i,
2448 					    struct btrfs_file_extent_item);
2449 			if (btrfs_file_extent_type(buf, fi) ==
2450 			    BTRFS_FILE_EXTENT_INLINE)
2451 				continue;
2452 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2453 			if (bytenr == 0)
2454 				continue;
2455 
2456 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2457 			key.offset -= btrfs_file_extent_offset(buf, fi);
2458 			btrfs_init_generic_ref(&generic_ref, action, bytenr,
2459 					       num_bytes, parent);
2460 			generic_ref.real_root = root->root_key.objectid;
2461 			btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
2462 					    key.offset);
2463 			generic_ref.skip_qgroup = for_reloc;
2464 			if (inc)
2465 				ret = btrfs_inc_extent_ref(trans, &generic_ref);
2466 			else
2467 				ret = btrfs_free_extent(trans, &generic_ref);
2468 			if (ret)
2469 				goto fail;
2470 		} else {
2471 			bytenr = btrfs_node_blockptr(buf, i);
2472 			num_bytes = fs_info->nodesize;
2473 			btrfs_init_generic_ref(&generic_ref, action, bytenr,
2474 					       num_bytes, parent);
2475 			generic_ref.real_root = root->root_key.objectid;
2476 			btrfs_init_tree_ref(&generic_ref, level - 1, ref_root);
2477 			generic_ref.skip_qgroup = for_reloc;
2478 			if (inc)
2479 				ret = btrfs_inc_extent_ref(trans, &generic_ref);
2480 			else
2481 				ret = btrfs_free_extent(trans, &generic_ref);
2482 			if (ret)
2483 				goto fail;
2484 		}
2485 	}
2486 	return 0;
2487 fail:
2488 	return ret;
2489 }
2490 
2491 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2492 		  struct extent_buffer *buf, int full_backref)
2493 {
2494 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2495 }
2496 
2497 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2498 		  struct extent_buffer *buf, int full_backref)
2499 {
2500 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2501 }
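
/*
 * Usage sketch (hedged): when a shared tree block is COWed, the new
 * copy takes references on everything the block points to and the old
 * copy may drop its references, e.g.:
 *
 *	ret = btrfs_inc_ref(trans, root, cow, 1);	new copy, full backref
 *	ret = btrfs_dec_ref(trans, root, buf, 1);	old copy drops them
 *
 * with "cow" and "buf" being the new and old extent buffers.
 */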
2502 
2503 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
2504 {
2505 	struct btrfs_block_group *block_group;
2506 	int readonly = 0;
2507 
2508 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
2509 	if (!block_group || block_group->ro)
2510 		readonly = 1;
2511 	if (block_group)
2512 		btrfs_put_block_group(block_group);
2513 	return readonly;
2514 }
2515 
2516 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
2517 {
2518 	struct btrfs_fs_info *fs_info = root->fs_info;
2519 	u64 flags;
2520 	u64 ret;
2521 
2522 	if (data)
2523 		flags = BTRFS_BLOCK_GROUP_DATA;
2524 	else if (root == fs_info->chunk_root)
2525 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
2526 	else
2527 		flags = BTRFS_BLOCK_GROUP_METADATA;
2528 
2529 	ret = btrfs_get_alloc_profile(fs_info, flags);
2530 	return ret;
2531 }
2532 
2533 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
2534 {
2535 	struct btrfs_block_group *cache;
2536 	u64 bytenr;
2537 
2538 	spin_lock(&fs_info->block_group_cache_lock);
2539 	bytenr = fs_info->first_logical_byte;
2540 	spin_unlock(&fs_info->block_group_cache_lock);
2541 
2542 	if (bytenr < (u64)-1)
2543 		return bytenr;
2544 
2545 	cache = btrfs_lookup_first_block_group(fs_info, search_start);
2546 	if (!cache)
2547 		return 0;
2548 
2549 	bytenr = cache->start;
2550 	btrfs_put_block_group(cache);
2551 
2552 	return bytenr;
2553 }
2554 
2555 static int pin_down_extent(struct btrfs_trans_handle *trans,
2556 			   struct btrfs_block_group *cache,
2557 			   u64 bytenr, u64 num_bytes, int reserved)
2558 {
2559 	struct btrfs_fs_info *fs_info = cache->fs_info;
2560 
2561 	spin_lock(&cache->space_info->lock);
2562 	spin_lock(&cache->lock);
2563 	cache->pinned += num_bytes;
2564 	btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
2565 					     num_bytes);
2566 	if (reserved) {
2567 		cache->reserved -= num_bytes;
2568 		cache->space_info->bytes_reserved -= num_bytes;
2569 	}
2570 	spin_unlock(&cache->lock);
2571 	spin_unlock(&cache->space_info->lock);
2572 
2573 	percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
2574 		    num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
2575 	set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
2576 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
2577 	return 0;
2578 }
2579 
2580 int btrfs_pin_extent(struct btrfs_trans_handle *trans,
2581 		     u64 bytenr, u64 num_bytes, int reserved)
2582 {
2583 	struct btrfs_block_group *cache;
2584 
2585 	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2586 	BUG_ON(!cache); /* Logic error */
2587 
2588 	pin_down_extent(trans, cache, bytenr, num_bytes, reserved);
2589 
2590 	btrfs_put_block_group(cache);
2591 	return 0;
2592 }
2593 
2594 /*
2595  * This function must be called within a transaction.
2596  */
2597 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
2598 				    u64 bytenr, u64 num_bytes)
2599 {
2600 	struct btrfs_block_group *cache;
2601 	int ret;
2602 
2603 	btrfs_add_excluded_extent(trans->fs_info, bytenr, num_bytes);
2604 
2605 	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2606 	if (!cache)
2607 		return -EINVAL;
2608 
2609 	/*
2610 	 * pull in the free space cache (if any) so that our pin
2611 	 * removes the free space from the cache.  We have load_only set
2612 	 * to one because the slow code to read in the free extents does check
2613 	 * the pinned extents.
2614 	 */
2615 	btrfs_cache_block_group(cache, 1);
2616 
2617 	pin_down_extent(trans, cache, bytenr, num_bytes, 0);
2618 
2619 	/* remove us from the free space cache (if we're there at all) */
2620 	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
2621 	btrfs_put_block_group(cache);
2622 	return ret;
2623 }
2624 
2625 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
2626 				   u64 start, u64 num_bytes)
2627 {
2628 	int ret;
2629 	struct btrfs_block_group *block_group;
2630 	struct btrfs_caching_control *caching_ctl;
2631 
2632 	block_group = btrfs_lookup_block_group(fs_info, start);
2633 	if (!block_group)
2634 		return -EINVAL;
2635 
2636 	btrfs_cache_block_group(block_group, 0);
2637 	caching_ctl = btrfs_get_caching_control(block_group);
2638 
2639 	if (!caching_ctl) {
2640 		/* Logic error */
2641 		BUG_ON(!btrfs_block_group_done(block_group));
2642 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
2643 	} else {
2644 		mutex_lock(&caching_ctl->mutex);
2645 
2646 		if (start >= caching_ctl->progress) {
2647 			ret = btrfs_add_excluded_extent(fs_info, start,
2648 							num_bytes);
2649 		} else if (start + num_bytes <= caching_ctl->progress) {
2650 			ret = btrfs_remove_free_space(block_group,
2651 						      start, num_bytes);
2652 		} else {
2653 			num_bytes = caching_ctl->progress - start;
2654 			ret = btrfs_remove_free_space(block_group,
2655 						      start, num_bytes);
2656 			if (ret)
2657 				goto out_lock;
2658 
2659 			num_bytes = (start + num_bytes) -
2660 				caching_ctl->progress;
2661 			start = caching_ctl->progress;
2662 			ret = btrfs_add_excluded_extent(fs_info, start,
2663 							num_bytes);
2664 		}
2665 out_lock:
2666 		mutex_unlock(&caching_ctl->mutex);
2667 		btrfs_put_caching_control(caching_ctl);
2668 	}
2669 	btrfs_put_block_group(block_group);
2670 	return ret;
2671 }
2672 
2673 int btrfs_exclude_logged_extents(struct extent_buffer *eb)
2674 {
2675 	struct btrfs_fs_info *fs_info = eb->fs_info;
2676 	struct btrfs_file_extent_item *item;
2677 	struct btrfs_key key;
2678 	int found_type;
2679 	int i;
2680 	int ret = 0;
2681 
2682 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
2683 		return 0;
2684 
2685 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
2686 		btrfs_item_key_to_cpu(eb, &key, i);
2687 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2688 			continue;
2689 		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2690 		found_type = btrfs_file_extent_type(eb, item);
2691 		if (found_type == BTRFS_FILE_EXTENT_INLINE)
2692 			continue;
2693 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
2694 			continue;
2695 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
2696 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
2697 		ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
2698 		if (ret)
2699 			break;
2700 	}
2701 
2702 	return ret;
2703 }
2704 
2705 static void
2706 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
2707 {
2708 	atomic_inc(&bg->reservations);
2709 }
2710 
2711 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
2712 {
2713 	struct btrfs_caching_control *next;
2714 	struct btrfs_caching_control *caching_ctl;
2715 	struct btrfs_block_group *cache;
2716 
2717 	down_write(&fs_info->commit_root_sem);
2718 
2719 	list_for_each_entry_safe(caching_ctl, next,
2720 				 &fs_info->caching_block_groups, list) {
2721 		cache = caching_ctl->block_group;
2722 		if (btrfs_block_group_done(cache)) {
2723 			cache->last_byte_to_unpin = (u64)-1;
2724 			list_del_init(&caching_ctl->list);
2725 			btrfs_put_caching_control(caching_ctl);
2726 		} else {
2727 			cache->last_byte_to_unpin = caching_ctl->progress;
2728 		}
2729 	}
2730 
2731 	up_write(&fs_info->commit_root_sem);
2732 
2733 	btrfs_update_global_block_rsv(fs_info);
2734 }
2735 
2736 /*
2737  * Returns the free cluster for the given space info and sets empty_cluster to
2738  * what it should be based on the mount options.
2739  */
2740 static struct btrfs_free_cluster *
2741 fetch_cluster_info(struct btrfs_fs_info *fs_info,
2742 		   struct btrfs_space_info *space_info, u64 *empty_cluster)
2743 {
2744 	struct btrfs_free_cluster *ret = NULL;
2745 
2746 	*empty_cluster = 0;
2747 	if (btrfs_mixed_space_info(space_info))
2748 		return ret;
2749 
2750 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
2751 		ret = &fs_info->meta_alloc_cluster;
2752 		if (btrfs_test_opt(fs_info, SSD))
2753 			*empty_cluster = SZ_2M;
2754 		else
2755 			*empty_cluster = SZ_64K;
2756 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
2757 		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
2758 		*empty_cluster = SZ_2M;
2759 		ret = &fs_info->data_alloc_cluster;
2760 	}
2761 
2762 	return ret;
2763 }
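
/*
 * Summary of the choices above: mixed block groups never use a
 * cluster; metadata uses the global meta_alloc_cluster with an empty
 * size of 2MiB on SSD and 64KiB otherwise; data only clusters with the
 * ssd_spread mount option, using data_alloc_cluster and 2MiB.
 */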
2764 
2765 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
2766 			      u64 start, u64 end,
2767 			      const bool return_free_space)
2768 {
2769 	struct btrfs_block_group *cache = NULL;
2770 	struct btrfs_space_info *space_info;
2771 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
2772 	struct btrfs_free_cluster *cluster = NULL;
2773 	u64 len;
2774 	u64 total_unpinned = 0;
2775 	u64 empty_cluster = 0;
2776 	bool readonly;
2777 
2778 	while (start <= end) {
2779 		readonly = false;
2780 		if (!cache ||
2781 		    start >= cache->start + cache->length) {
2782 			if (cache)
2783 				btrfs_put_block_group(cache);
2784 			total_unpinned = 0;
2785 			cache = btrfs_lookup_block_group(fs_info, start);
2786 			BUG_ON(!cache); /* Logic error */
2787 
2788 			cluster = fetch_cluster_info(fs_info,
2789 						     cache->space_info,
2790 						     &empty_cluster);
2791 			empty_cluster <<= 1;
2792 		}
2793 
2794 		len = cache->start + cache->length - start;
2795 		len = min(len, end + 1 - start);
2796 
2797 		if (start < cache->last_byte_to_unpin) {
2798 			len = min(len, cache->last_byte_to_unpin - start);
2799 			if (return_free_space)
2800 				btrfs_add_free_space(cache, start, len);
2801 		}
2802 
2803 		start += len;
2804 		total_unpinned += len;
2805 		space_info = cache->space_info;
2806 
2807 		/*
2808 		 * If this space cluster has been marked as fragmented and we've
2809 		 * unpinned enough in this block group to potentially allow a
2810 		 * cluster to be created inside of it go ahead and clear the
2811 		 * fragmented check.
2812 		 */
2813 		if (cluster && cluster->fragmented &&
2814 		    total_unpinned > empty_cluster) {
2815 			spin_lock(&cluster->lock);
2816 			cluster->fragmented = 0;
2817 			spin_unlock(&cluster->lock);
2818 		}
2819 
2820 		spin_lock(&space_info->lock);
2821 		spin_lock(&cache->lock);
2822 		cache->pinned -= len;
2823 		btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
2824 		space_info->max_extent_size = 0;
2825 		percpu_counter_add_batch(&space_info->total_bytes_pinned,
2826 			    -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
2827 		if (cache->ro) {
2828 			space_info->bytes_readonly += len;
2829 			readonly = true;
2830 		}
2831 		spin_unlock(&cache->lock);
2832 		if (!readonly && return_free_space &&
2833 		    global_rsv->space_info == space_info) {
2834 			u64 to_add = len;
2835 
2836 			spin_lock(&global_rsv->lock);
2837 			if (!global_rsv->full) {
2838 				to_add = min(len, global_rsv->size -
2839 					     global_rsv->reserved);
2840 				global_rsv->reserved += to_add;
2841 				btrfs_space_info_update_bytes_may_use(fs_info,
2842 						space_info, to_add);
2843 				if (global_rsv->reserved >= global_rsv->size)
2844 					global_rsv->full = 1;
2845 				len -= to_add;
2846 			}
2847 			spin_unlock(&global_rsv->lock);
2848 			/* Add to any tickets we may have */
2849 			if (len)
2850 				btrfs_try_granting_tickets(fs_info,
2851 							   space_info);
2852 		}
2853 		spin_unlock(&space_info->lock);
2854 	}
2855 
2856 	if (cache)
2857 		btrfs_put_block_group(cache);
2858 	return 0;
2859 }
2860 
2861 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
2862 {
2863 	struct btrfs_fs_info *fs_info = trans->fs_info;
2864 	struct btrfs_block_group *block_group, *tmp;
2865 	struct list_head *deleted_bgs;
2866 	struct extent_io_tree *unpin;
2867 	u64 start;
2868 	u64 end;
2869 	int ret;
2870 
2871 	unpin = &trans->transaction->pinned_extents;
2872 
2873 	while (!TRANS_ABORTED(trans)) {
2874 		struct extent_state *cached_state = NULL;
2875 
2876 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
2877 		ret = find_first_extent_bit(unpin, 0, &start, &end,
2878 					    EXTENT_DIRTY, &cached_state);
2879 		if (ret) {
2880 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2881 			break;
2882 		}
2883 		if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
2884 			clear_extent_bits(&fs_info->excluded_extents, start,
2885 					  end, EXTENT_UPTODATE);
2886 
2887 		if (btrfs_test_opt(fs_info, DISCARD_SYNC))
2888 			ret = btrfs_discard_extent(fs_info, start,
2889 						   end + 1 - start, NULL);
2890 
2891 		clear_extent_dirty(unpin, start, end, &cached_state);
2892 		unpin_extent_range(fs_info, start, end, true);
2893 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2894 		free_extent_state(cached_state);
2895 		cond_resched();
2896 	}
2897 
2898 	if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
2899 		btrfs_discard_calc_delay(&fs_info->discard_ctl);
2900 		btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
2901 	}
2902 
2903 	/*
2904 	 * Transaction is finished.  We don't need the lock anymore.  We
2905 	 * do need to clean up the block groups in case of a transaction
2906 	 * abort.
2907 	 */
2908 	deleted_bgs = &trans->transaction->deleted_bgs;
2909 	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
2910 		u64 trimmed = 0;
2911 
2912 		ret = -EROFS;
2913 		if (!TRANS_ABORTED(trans))
2914 			ret = btrfs_discard_extent(fs_info,
2915 						   block_group->start,
2916 						   block_group->length,
2917 						   &trimmed);
2918 
2919 		list_del_init(&block_group->bg_list);
2920 		btrfs_unfreeze_block_group(block_group);
2921 		btrfs_put_block_group(block_group);
2922 
2923 		if (ret) {
2924 			const char *errstr = btrfs_decode_error(ret);
2925 			btrfs_warn(fs_info,
2926 			   "discard failed while removing blockgroup: errno=%d %s",
2927 				   ret, errstr);
2928 		}
2929 	}
2930 
2931 	return 0;
2932 }
2933 
2934 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2935 			       struct btrfs_delayed_ref_node *node, u64 parent,
2936 			       u64 root_objectid, u64 owner_objectid,
2937 			       u64 owner_offset, int refs_to_drop,
2938 			       struct btrfs_delayed_extent_op *extent_op)
2939 {
2940 	struct btrfs_fs_info *info = trans->fs_info;
2941 	struct btrfs_key key;
2942 	struct btrfs_path *path;
2943 	struct btrfs_root *extent_root = info->extent_root;
2944 	struct extent_buffer *leaf;
2945 	struct btrfs_extent_item *ei;
2946 	struct btrfs_extent_inline_ref *iref;
2947 	int ret;
2948 	int is_data;
2949 	int extent_slot = 0;
2950 	int found_extent = 0;
2951 	int num_to_del = 1;
2952 	u32 item_size;
2953 	u64 refs;
2954 	u64 bytenr = node->bytenr;
2955 	u64 num_bytes = node->num_bytes;
2956 	int last_ref = 0;
2957 	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
2958 
2959 	path = btrfs_alloc_path();
2960 	if (!path)
2961 		return -ENOMEM;
2962 
2963 	path->leave_spinning = 1;
2964 
2965 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
2966 	BUG_ON(!is_data && refs_to_drop != 1);
2967 
2968 	if (is_data)
2969 		skinny_metadata = false;
2970 
2971 	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
2972 				    parent, root_objectid, owner_objectid,
2973 				    owner_offset);
2974 	if (ret == 0) {
2975 		extent_slot = path->slots[0];
2976 		while (extent_slot >= 0) {
2977 			btrfs_item_key_to_cpu(path->nodes[0], &key,
2978 					      extent_slot);
2979 			if (key.objectid != bytenr)
2980 				break;
2981 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
2982 			    key.offset == num_bytes) {
2983 				found_extent = 1;
2984 				break;
2985 			}
2986 			if (key.type == BTRFS_METADATA_ITEM_KEY &&
2987 			    key.offset == owner_objectid) {
2988 				found_extent = 1;
2989 				break;
2990 			}
2991 			if (path->slots[0] - extent_slot > 5)
2992 				break;
2993 			extent_slot--;
2994 		}
2995 
2996 		if (!found_extent) {
2997 			BUG_ON(iref);
2998 			ret = remove_extent_backref(trans, path, NULL,
2999 						    refs_to_drop,
3000 						    is_data, &last_ref);
3001 			if (ret) {
3002 				btrfs_abort_transaction(trans, ret);
3003 				goto out;
3004 			}
3005 			btrfs_release_path(path);
3006 			path->leave_spinning = 1;
3007 
3008 			key.objectid = bytenr;
3009 			key.type = BTRFS_EXTENT_ITEM_KEY;
3010 			key.offset = num_bytes;
3011 
3012 			if (!is_data && skinny_metadata) {
3013 				key.type = BTRFS_METADATA_ITEM_KEY;
3014 				key.offset = owner_objectid;
3015 			}
3016 
3017 			ret = btrfs_search_slot(trans, extent_root,
3018 						&key, path, -1, 1);
3019 			if (ret > 0 && skinny_metadata && path->slots[0]) {
3020 				/*
3021 				 * Couldn't find our skinny metadata item,
3022 				 * see if we have ye olde extent item.
3023 				 */
3024 				path->slots[0]--;
3025 				btrfs_item_key_to_cpu(path->nodes[0], &key,
3026 						      path->slots[0]);
3027 				if (key.objectid == bytenr &&
3028 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
3029 				    key.offset == num_bytes)
3030 					ret = 0;
3031 			}
3032 
3033 			if (ret > 0 && skinny_metadata) {
3034 				skinny_metadata = false;
3035 				key.objectid = bytenr;
3036 				key.type = BTRFS_EXTENT_ITEM_KEY;
3037 				key.offset = num_bytes;
3038 				btrfs_release_path(path);
3039 				ret = btrfs_search_slot(trans, extent_root,
3040 							&key, path, -1, 1);
3041 			}
3042 
3043 			if (ret) {
3044 				btrfs_err(info,
3045 					  "umm, got %d back from search, was looking for %llu",
3046 					  ret, bytenr);
3047 				if (ret > 0)
3048 					btrfs_print_leaf(path->nodes[0]);
3049 			}
3050 			if (ret < 0) {
3051 				btrfs_abort_transaction(trans, ret);
3052 				goto out;
3053 			}
3054 			extent_slot = path->slots[0];
3055 		}
3056 	} else if (WARN_ON(ret == -ENOENT)) {
3057 		btrfs_print_leaf(path->nodes[0]);
3058 		btrfs_err(info,
3059 			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
3060 			bytenr, parent, root_objectid, owner_objectid,
3061 			owner_offset);
3062 		btrfs_abort_transaction(trans, ret);
3063 		goto out;
3064 	} else {
3065 		btrfs_abort_transaction(trans, ret);
3066 		goto out;
3067 	}
3068 
3069 	leaf = path->nodes[0];
3070 	item_size = btrfs_item_size_nr(leaf, extent_slot);
3071 	if (unlikely(item_size < sizeof(*ei))) {
3072 		ret = -EINVAL;
3073 		btrfs_print_v0_err(info);
3074 		btrfs_abort_transaction(trans, ret);
3075 		goto out;
3076 	}
3077 	ei = btrfs_item_ptr(leaf, extent_slot,
3078 			    struct btrfs_extent_item);
3079 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
3080 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
3081 		struct btrfs_tree_block_info *bi;
3082 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3083 		bi = (struct btrfs_tree_block_info *)(ei + 1);
3084 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3085 	}
3086 
3087 	refs = btrfs_extent_refs(leaf, ei);
3088 	if (refs < refs_to_drop) {
3089 		btrfs_err(info,
3090 			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
3091 			  refs_to_drop, refs, bytenr);
3092 		ret = -EINVAL;
3093 		btrfs_abort_transaction(trans, ret);
3094 		goto out;
3095 	}
3096 	refs -= refs_to_drop;
3097 
3098 	if (refs > 0) {
3099 		if (extent_op)
3100 			__run_delayed_extent_op(extent_op, leaf, ei);
3101 		/*
3102 		 * In the case of inline back ref, reference count will
3103 		 * be updated by remove_extent_backref
3104 		 */
3105 		if (iref) {
3106 			BUG_ON(!found_extent);
3107 		} else {
3108 			btrfs_set_extent_refs(leaf, ei, refs);
3109 			btrfs_mark_buffer_dirty(leaf);
3110 		}
3111 		if (found_extent) {
3112 			ret = remove_extent_backref(trans, path, iref,
3113 						    refs_to_drop, is_data,
3114 						    &last_ref);
3115 			if (ret) {
3116 				btrfs_abort_transaction(trans, ret);
3117 				goto out;
3118 			}
3119 		}
3120 	} else {
3121 		if (found_extent) {
3122 			BUG_ON(is_data && refs_to_drop !=
3123 			       extent_data_ref_count(path, iref));
3124 			if (iref) {
3125 				BUG_ON(path->slots[0] != extent_slot);
3126 			} else {
3127 				BUG_ON(path->slots[0] != extent_slot + 1);
3128 				path->slots[0] = extent_slot;
3129 				num_to_del = 2;
3130 			}
3131 		}
3132 
3133 		last_ref = 1;
3134 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3135 				      num_to_del);
3136 		if (ret) {
3137 			btrfs_abort_transaction(trans, ret);
3138 			goto out;
3139 		}
3140 		btrfs_release_path(path);
3141 
3142 		if (is_data) {
3143 			ret = btrfs_del_csums(trans, info->csum_root, bytenr,
3144 					      num_bytes);
3145 			if (ret) {
3146 				btrfs_abort_transaction(trans, ret);
3147 				goto out;
3148 			}
3149 		}
3150 
3151 		ret = add_to_free_space_tree(trans, bytenr, num_bytes);
3152 		if (ret) {
3153 			btrfs_abort_transaction(trans, ret);
3154 			goto out;
3155 		}
3156 
3157 		ret = btrfs_update_block_group(trans, bytenr, num_bytes, 0);
3158 		if (ret) {
3159 			btrfs_abort_transaction(trans, ret);
3160 			goto out;
3161 		}
3162 	}
3163 	btrfs_release_path(path);
3164 
3165 out:
3166 	btrfs_free_path(path);
3167 	return ret;
3168 }
3169 
3170 /*
3171  * When we free a block, it is possible (and likely) that we free the last
3172  * delayed ref for that extent as well.  This searches the delayed ref tree
3173  * for a given extent and, if there are no other delayed refs to be
3174  * processed, removes the ref head from the tree.
3175  */
3176 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3177 				      u64 bytenr)
3178 {
3179 	struct btrfs_delayed_ref_head *head;
3180 	struct btrfs_delayed_ref_root *delayed_refs;
3181 	int ret = 0;
3182 
3183 	delayed_refs = &trans->transaction->delayed_refs;
3184 	spin_lock(&delayed_refs->lock);
3185 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
3186 	if (!head)
3187 		goto out_delayed_unlock;
3188 
3189 	spin_lock(&head->lock);
3190 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
3191 		goto out;
3192 
3193 	if (cleanup_extent_op(head) != NULL)
3194 		goto out;
3195 
3196 	/*
3197 	 * Waiting for the lock here would deadlock.  If someone else has it
3198 	 * locked, they are already in the process of dropping it anyway.
3199 	 */
3200 	if (!mutex_trylock(&head->mutex))
3201 		goto out;
3202 
3203 	btrfs_delete_ref_head(delayed_refs, head);
3204 	head->processing = 0;
3205 
3206 	spin_unlock(&head->lock);
3207 	spin_unlock(&delayed_refs->lock);
3208 
3209 	BUG_ON(head->extent_op);
3210 	if (head->must_insert_reserved)
3211 		ret = 1;
3212 
3213 	btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
3214 	mutex_unlock(&head->mutex);
3215 	btrfs_put_delayed_ref_head(head);
3216 	return ret;
3217 out:
3218 	spin_unlock(&head->lock);
3219 
3220 out_delayed_unlock:
3221 	spin_unlock(&delayed_refs->lock);
3222 	return 0;
3223 }
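
/*
 * Return semantics of the helper above: 1 means the head was removed
 * and must_insert_reserved was set, so the caller still owns the
 * reserved extent and may free it directly; 0 means the head either
 * could not be cleaned up here or had nothing reserved, and the
 * delayed ref machinery remains responsible for it.
 */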
3224 
3225 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
3226 			   struct btrfs_root *root,
3227 			   struct extent_buffer *buf,
3228 			   u64 parent, int last_ref)
3229 {
3230 	struct btrfs_fs_info *fs_info = root->fs_info;
3231 	struct btrfs_ref generic_ref = { 0 };
3232 	int pin = 1;
3233 	int ret;
3234 
3235 	btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
3236 			       buf->start, buf->len, parent);
3237 	btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
3238 			    root->root_key.objectid);
3239 
3240 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
3241 		int old_ref_mod, new_ref_mod;
3242 
3243 		btrfs_ref_tree_mod(fs_info, &generic_ref);
3244 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
3245 						 &old_ref_mod, &new_ref_mod);
3246 		BUG_ON(ret); /* -ENOMEM */
3247 		pin = old_ref_mod >= 0 && new_ref_mod < 0;
3248 	}
3249 
3250 	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
3251 		struct btrfs_block_group *cache;
3252 
3253 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
3254 			ret = check_ref_cleanup(trans, buf->start);
3255 			if (!ret)
3256 				goto out;
3257 		}
3258 
3259 		pin = 0;
3260 		cache = btrfs_lookup_block_group(fs_info, buf->start);
3261 
3262 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3263 			pin_down_extent(trans, cache, buf->start, buf->len, 1);
3264 			btrfs_put_block_group(cache);
3265 			goto out;
3266 		}
3267 
3268 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
3269 
3270 		btrfs_add_free_space(cache, buf->start, buf->len);
3271 		btrfs_free_reserved_bytes(cache, buf->len, 0);
3272 		btrfs_put_block_group(cache);
3273 		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
3274 	}
3275 out:
3276 	if (pin)
3277 		add_pinned_bytes(fs_info, &generic_ref);
3278 
3279 	if (last_ref) {
3280 		/*
3281 		 * Deleting the buffer, clear the corrupt flag since it doesn't
3282 		 * matter anymore.
3283 		 */
3284 		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
3285 	}
3286 }
3287 
3288 /* Can return -ENOMEM */
3289 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
3290 {
3291 	struct btrfs_fs_info *fs_info = trans->fs_info;
3292 	int old_ref_mod, new_ref_mod;
3293 	int ret;
3294 
3295 	if (btrfs_is_testing(fs_info))
3296 		return 0;
3297 
3298 	/*
3299 	 * tree log blocks never actually go into the extent allocation
3300 	 * tree, just update pinning info and exit early.
3301 	 */
3302 	if ((ref->type == BTRFS_REF_METADATA &&
3303 	     ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
3304 	    (ref->type == BTRFS_REF_DATA &&
3305 	     ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
3306 		/* unlocks the pinned mutex */
3307 		btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
3308 		old_ref_mod = new_ref_mod = 0;
3309 		ret = 0;
3310 	} else if (ref->type == BTRFS_REF_METADATA) {
3311 		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL,
3312 						 &old_ref_mod, &new_ref_mod);
3313 	} else {
3314 		ret = btrfs_add_delayed_data_ref(trans, ref, 0,
3315 						 &old_ref_mod, &new_ref_mod);
3316 	}
3317 
3318 	if (!((ref->type == BTRFS_REF_METADATA &&
3319 	       ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
3320 	      (ref->type == BTRFS_REF_DATA &&
3321 	       ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
3322 		btrfs_ref_tree_mod(fs_info, ref);
3323 
3324 	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
3325 		add_pinned_bytes(fs_info, ref);
3326 
3327 	return ret;
3328 }
3329 
3330 enum btrfs_loop_type {
3331 	LOOP_CACHING_NOWAIT,
3332 	LOOP_CACHING_WAIT,
3333 	LOOP_ALLOC_CHUNK,
3334 	LOOP_NO_EMPTY_SIZE,
3335 };
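
/*
 * Rough escalation of the loop stages above: LOOP_CACHING_NOWAIT only
 * uses block groups whose free space is already cached (or caches them
 * without waiting), LOOP_CACHING_WAIT is willing to wait for caching
 * to make progress, LOOP_ALLOC_CHUNK tries to allocate a new chunk,
 * and LOOP_NO_EMPTY_SIZE retries with empty_size and empty_cluster
 * dropped as a last resort.
 */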
3336 
3337 static inline void
3338 btrfs_lock_block_group(struct btrfs_block_group *cache,
3339 		       int delalloc)
3340 {
3341 	if (delalloc)
3342 		down_read(&cache->data_rwsem);
3343 }
3344 
3345 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
3346 		       int delalloc)
3347 {
3348 	btrfs_get_block_group(cache);
3349 	if (delalloc)
3350 		down_read(&cache->data_rwsem);
3351 }
3352 
3353 static struct btrfs_block_group *btrfs_lock_cluster(
3354 		   struct btrfs_block_group *block_group,
3355 		   struct btrfs_free_cluster *cluster,
3356 		   int delalloc)
3357 	__acquires(&cluster->refill_lock)
3358 {
3359 	struct btrfs_block_group *used_bg = NULL;
3360 
3361 	spin_lock(&cluster->refill_lock);
3362 	while (1) {
3363 		used_bg = cluster->block_group;
3364 		if (!used_bg)
3365 			return NULL;
3366 
3367 		if (used_bg == block_group)
3368 			return used_bg;
3369 
3370 		btrfs_get_block_group(used_bg);
3371 
3372 		if (!delalloc)
3373 			return used_bg;
3374 
3375 		if (down_read_trylock(&used_bg->data_rwsem))
3376 			return used_bg;
3377 
3378 		spin_unlock(&cluster->refill_lock);
3379 
3380 		/* We should only have one level of nesting. */
3381 		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
3382 
3383 		spin_lock(&cluster->refill_lock);
3384 		if (used_bg == cluster->block_group)
3385 			return used_bg;
3386 
3387 		up_read(&used_bg->data_rwsem);
3388 		btrfs_put_block_group(used_bg);
3389 	}
3390 }
3391 
3392 static inline void
3393 btrfs_release_block_group(struct btrfs_block_group *cache,
3394 			 int delalloc)
3395 {
3396 	if (delalloc)
3397 		up_read(&cache->data_rwsem);
3398 	btrfs_put_block_group(cache);
3399 }
3400 
3401 enum btrfs_extent_allocation_policy {
3402 	BTRFS_EXTENT_ALLOC_CLUSTERED,
3403 };
3404 
3405 /*
3406  * Structure used internally for find_free_extent() function.  Wraps needed
3407  * Structure used internally by find_free_extent().  Wraps the needed
3408  */
3409 struct find_free_extent_ctl {
3410 	/* Basic allocation info */
3411 	u64 num_bytes;
3412 	u64 empty_size;
3413 	u64 flags;
3414 	int delalloc;
3415 
3416 	/* Where to start the search inside the bg */
3417 	u64 search_start;
3418 
3419 	/* For clustered allocation */
3420 	u64 empty_cluster;
3421 	struct btrfs_free_cluster *last_ptr;
3422 	bool use_cluster;
3423 
3424 	bool have_caching_bg;
3425 	bool orig_have_caching_bg;
3426 
3427 	/* RAID index, converted from flags */
3428 	int index;
3429 
3430 	/*
3431 	 * Current loop number; see find_free_extent_update_loop() for details.
3432 	 */
3433 	int loop;
3434 
3435 	/*
3436 	 * Whether we're refilling a cluster; if true we need to re-search the
3437 	 * current block group but don't try to refill the cluster again.
3438 	 */
3439 	bool retry_clustered;
3440 
3441 	/*
3442 	 * Whether we're updating the free space cache; if true we need to
3443 	 * re-search the current block group but don't try updating the free
3444 	 * space cache again.
3444 	 */
3445 	bool retry_unclustered;
3446 
3447 	/* If current block group is cached */
3448 	int cached;
3449 
3450 	/* Max contiguous hole found */
3451 	u64 max_extent_size;
3452 
3453 	/* Total free space from free space cache, not always contiguous */
3454 	u64 total_free_space;
3455 
3456 	/* Found result */
3457 	u64 found_offset;
3458 
3459 	/* Hint where to start looking for an empty space */
3460 	u64 hint_byte;
3461 
3462 	/* Allocation policy */
3463 	enum btrfs_extent_allocation_policy policy;
3464 };
3465 
3467 /*
3468  * Helper function for find_free_extent().
3469  *
3470  * Return -ENOENT to inform caller that we need fallback to unclustered mode.
3471  * Return -EAGAIN to inform caller that we need to re-search this block group.
3472  * Return >0 to inform caller that we found nothing.
3473  * Return 0 means we have found a location and set ffe_ctl->found_offset.
3474  */
3475 static int find_free_extent_clustered(struct btrfs_block_group *bg,
3476 				      struct find_free_extent_ctl *ffe_ctl,
3477 				      struct btrfs_block_group **cluster_bg_ret)
3478 {
3479 	struct btrfs_block_group *cluster_bg;
3480 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3481 	u64 aligned_cluster;
3482 	u64 offset;
3483 	int ret;
3484 
3485 	cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
3486 	if (!cluster_bg)
3487 		goto refill_cluster;
3488 	if (cluster_bg != bg && (cluster_bg->ro ||
3489 	    !block_group_bits(cluster_bg, ffe_ctl->flags)))
3490 		goto release_cluster;
3491 
3492 	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
3493 			ffe_ctl->num_bytes, cluster_bg->start,
3494 			&ffe_ctl->max_extent_size);
3495 	if (offset) {
3496 		/* We have a block, we're done */
3497 		spin_unlock(&last_ptr->refill_lock);
3498 		trace_btrfs_reserve_extent_cluster(cluster_bg,
3499 				ffe_ctl->search_start, ffe_ctl->num_bytes);
3500 		*cluster_bg_ret = cluster_bg;
3501 		ffe_ctl->found_offset = offset;
3502 		return 0;
3503 	}
3504 	WARN_ON(last_ptr->block_group != cluster_bg);
3505 
3506 release_cluster:
3507 	/*
3508 	 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
3509 	 * let's just skip it and let the allocator find whatever block it can
3510 	 * find. If we reach this point, we will have tried the cluster
3511 	 * allocator plenty of times without finding anything, so we are
3512 	 * likely way too fragmented for the clustering code to find anything.
3513 	 *
3514 	 * However, if the cluster is taken from the current block group,
3515 	 * release the cluster first, so that we stand a better chance of
3516 	 * succeeding in the unclustered allocation.
3517 	 */
3518 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
3519 		spin_unlock(&last_ptr->refill_lock);
3520 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3521 		return -ENOENT;
3522 	}
3523 
3524 	/* This cluster didn't work out, free it and start over */
3525 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3526 
3527 	if (cluster_bg != bg)
3528 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3529 
3530 refill_cluster:
3531 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
3532 		spin_unlock(&last_ptr->refill_lock);
3533 		return -ENOENT;
3534 	}
3535 
3536 	aligned_cluster = max_t(u64,
3537 			ffe_ctl->empty_cluster + ffe_ctl->empty_size,
3538 			bg->full_stripe_len);
3539 	ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
3540 			ffe_ctl->num_bytes, aligned_cluster);
3541 	if (ret == 0) {
3542 		/* Now pull our allocation out of this cluster */
3543 		offset = btrfs_alloc_from_cluster(bg, last_ptr,
3544 				ffe_ctl->num_bytes, ffe_ctl->search_start,
3545 				&ffe_ctl->max_extent_size);
3546 		if (offset) {
3547 			/* We found one, proceed */
3548 			spin_unlock(&last_ptr->refill_lock);
3549 			trace_btrfs_reserve_extent_cluster(bg,
3550 					ffe_ctl->search_start,
3551 					ffe_ctl->num_bytes);
3552 			ffe_ctl->found_offset = offset;
3553 			return 0;
3554 		}
3555 	} else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
3556 		   !ffe_ctl->retry_clustered) {
3557 		spin_unlock(&last_ptr->refill_lock);
3558 
3559 		ffe_ctl->retry_clustered = true;
3560 		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
3561 				ffe_ctl->empty_cluster + ffe_ctl->empty_size);
3562 		return -EAGAIN;
3563 	}
3564 	/*
3565 	 * At this point we either didn't find a cluster or we weren't able to
3566 	 * allocate a block from our cluster.  Free the cluster we've been
3567 	 * trying to use, and go to the next block group.
3568 	 */
3569 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3570 	spin_unlock(&last_ptr->refill_lock);
3571 	return 1;
3572 }
3573 
3574 /*
3575  * Return >0 to inform the caller that we found nothing
3576  * Return 0 when we found a free extent and set ffe_ctl->found_offset
3577  * Return -EAGAIN to inform caller that we need to re-search this block group
3578  */
3579 static int find_free_extent_unclustered(struct btrfs_block_group *bg,
3580 					struct find_free_extent_ctl *ffe_ctl)
3581 {
3582 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3583 	u64 offset;
3584 
3585 	/*
3586 	 * We are doing an unclustered allocation, set the fragmented flag so
3587 	 * we don't bother trying to set up a cluster again until we get more
3588 	 * space.
3589 	 */
3590 	if (unlikely(last_ptr)) {
3591 		spin_lock(&last_ptr->lock);
3592 		last_ptr->fragmented = 1;
3593 		spin_unlock(&last_ptr->lock);
3594 	}
3595 	if (ffe_ctl->cached) {
3596 		struct btrfs_free_space_ctl *free_space_ctl;
3597 
3598 		free_space_ctl = bg->free_space_ctl;
3599 		spin_lock(&free_space_ctl->tree_lock);
3600 		if (free_space_ctl->free_space <
3601 		    ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
3602 		    ffe_ctl->empty_size) {
3603 			ffe_ctl->total_free_space = max_t(u64,
3604 					ffe_ctl->total_free_space,
3605 					free_space_ctl->free_space);
3606 			spin_unlock(&free_space_ctl->tree_lock);
3607 			return 1;
3608 		}
3609 		spin_unlock(&free_space_ctl->tree_lock);
3610 	}
3611 
3612 	offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
3613 			ffe_ctl->num_bytes, ffe_ctl->empty_size,
3614 			&ffe_ctl->max_extent_size);
3615 
3616 	/*
3617 	 * If we didn't find a chunk, and we haven't failed on this block group
3618 	 * before, and this block group is in the middle of caching and we are
3619 	 * ok with waiting, then go ahead and wait for progress to be made, and
3620 	 * set @retry_unclustered to true.
3621 	 *
3622 	 * If @retry_unclustered is true then we've already waited on this
3623 	 * block group once and should move on to the next block group.
3624 	 */
3625 	if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
3626 	    ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
3627 		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
3628 						      ffe_ctl->empty_size);
3629 		ffe_ctl->retry_unclustered = true;
3630 		return -EAGAIN;
3631 	} else if (!offset) {
3632 		return 1;
3633 	}
3634 	ffe_ctl->found_offset = offset;
3635 	return 0;
3636 }
3637 
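/*
 * Try the clustered allocator first and fall back to unclustered
 * allocation.  A -ENOENT from find_free_extent_clustered() means the
 * cluster was unusable and could not be refilled, so we fall through;
 * every other return value (0, >0, -EAGAIN) is passed back to the caller.
 */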
3638 static int do_allocation_clustered(struct btrfs_block_group *block_group,
3639 				   struct find_free_extent_ctl *ffe_ctl,
3640 				   struct btrfs_block_group **bg_ret)
3641 {
3642 	int ret;
3643 
3644 	/* We want to try and use the cluster allocator, so let's look there */
3645 	if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
3646 		ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
3647 		if (ret >= 0 || ret == -EAGAIN)
3648 			return ret;
3649 		/* ret == -ENOENT case falls through */
3650 	}
3651 
3652 	return find_free_extent_unclustered(block_group, ffe_ctl);
3653 }
3654 
3655 static int do_allocation(struct btrfs_block_group *block_group,
3656 			 struct find_free_extent_ctl *ffe_ctl,
3657 			 struct btrfs_block_group **bg_ret)
3658 {
3659 	switch (ffe_ctl->policy) {
3660 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3661 		return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
3662 	default:
3663 		BUG();
3664 	}
3665 }
3666 
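/*
 * Drop our reference on @block_group and reset the per-block-group retry
 * flags so the clustered and unclustered paths start fresh on the next
 * group in the list.
 */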
3667 static void release_block_group(struct btrfs_block_group *block_group,
3668 				struct find_free_extent_ctl *ffe_ctl,
3669 				int delalloc)
3670 {
3671 	switch (ffe_ctl->policy) {
3672 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3673 		ffe_ctl->retry_clustered = false;
3674 		ffe_ctl->retry_unclustered = false;
3675 		break;
3676 	default:
3677 		BUG();
3678 	}
3679 
3680 	BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
3681 	       ffe_ctl->index);
3682 	btrfs_release_block_group(block_group, delalloc);
3683 }
3684 
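/*
 * Remember where this allocation landed as the next cluster hint.  This
 * only matters if we have a cluster but did not allocate from it
 * (use_cluster is false), so subsequent searches start near this extent.
 */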
3685 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
3686 				   struct btrfs_key *ins)
3687 {
3688 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3689 
3690 	if (!ffe_ctl->use_cluster && last_ptr) {
3691 		spin_lock(&last_ptr->lock);
3692 		last_ptr->window_start = ins->objectid;
3693 		spin_unlock(&last_ptr->lock);
3694 	}
3695 }
3696 
3697 static void found_extent(struct find_free_extent_ctl *ffe_ctl,
3698 			 struct btrfs_key *ins)
3699 {
3700 	switch (ffe_ctl->policy) {
3701 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3702 		found_extent_clustered(ffe_ctl, ins);
3703 		break;
3704 	default:
3705 		BUG();
3706 	}
3707 }
3708 
3709 static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl)
3710 {
3711 	switch (ffe_ctl->policy) {
3712 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3713 		/*
3714 		 * If we can't allocate a new chunk we've already looped through
3715 		 * at least once, move on to the NO_EMPTY_SIZE case.
3716 		 */
3717 		ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
3718 		return 0;
3719 	default:
3720 		BUG();
3721 	}
3722 }
3723 
3724 /*
3725  * Return >0 means the caller needs to re-search for a free extent
3726  * Return 0 means we have the needed free extent.
3727  * Return <0 means we failed to locate any free extent.
3728  */
3729 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
3730 					struct btrfs_key *ins,
3731 					struct find_free_extent_ctl *ffe_ctl,
3732 					bool full_search)
3733 {
3734 	struct btrfs_root *root = fs_info->extent_root;
3735 	int ret;
3736 
3737 	if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
3738 	    ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
3739 		ffe_ctl->orig_have_caching_bg = true;
3740 
3741 	if (!ins->objectid && ffe_ctl->loop >= LOOP_CACHING_WAIT &&
3742 	    ffe_ctl->have_caching_bg)
3743 		return 1;
3744 
3745 	if (!ins->objectid && ++(ffe_ctl->index) < BTRFS_NR_RAID_TYPES)
3746 		return 1;
3747 
3748 	if (ins->objectid) {
3749 		found_extent(ffe_ctl, ins);
3750 		return 0;
3751 	}
3752 
3753 	/*
3754 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
3755 	 *			caching kthreads as we move along
3756 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
3757 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
3758 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
3759 	 *		       again
3760 	 */
3761 	if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
3762 		ffe_ctl->index = 0;
3763 		if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) {
3764 			/*
3765 			 * We want to skip the LOOP_CACHING_WAIT step if we
3766 			 * don't have any uncached bgs and we've already done a
3767 			 * full search through.
3768 			 */
3769 			if (ffe_ctl->orig_have_caching_bg || !full_search)
3770 				ffe_ctl->loop = LOOP_CACHING_WAIT;
3771 			else
3772 				ffe_ctl->loop = LOOP_ALLOC_CHUNK;
3773 		} else {
3774 			ffe_ctl->loop++;
3775 		}
3776 
3777 		if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
3778 			struct btrfs_trans_handle *trans;
3779 			int exist = 0;
3780 
3781 			trans = current->journal_info;
3782 			if (trans)
3783 				exist = 1;
3784 			else
3785 				trans = btrfs_join_transaction(root);
3786 
3787 			if (IS_ERR(trans)) {
3788 				ret = PTR_ERR(trans);
3789 				return ret;
3790 			}
3791 
3792 			ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
3793 						CHUNK_ALLOC_FORCE);
3794 
3795 			/* Do not bail out on ENOSPC since we can do more. */
3796 			if (ret == -ENOSPC)
3797 				ret = chunk_allocation_failed(ffe_ctl);
3798 			else if (ret < 0)
3799 				btrfs_abort_transaction(trans, ret);
3800 			else
3801 				ret = 0;
3802 			if (!exist)
3803 				btrfs_end_transaction(trans);
3804 			if (ret)
3805 				return ret;
3806 		}
3807 
3808 		if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
3809 			if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED)
3810 				return -ENOSPC;
3811 
3812 			/*
3813 			 * Don't loop again if we already have no empty_size and
3814 			 * no empty_cluster.
3815 			 */
3816 			if (ffe_ctl->empty_size == 0 &&
3817 			    ffe_ctl->empty_cluster == 0)
3818 				return -ENOSPC;
3819 			ffe_ctl->empty_size = 0;
3820 			ffe_ctl->empty_cluster = 0;
3821 		}
3822 		return 1;
3823 	}
3824 	return -ENOSPC;
3825 }
3826 
3827 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
3828 					struct find_free_extent_ctl *ffe_ctl,
3829 					struct btrfs_space_info *space_info,
3830 					struct btrfs_key *ins)
3831 {
3832 	/*
3833 	 * If our free space is heavily fragmented we may not be able to make
3834 	 * big contiguous allocations, so instead of doing the expensive search
3835 	 * for free space, simply return ENOSPC with our max_extent_size so we
3836 	 * can go ahead and search for a more manageable chunk.
3837 	 *
3838 	 * If our max_extent_size is large enough for our allocation simply
3839 	 * disable clustering since we will likely not be able to find enough
3840 	 * space to create a cluster and induce latency trying.
3841 	 */
3842 	if (space_info->max_extent_size) {
3843 		spin_lock(&space_info->lock);
3844 		if (space_info->max_extent_size &&
3845 		    ffe_ctl->num_bytes > space_info->max_extent_size) {
3846 			ins->offset = space_info->max_extent_size;
3847 			spin_unlock(&space_info->lock);
3848 			return -ENOSPC;
3849 		} else if (space_info->max_extent_size) {
3850 			ffe_ctl->use_cluster = false;
3851 		}
3852 		spin_unlock(&space_info->lock);
3853 	}
3854 
3855 	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
3856 					       &ffe_ctl->empty_cluster);
3857 	if (ffe_ctl->last_ptr) {
3858 		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3859 
3860 		spin_lock(&last_ptr->lock);
3861 		if (last_ptr->block_group)
3862 			ffe_ctl->hint_byte = last_ptr->window_start;
3863 		if (last_ptr->fragmented) {
3864 			/*
3865 			 * We still set window_start so we can keep track of the
3866 			 * last place we found an allocation to try and save
3867 			 * some time.
3868 			 */
3869 			ffe_ctl->hint_byte = last_ptr->window_start;
3870 			ffe_ctl->use_cluster = false;
3871 		}
3872 		spin_unlock(&last_ptr->lock);
3873 	}
3874 
3875 	return 0;
3876 }
3877 
3878 static int prepare_allocation(struct btrfs_fs_info *fs_info,
3879 			      struct find_free_extent_ctl *ffe_ctl,
3880 			      struct btrfs_space_info *space_info,
3881 			      struct btrfs_key *ins)
3882 {
3883 	switch (ffe_ctl->policy) {
3884 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3885 		return prepare_allocation_clustered(fs_info, ffe_ctl,
3886 						    space_info, ins);
3887 	default:
3888 		BUG();
3889 	}
3890 }
3891 
3892 /*
3893  * walks the btree of allocated extents and finds a hole of a given size.
3894  * The key ins is changed to record the hole:
3895  * ins->objectid == start position
3896  * ins->flags = BTRFS_EXTENT_ITEM_KEY
3897  * ins->offset == the size of the hole.
3898  * Any available blocks before search_start are skipped.
3899  *
3900  * If there is no suitable free space, we will record the max size of
3901  * the free space extent currently.
3902  *
3903  * The overall logic and call chain:
3904  *
3905  * find_free_extent()
3906  * |- Iterate through all block groups
3907  * |  |- Get a valid block group
3908  * |  |- Try to do clustered allocation in that block group
3909  * |  |- Try to do unclustered allocation in that block group
3910  * |  |- Check if the result is valid
3911  * |  |  |- If valid, then exit
3912  * |  |- Jump to next block group
3913  * |
3914  * |- Push harder to find free extents
3915  *    |- If not found, re-iterate all block groups
3916  */
3917 static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
3918 				u64 ram_bytes, u64 num_bytes, u64 empty_size,
3919 				u64 hint_byte_orig, struct btrfs_key *ins,
3920 				u64 flags, int delalloc)
3921 {
3922 	int ret = 0;
3923 	int cache_block_group_error = 0;
3924 	struct btrfs_block_group *block_group = NULL;
3925 	struct find_free_extent_ctl ffe_ctl = {0};
3926 	struct btrfs_space_info *space_info;
3927 	bool full_search = false;
3928 
3929 	WARN_ON(num_bytes < fs_info->sectorsize);
3930 
3931 	ffe_ctl.num_bytes = num_bytes;
3932 	ffe_ctl.empty_size = empty_size;
3933 	ffe_ctl.flags = flags;
3934 	ffe_ctl.search_start = 0;
3935 	ffe_ctl.delalloc = delalloc;
3936 	ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
3937 	ffe_ctl.have_caching_bg = false;
3938 	ffe_ctl.orig_have_caching_bg = false;
3939 	ffe_ctl.found_offset = 0;
3940 	ffe_ctl.hint_byte = hint_byte_orig;
3941 	ffe_ctl.policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
3942 
3943 	/* For clustered allocation */
3944 	ffe_ctl.retry_clustered = false;
3945 	ffe_ctl.retry_unclustered = false;
3946 	ffe_ctl.last_ptr = NULL;
3947 	ffe_ctl.use_cluster = true;
3948 
3949 	ins->type = BTRFS_EXTENT_ITEM_KEY;
3950 	ins->objectid = 0;
3951 	ins->offset = 0;
3952 
3953 	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
3954 
3955 	space_info = btrfs_find_space_info(fs_info, flags);
3956 	if (!space_info) {
3957 		btrfs_err(fs_info, "No space info for %llu", flags);
3958 		return -ENOSPC;
3959 	}
3960 
3961 	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
3962 	if (ret < 0)
3963 		return ret;
3964 
3965 	ffe_ctl.search_start = max(ffe_ctl.search_start,
3966 				   first_logical_byte(fs_info, 0));
3967 	ffe_ctl.search_start = max(ffe_ctl.search_start, ffe_ctl.hint_byte);
3968 	if (ffe_ctl.search_start == ffe_ctl.hint_byte) {
3969 		block_group = btrfs_lookup_block_group(fs_info,
3970 						       ffe_ctl.search_start);
3971 		/*
3972 		 * we don't want to use the block group if it doesn't match our
3973 		 * allocation bits, or if it's not cached.
3974 		 *
3975 		 * However if we are re-searching with an ideal block group
3976 		 * picked out then we don't care that the block group is cached.
3977 		 */
3978 		if (block_group && block_group_bits(block_group, flags) &&
3979 		    block_group->cached != BTRFS_CACHE_NO) {
3980 			down_read(&space_info->groups_sem);
3981 			if (list_empty(&block_group->list) ||
3982 			    block_group->ro) {
3983 				/*
3984 				 * someone is removing this block group,
3985 				 * we can't jump into the have_block_group
3986 				 * target because our list pointers are not
3987 				 * valid
3988 				 */
3989 				btrfs_put_block_group(block_group);
3990 				up_read(&space_info->groups_sem);
3991 			} else {
3992 				ffe_ctl.index = btrfs_bg_flags_to_raid_index(
3993 						block_group->flags);
3994 				btrfs_lock_block_group(block_group, delalloc);
3995 				goto have_block_group;
3996 			}
3997 		} else if (block_group) {
3998 			btrfs_put_block_group(block_group);
3999 		}
4000 	}
4001 search:
4002 	ffe_ctl.have_caching_bg = false;
4003 	if (ffe_ctl.index == btrfs_bg_flags_to_raid_index(flags) ||
4004 	    ffe_ctl.index == 0)
4005 		full_search = true;
4006 	down_read(&space_info->groups_sem);
4007 	list_for_each_entry(block_group,
4008 			    &space_info->block_groups[ffe_ctl.index], list) {
4009 		struct btrfs_block_group *bg_ret;
4010 
4011 		/* If the block group is read-only, we can skip it entirely. */
4012 		if (unlikely(block_group->ro))
4013 			continue;
4014 
4015 		btrfs_grab_block_group(block_group, delalloc);
4016 		ffe_ctl.search_start = block_group->start;
4017 
4018 		/*
4019 		 * this can happen if we end up cycling through all the
4020 		 * raid types, but we want to make sure we only allocate
4021 		 * for the proper type.
4022 		 */
4023 		if (!block_group_bits(block_group, flags)) {
4024 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
4025 				BTRFS_BLOCK_GROUP_RAID1_MASK |
4026 				BTRFS_BLOCK_GROUP_RAID56_MASK |
4027 				BTRFS_BLOCK_GROUP_RAID10;
4028 
4029 			/*
4030 			 * if they asked for extra copies and this block group
4031 			 * doesn't provide them, bail.  This does allow us to
4032 			 * fill raid0 from raid1.
4033 			 */
4034 			if ((flags & extra) && !(block_group->flags & extra))
4035 				goto loop;
4036 
4037 			/*
4038 			 * This block group has different flags than we want.
4039 			 * It's possible that we have MIXED_GROUP flag but no
4040 			 * block group is mixed.  Just skip such block group.
4041 			 */
4042 			btrfs_release_block_group(block_group, delalloc);
4043 			continue;
4044 		}
4045 
4046 have_block_group:
4047 		ffe_ctl.cached = btrfs_block_group_done(block_group);
4048 		if (unlikely(!ffe_ctl.cached)) {
4049 			ffe_ctl.have_caching_bg = true;
4050 			ret = btrfs_cache_block_group(block_group, 0);
4051 
4052 			/*
4053 			 * If we get ENOMEM here or something else we want to
4054 			 * try other block groups, because it may not be fatal.
4055 			 * However if we can't find anything else we need to
4056 			 * save our return here so that we return the actual
4057 			 * error that caused problems, not ENOSPC.
4058 			 */
4059 			if (ret < 0) {
4060 				if (!cache_block_group_error)
4061 					cache_block_group_error = ret;
4062 				ret = 0;
4063 				goto loop;
4064 			}
4065 			ret = 0;
4066 		}
4067 
4068 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
4069 			goto loop;
4070 
4071 		bg_ret = NULL;
4072 		ret = do_allocation(block_group, &ffe_ctl, &bg_ret);
4073 		if (ret == 0) {
4074 			if (bg_ret && bg_ret != block_group) {
4075 				btrfs_release_block_group(block_group, delalloc);
4076 				block_group = bg_ret;
4077 			}
4078 		} else if (ret == -EAGAIN) {
4079 			goto have_block_group;
4080 		} else if (ret > 0) {
4081 			goto loop;
4082 		}
4083 
4084 		/* Checks */
4085 		ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
4086 					     fs_info->stripesize);
4087 
4088 		/* move on to the next group */
4089 		if (ffe_ctl.search_start + num_bytes >
4090 		    block_group->start + block_group->length) {
4091 			btrfs_add_free_space(block_group, ffe_ctl.found_offset,
4092 					     num_bytes);
4093 			goto loop;
4094 		}
4095 
4096 		if (ffe_ctl.found_offset < ffe_ctl.search_start)
4097 			btrfs_add_free_space(block_group, ffe_ctl.found_offset,
4098 				ffe_ctl.search_start - ffe_ctl.found_offset);
4099 
4100 		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
4101 				num_bytes, delalloc);
4102 		if (ret == -EAGAIN) {
4103 			btrfs_add_free_space(block_group, ffe_ctl.found_offset,
4104 					     num_bytes);
4105 			goto loop;
4106 		}
4107 		btrfs_inc_block_group_reservations(block_group);
4108 
4109 		/* we are all good, let's return */
4110 		ins->objectid = ffe_ctl.search_start;
4111 		ins->offset = num_bytes;
4112 
4113 		trace_btrfs_reserve_extent(block_group, ffe_ctl.search_start,
4114 					   num_bytes);
4115 		btrfs_release_block_group(block_group, delalloc);
4116 		break;
4117 loop:
4118 		release_block_group(block_group, &ffe_ctl, delalloc);
4119 		cond_resched();
4120 	}
4121 	up_read(&space_info->groups_sem);
4122 
4123 	ret = find_free_extent_update_loop(fs_info, ins, &ffe_ctl, full_search);
4124 	if (ret > 0)
4125 		goto search;
4126 
4127 	if (ret == -ENOSPC && !cache_block_group_error) {
4128 		/*
4129 		 * Use ffe_ctl->total_free_space as fallback if we can't find
4130 		 * any contiguous hole.
4131 		 */
4132 		if (!ffe_ctl.max_extent_size)
4133 			ffe_ctl.max_extent_size = ffe_ctl.total_free_space;
4134 		spin_lock(&space_info->lock);
4135 		space_info->max_extent_size = ffe_ctl.max_extent_size;
4136 		spin_unlock(&space_info->lock);
4137 		ins->offset = ffe_ctl.max_extent_size;
4138 	} else if (ret == -ENOSPC) {
4139 		ret = cache_block_group_error;
4140 	}
4141 	return ret;
4142 }
4143 
4144 /*
4145  * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
4146  *			  hole that is at least as big as @num_bytes.
4147  *
4148  * @root           -	The root that will contain this extent
4149  *
4150  * @ram_bytes      -	The amount of space in RAM that @num_bytes takes. This
4151  *			is used for accounting purposes. This value differs
4152  *			from @num_bytes only in the case of compressed extents.
4153  *
4154  * @num_bytes      -	Number of bytes to allocate on-disk.
4155  *
4156  * @min_alloc_size -	Indicates the minimum amount of space that the
4157  *			allocator should try to satisfy. In some cases
4158  *			@num_bytes may be larger than what is required and if
4159  *			the filesystem is fragmented then allocation fails.
4160  *			However, the presence of @min_alloc_size gives a
4161  *			chance to try and satisfy the smaller allocation.
4162  *
4163  * @empty_size     -	A hint that you plan on doing more COW. This is the
4164  *			size in bytes the allocator should try to find free
4165  *			next to the block it returns.  This is just a hint and
4166  *			may be ignored by the allocator.
4167  *
4168  * @hint_byte      -	Hint to the allocator to start searching above the byte
4169  *			address passed. It might be ignored.
4170  *
4171  * @ins            -	This key is modified to record the found hole. It will
4172  *			have the following values:
4173  *			ins->objectid == start position
4174  *			ins->flags = BTRFS_EXTENT_ITEM_KEY
4175  *			ins->offset == the size of the hole.
4176  *
4177  * @is_data        -	Boolean flag indicating whether an extent is
4178  *			allocated for data (true) or metadata (false)
4179  *
4180  * @delalloc       -	Boolean flag indicating whether this allocation is for
4181  *			delalloc or not. If 'true' data_rwsem of block groups
4182  *			is going to be acquired.
4183  *
4185  * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
4186  * case -ENOSPC is returned then @ins->offset will contain the size of the
4187  * largest available hole the allocator managed to find.
4188  */
4189 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
4190 			 u64 num_bytes, u64 min_alloc_size,
4191 			 u64 empty_size, u64 hint_byte,
4192 			 struct btrfs_key *ins, int is_data, int delalloc)
4193 {
4194 	struct btrfs_fs_info *fs_info = root->fs_info;
4195 	bool final_tried = num_bytes == min_alloc_size;
4196 	u64 flags;
4197 	int ret;
4198 
4199 	flags = get_alloc_profile_by_root(root, is_data);
4200 again:
4201 	WARN_ON(num_bytes < fs_info->sectorsize);
4202 	ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
4203 			       hint_byte, ins, flags, delalloc);
4204 	if (!ret && !is_data) {
4205 		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
4206 	} else if (ret == -ENOSPC) {
4207 		if (!final_tried && ins->offset) {
4208 			num_bytes = min(num_bytes >> 1, ins->offset);
4209 			num_bytes = round_down(num_bytes,
4210 					       fs_info->sectorsize);
4211 			num_bytes = max(num_bytes, min_alloc_size);
4212 			ram_bytes = num_bytes;
4213 			if (num_bytes == min_alloc_size)
4214 				final_tried = true;
4215 			goto again;
4216 		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4217 			struct btrfs_space_info *sinfo;
4218 
4219 			sinfo = btrfs_find_space_info(fs_info, flags);
4220 			btrfs_err(fs_info,
4221 				  "allocation failed flags %llu, wanted %llu",
4222 				  flags, num_bytes);
4223 			if (sinfo)
4224 				btrfs_dump_space_info(fs_info, sinfo,
4225 						      num_bytes, 1);
4226 		}
4227 	}
4228 
4229 	return ret;
4230 }
4231 
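/*
 * Return a reserved extent to the free space cache of its block group
 * and drop the reservation, e.g. on error paths after a successful
 * btrfs_reserve_extent().
 */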
4232 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
4233 			       u64 start, u64 len, int delalloc)
4234 {
4235 	struct btrfs_block_group *cache;
4236 
4237 	cache = btrfs_lookup_block_group(fs_info, start);
4238 	if (!cache) {
4239 		btrfs_err(fs_info, "Unable to find block group for %llu",
4240 			  start);
4241 		return -ENOSPC;
4242 	}
4243 
4244 	btrfs_add_free_space(cache, start, len);
4245 	btrfs_free_reserved_bytes(cache, len, delalloc);
4246 	trace_btrfs_reserved_extent_free(fs_info, start, len);
4247 
4248 	btrfs_put_block_group(cache);
4249 	return 0;
4250 }
4251 
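/*
 * Pin a previously reserved extent so the range is not reused before the
 * current transaction commits.  Looks up the owning block group and hands
 * the range to pin_down_extent().
 */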
4252 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
4253 			      u64 len)
4254 {
4255 	struct btrfs_block_group *cache;
4256 	int ret = 0;
4257 
4258 	cache = btrfs_lookup_block_group(trans->fs_info, start);
4259 	if (!cache) {
4260 		btrfs_err(trans->fs_info, "unable to find block group for %llu",
4261 			  start);
4262 		return -ENOSPC;
4263 	}
4264 
4265 	ret = pin_down_extent(trans, cache, start, len, 1);
4266 	btrfs_put_block_group(cache);
4267 	return ret;
4268 }
4269 
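/*
 * Insert the extent item for a newly allocated data extent together with
 * an inline backref: a shared data ref if @parent is set, otherwise a
 * keyed extent data ref.  Afterwards the range is removed from the free
 * space tree and the block group accounting is updated.
 */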
4270 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4271 				      u64 parent, u64 root_objectid,
4272 				      u64 flags, u64 owner, u64 offset,
4273 				      struct btrfs_key *ins, int ref_mod)
4274 {
4275 	struct btrfs_fs_info *fs_info = trans->fs_info;
4276 	int ret;
4277 	struct btrfs_extent_item *extent_item;
4278 	struct btrfs_extent_inline_ref *iref;
4279 	struct btrfs_path *path;
4280 	struct extent_buffer *leaf;
4281 	int type;
4282 	u32 size;
4283 
4284 	if (parent > 0)
4285 		type = BTRFS_SHARED_DATA_REF_KEY;
4286 	else
4287 		type = BTRFS_EXTENT_DATA_REF_KEY;
4288 
4289 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4290 
4291 	path = btrfs_alloc_path();
4292 	if (!path)
4293 		return -ENOMEM;
4294 
4295 	path->leave_spinning = 1;
4296 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4297 				      ins, size);
4298 	if (ret) {
4299 		btrfs_free_path(path);
4300 		return ret;
4301 	}
4302 
4303 	leaf = path->nodes[0];
4304 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4305 				     struct btrfs_extent_item);
4306 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4307 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4308 	btrfs_set_extent_flags(leaf, extent_item,
4309 			       flags | BTRFS_EXTENT_FLAG_DATA);
4310 
4311 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4312 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
4313 	if (parent > 0) {
4314 		struct btrfs_shared_data_ref *ref;
4315 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
4316 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4317 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4318 	} else {
4319 		struct btrfs_extent_data_ref *ref;
4320 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4321 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4322 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4323 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4324 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4325 	}
4326 
4327 	btrfs_mark_buffer_dirty(path->nodes[0]);
4328 	btrfs_free_path(path);
4329 
4330 	ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset);
4331 	if (ret)
4332 		return ret;
4333 
4334 	ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, 1);
4335 	if (ret) { /* -ENOENT, logic error */
4336 		btrfs_err(fs_info, "update block group failed for %llu %llu",
4337 			ins->objectid, ins->offset);
4338 		BUG();
4339 	}
4340 	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
4341 	return ret;
4342 }
4343 
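/*
 * Metadata counterpart of alloc_reserved_file_extent().  With the
 * SKINNY_METADATA incompat feature the tree level is stored in the key
 * offset and no btrfs_tree_block_info is embedded, keeping the extent
 * item smaller.
 */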
4344 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4345 				     struct btrfs_delayed_ref_node *node,
4346 				     struct btrfs_delayed_extent_op *extent_op)
4347 {
4348 	struct btrfs_fs_info *fs_info = trans->fs_info;
4349 	int ret;
4350 	struct btrfs_extent_item *extent_item;
4351 	struct btrfs_key extent_key;
4352 	struct btrfs_tree_block_info *block_info;
4353 	struct btrfs_extent_inline_ref *iref;
4354 	struct btrfs_path *path;
4355 	struct extent_buffer *leaf;
4356 	struct btrfs_delayed_tree_ref *ref;
4357 	u32 size = sizeof(*extent_item) + sizeof(*iref);
4358 	u64 num_bytes;
4359 	u64 flags = extent_op->flags_to_set;
4360 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4361 
4362 	ref = btrfs_delayed_node_to_tree_ref(node);
4363 
4364 	extent_key.objectid = node->bytenr;
4365 	if (skinny_metadata) {
4366 		extent_key.offset = ref->level;
4367 		extent_key.type = BTRFS_METADATA_ITEM_KEY;
4368 		num_bytes = fs_info->nodesize;
4369 	} else {
4370 		extent_key.offset = node->num_bytes;
4371 		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4372 		size += sizeof(*block_info);
4373 		num_bytes = node->num_bytes;
4374 	}
4375 
4376 	path = btrfs_alloc_path();
4377 	if (!path)
4378 		return -ENOMEM;
4379 
4380 	path->leave_spinning = 1;
4381 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4382 				      &extent_key, size);
4383 	if (ret) {
4384 		btrfs_free_path(path);
4385 		return ret;
4386 	}
4387 
4388 	leaf = path->nodes[0];
4389 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4390 				     struct btrfs_extent_item);
4391 	btrfs_set_extent_refs(leaf, extent_item, 1);
4392 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4393 	btrfs_set_extent_flags(leaf, extent_item,
4394 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4395 
4396 	if (skinny_metadata) {
4397 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4398 	} else {
4399 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4400 		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
4401 		btrfs_set_tree_block_level(leaf, block_info, ref->level);
4402 		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4403 	}
4404 
4405 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
4406 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4407 		btrfs_set_extent_inline_ref_type(leaf, iref,
4408 						 BTRFS_SHARED_BLOCK_REF_KEY);
4409 		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
4410 	} else {
4411 		btrfs_set_extent_inline_ref_type(leaf, iref,
4412 						 BTRFS_TREE_BLOCK_REF_KEY);
4413 		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
4414 	}
4415 
4416 	btrfs_mark_buffer_dirty(leaf);
4417 	btrfs_free_path(path);
4418 
4419 	ret = remove_from_free_space_tree(trans, extent_key.objectid,
4420 					  num_bytes);
4421 	if (ret)
4422 		return ret;
4423 
4424 	ret = btrfs_update_block_group(trans, extent_key.objectid,
4425 				       fs_info->nodesize, 1);
4426 	if (ret) { /* -ENOENT, logic error */
4427 		btrfs_err(fs_info, "update block group failed for %llu %llu",
4428 			extent_key.objectid, extent_key.offset);
4429 		BUG();
4430 	}
4431 
4432 	trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid,
4433 					  fs_info->nodesize);
4434 	return ret;
4435 }
4436 
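/*
 * Queue a delayed ref that will insert the extent item for a reserved
 * data extent.  The extent tree is only modified later, when the delayed
 * refs are run.
 */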
4437 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4438 				     struct btrfs_root *root, u64 owner,
4439 				     u64 offset, u64 ram_bytes,
4440 				     struct btrfs_key *ins)
4441 {
4442 	struct btrfs_ref generic_ref = { 0 };
4443 	int ret;
4444 
4445 	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
4446 
4447 	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
4448 			       ins->objectid, ins->offset, 0);
4449 	btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset);
4450 	btrfs_ref_tree_mod(root->fs_info, &generic_ref);
4451 	ret = btrfs_add_delayed_data_ref(trans, &generic_ref,
4452 					 ram_bytes, NULL, NULL);
4453 	return ret;
4454 }
4455 
4456 /*
4457  * this is used by the tree logging recovery code.  It records that
4458  * an extent has been allocated and makes sure to clear the free
4459  * space cache bits as well
4460  */
4461 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4462 				   u64 root_objectid, u64 owner, u64 offset,
4463 				   struct btrfs_key *ins)
4464 {
4465 	struct btrfs_fs_info *fs_info = trans->fs_info;
4466 	int ret;
4467 	struct btrfs_block_group *block_group;
4468 	struct btrfs_space_info *space_info;
4469 
4470 	/*
4471 	 * Mixed block groups will exclude before processing the log so we only
4472 	 * need to do the exclude dance if this fs isn't mixed.
4473 	 */
4474 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
4475 		ret = __exclude_logged_extent(fs_info, ins->objectid,
4476 					      ins->offset);
4477 		if (ret)
4478 			return ret;
4479 	}
4480 
4481 	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
4482 	if (!block_group)
4483 		return -EINVAL;
4484 
4485 	space_info = block_group->space_info;
4486 	spin_lock(&space_info->lock);
4487 	spin_lock(&block_group->lock);
4488 	space_info->bytes_reserved += ins->offset;
4489 	block_group->reserved += ins->offset;
4490 	spin_unlock(&block_group->lock);
4491 	spin_unlock(&space_info->lock);
4492 
4493 	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
4494 					 offset, ins, 1);
4495 	if (ret)
4496 		btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
4497 	btrfs_put_block_group(block_group);
4498 	return ret;
4499 }
4500 
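/*
 * Set up a freshly allocated tree block: lock it, zero the header, stamp
 * in the level, generation and owner, and mark it dirty in the right
 * extent io tree (log trees use root->dirty_log_pages).
 */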
4501 static struct extent_buffer *
4502 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4503 		      u64 bytenr, int level, u64 owner)
4504 {
4505 	struct btrfs_fs_info *fs_info = root->fs_info;
4506 	struct extent_buffer *buf;
4507 
4508 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
4509 	if (IS_ERR(buf))
4510 		return buf;
4511 
4512 	/*
4513 	 * Extra safety check in case the extent tree is corrupted and the
4514 	 * extent allocator chooses to use a tree block which is already
4515 	 * used and locked.
4516 	 */
4517 	if (buf->lock_owner == current->pid) {
4518 		btrfs_err_rl(fs_info,
4519 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
4520 			buf->start, btrfs_header_owner(buf), current->pid);
4521 		free_extent_buffer(buf);
4522 		return ERR_PTR(-EUCLEAN);
4523 	}
4524 
4525 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
4526 	btrfs_tree_lock(buf);
4527 	btrfs_clean_tree_block(buf);
4528 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
4529 
4530 	btrfs_set_lock_blocking_write(buf);
4531 	set_extent_buffer_uptodate(buf);
4532 
4533 	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
4534 	btrfs_set_header_level(buf, level);
4535 	btrfs_set_header_bytenr(buf, buf->start);
4536 	btrfs_set_header_generation(buf, trans->transid);
4537 	btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
4538 	btrfs_set_header_owner(buf, owner);
4539 	write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
4540 	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
4541 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4542 		buf->log_index = root->log_transid % 2;
4543 		/*
4544 		 * we allow two log transactions at a time, so use different
4545 		 * EXTENT bits to differentiate dirty pages.
4546 		 */
4547 		if (buf->log_index == 0)
4548 			set_extent_dirty(&root->dirty_log_pages, buf->start,
4549 					buf->start + buf->len - 1, GFP_NOFS);
4550 		else
4551 			set_extent_new(&root->dirty_log_pages, buf->start,
4552 					buf->start + buf->len - 1);
4553 	} else {
4554 		buf->log_index = -1;
4555 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4556 			 buf->start + buf->len - 1, GFP_NOFS);
4557 	}
4558 	trans->dirty = true;
4559 	/* this returns a buffer locked for blocking */
4560 	return buf;
4561 }
4562 
4563 /*
4564  * finds a free extent and does all the dirty work required for allocation.
4565  * Returns the tree buffer or an ERR_PTR on error.
4566  */
4567 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
4568 					     struct btrfs_root *root,
4569 					     u64 parent, u64 root_objectid,
4570 					     const struct btrfs_disk_key *key,
4571 					     int level, u64 hint,
4572 					     u64 empty_size)
4573 {
4574 	struct btrfs_fs_info *fs_info = root->fs_info;
4575 	struct btrfs_key ins;
4576 	struct btrfs_block_rsv *block_rsv;
4577 	struct extent_buffer *buf;
4578 	struct btrfs_delayed_extent_op *extent_op;
4579 	struct btrfs_ref generic_ref = { 0 };
4580 	u64 flags = 0;
4581 	int ret;
4582 	u32 blocksize = fs_info->nodesize;
4583 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4584 
4585 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4586 	if (btrfs_is_testing(fs_info)) {
4587 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
4588 					    level, root_objectid);
4589 		if (!IS_ERR(buf))
4590 			root->alloc_bytenr += blocksize;
4591 		return buf;
4592 	}
4593 #endif
4594 
4595 	block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
4596 	if (IS_ERR(block_rsv))
4597 		return ERR_CAST(block_rsv);
4598 
4599 	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
4600 				   empty_size, hint, &ins, 0, 0);
4601 	if (ret)
4602 		goto out_unuse;
4603 
4604 	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
4605 				    root_objectid);
4606 	if (IS_ERR(buf)) {
4607 		ret = PTR_ERR(buf);
4608 		goto out_free_reserved;
4609 	}
4610 
4611 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4612 		if (parent == 0)
4613 			parent = ins.objectid;
4614 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4615 	} else
4616 		BUG_ON(parent > 0);
4617 
4618 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4619 		extent_op = btrfs_alloc_delayed_extent_op();
4620 		if (!extent_op) {
4621 			ret = -ENOMEM;
4622 			goto out_free_buf;
4623 		}
4624 		if (key)
4625 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
4626 		else
4627 			memset(&extent_op->key, 0, sizeof(extent_op->key));
4628 		extent_op->flags_to_set = flags;
4629 		extent_op->update_key = skinny_metadata ? false : true;
4630 		extent_op->update_flags = true;
4631 		extent_op->is_data = false;
4632 		extent_op->level = level;
4633 
4634 		btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
4635 				       ins.objectid, ins.offset, parent);
4636 		generic_ref.real_root = root->root_key.objectid;
4637 		btrfs_init_tree_ref(&generic_ref, level, root_objectid);
4638 		btrfs_ref_tree_mod(fs_info, &generic_ref);
4639 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
4640 						 extent_op, NULL, NULL);
4641 		if (ret)
4642 			goto out_free_delayed;
4643 	}
4644 	return buf;
4645 
4646 out_free_delayed:
4647 	btrfs_free_delayed_extent_op(extent_op);
4648 out_free_buf:
4649 	free_extent_buffer(buf);
4650 out_free_reserved:
4651 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
4652 out_unuse:
4653 	btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
4654 	return ERR_PTR(ret);
4655 }
4656 
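/*
 * State for the snapshot-deletion tree walk.  The walk has two stages:
 * DROP_REFERENCE frees blocks only referenced by the tree being dropped,
 * while UPDATE_BACKREF converts the backrefs of a shared subtree to full
 * backrefs before dropping continues.
 */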
4657 struct walk_control {
4658 	u64 refs[BTRFS_MAX_LEVEL];
4659 	u64 flags[BTRFS_MAX_LEVEL];
4660 	struct btrfs_key update_progress;
4661 	struct btrfs_key drop_progress;
4662 	int drop_level;
4663 	int stage;
4664 	int level;
4665 	int shared_level;
4666 	int update_ref;
4667 	int keep_locks;
4668 	int reada_slot;
4669 	int reada_count;
4670 	int restarted;
4671 };
4672 
4673 #define DROP_REFERENCE	1
4674 #define UPDATE_BACKREF	2
4675 
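/*
 * Readahead helper for the tree walk: prefetch child blocks of the
 * current node that the walk is likely to visit, growing or shrinking
 * wc->reada_count depending on whether the previous batch was consumed.
 */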
4676 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4677 				     struct btrfs_root *root,
4678 				     struct walk_control *wc,
4679 				     struct btrfs_path *path)
4680 {
4681 	struct btrfs_fs_info *fs_info = root->fs_info;
4682 	u64 bytenr;
4683 	u64 generation;
4684 	u64 refs;
4685 	u64 flags;
4686 	u32 nritems;
4687 	struct btrfs_key key;
4688 	struct extent_buffer *eb;
4689 	int ret;
4690 	int slot;
4691 	int nread = 0;
4692 
4693 	if (path->slots[wc->level] < wc->reada_slot) {
4694 		wc->reada_count = wc->reada_count * 2 / 3;
4695 		wc->reada_count = max(wc->reada_count, 2);
4696 	} else {
4697 		wc->reada_count = wc->reada_count * 3 / 2;
4698 		wc->reada_count = min_t(int, wc->reada_count,
4699 					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
4700 	}
4701 
4702 	eb = path->nodes[wc->level];
4703 	nritems = btrfs_header_nritems(eb);
4704 
4705 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
4706 		if (nread >= wc->reada_count)
4707 			break;
4708 
4709 		cond_resched();
4710 		bytenr = btrfs_node_blockptr(eb, slot);
4711 		generation = btrfs_node_ptr_generation(eb, slot);
4712 
4713 		if (slot == path->slots[wc->level])
4714 			goto reada;
4715 
4716 		if (wc->stage == UPDATE_BACKREF &&
4717 		    generation <= root->root_key.offset)
4718 			continue;
4719 
4720 		/* We don't lock the tree block, it's OK to be racy here */
4721 		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
4722 					       wc->level - 1, 1, &refs,
4723 					       &flags);
4724 		/* We don't care about errors in readahead. */
4725 		if (ret < 0)
4726 			continue;
4727 		BUG_ON(refs == 0);
4728 
4729 		if (wc->stage == DROP_REFERENCE) {
4730 			if (refs == 1)
4731 				goto reada;
4732 
4733 			if (wc->level == 1 &&
4734 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
4735 				continue;
4736 			if (!wc->update_ref ||
4737 			    generation <= root->root_key.offset)
4738 				continue;
4739 			btrfs_node_key_to_cpu(eb, &key, slot);
4740 			ret = btrfs_comp_cpu_keys(&key,
4741 						  &wc->update_progress);
4742 			if (ret < 0)
4743 				continue;
4744 		} else {
4745 			if (wc->level == 1 &&
4746 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
4747 				continue;
4748 		}
4749 reada:
4750 		readahead_tree_block(fs_info, bytenr);
4751 		nread++;
4752 	}
4753 	wc->reada_slot = slot;
4754 }
4755 
4756 /*
4757  * helper to process tree block while walking down the tree.
4758  *
4759  * when wc->stage == UPDATE_BACKREF, this function updates
4760  * back refs for pointers in the block.
4761  *
4762  * NOTE: return value 1 means we should stop walking down.
4763  */
4764 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4765 				   struct btrfs_root *root,
4766 				   struct btrfs_path *path,
4767 				   struct walk_control *wc, int lookup_info)
4768 {
4769 	struct btrfs_fs_info *fs_info = root->fs_info;
4770 	int level = wc->level;
4771 	struct extent_buffer *eb = path->nodes[level];
4772 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4773 	int ret;
4774 
4775 	if (wc->stage == UPDATE_BACKREF &&
4776 	    btrfs_header_owner(eb) != root->root_key.objectid)
4777 		return 1;
4778 
4779 	/*
4780 	 * when the reference count of a tree block is 1, it won't increase
4781 	 * again. Once the full backref flag is set, we never clear it.
4782 	 */
4783 	if (lookup_info &&
4784 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4785 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
4786 		BUG_ON(!path->locks[level]);
4787 		ret = btrfs_lookup_extent_info(trans, fs_info,
4788 					       eb->start, level, 1,
4789 					       &wc->refs[level],
4790 					       &wc->flags[level]);
4791 		BUG_ON(ret == -ENOMEM);
4792 		if (ret)
4793 			return ret;
4794 		BUG_ON(wc->refs[level] == 0);
4795 	}
4796 
4797 	if (wc->stage == DROP_REFERENCE) {
4798 		if (wc->refs[level] > 1)
4799 			return 1;
4800 
4801 		if (path->locks[level] && !wc->keep_locks) {
4802 			btrfs_tree_unlock_rw(eb, path->locks[level]);
4803 			path->locks[level] = 0;
4804 		}
4805 		return 0;
4806 	}
4807 
4808 	/* wc->stage == UPDATE_BACKREF */
4809 	if (!(wc->flags[level] & flag)) {
4810 		BUG_ON(!path->locks[level]);
4811 		ret = btrfs_inc_ref(trans, root, eb, 1);
4812 		BUG_ON(ret); /* -ENOMEM */
4813 		ret = btrfs_dec_ref(trans, root, eb, 0);
4814 		BUG_ON(ret); /* -ENOMEM */
4815 		ret = btrfs_set_disk_extent_flags(trans, eb, flag,
4816 						  btrfs_header_level(eb), 0);
4817 		BUG_ON(ret); /* -ENOMEM */
4818 		wc->flags[level] |= flag;
4819 	}
4820 
4821 	/*
4822 	 * the block is shared by multiple trees, so it's not good to
4823 	 * keep the tree lock
4824 	 */
4825 	if (path->locks[level] && level > 0) {
4826 		btrfs_tree_unlock_rw(eb, path->locks[level]);
4827 		path->locks[level] = 0;
4828 	}
4829 	return 0;
4830 }
4831 
4832 /*
4833  * This is used to verify a ref exists for this root to deal with a bug where we
4834  * would have a drop_progress key that hadn't been updated properly.
4835  */
4836 static int check_ref_exists(struct btrfs_trans_handle *trans,
4837 			    struct btrfs_root *root, u64 bytenr, u64 parent,
4838 			    int level)
4839 {
4840 	struct btrfs_path *path;
4841 	struct btrfs_extent_inline_ref *iref;
4842 	int ret;
4843 
4844 	path = btrfs_alloc_path();
4845 	if (!path)
4846 		return -ENOMEM;
4847 
4848 	ret = lookup_extent_backref(trans, path, &iref, bytenr,
4849 				    root->fs_info->nodesize, parent,
4850 				    root->root_key.objectid, level, 0);
4851 	btrfs_free_path(path);
4852 	if (ret == -ENOENT)
4853 		return 0;
4854 	if (ret < 0)
4855 		return ret;
4856 	return 1;
4857 }
4858 
4859 /*
4860  * helper to process tree block pointer.
4861  *
4862  * when wc->stage == DROP_REFERENCE, this function checks the
4863  * reference count of the block pointed to. If the block is
4864  * shared and we need to update back refs for the subtree
4865  * rooted at the block, this function changes wc->stage to
4866  * UPDATE_BACKREF. If the block is shared and there is no
4867  * need to update back refs, this function drops the reference
4868  * to the block.
4869  *
4870  * NOTE: return value 1 means we should stop walking down.
4871  */
4872 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4873 				 struct btrfs_root *root,
4874 				 struct btrfs_path *path,
4875 				 struct walk_control *wc, int *lookup_info)
4876 {
4877 	struct btrfs_fs_info *fs_info = root->fs_info;
4878 	u64 bytenr;
4879 	u64 generation;
4880 	u64 parent;
4881 	struct btrfs_key key;
4882 	struct btrfs_key first_key;
4883 	struct btrfs_ref ref = { 0 };
4884 	struct extent_buffer *next;
4885 	int level = wc->level;
4886 	int reada = 0;
4887 	int ret = 0;
4888 	bool need_account = false;
4889 
4890 	generation = btrfs_node_ptr_generation(path->nodes[level],
4891 					       path->slots[level]);
4892 	/*
4893 	 * if the lower level block was created before the snapshot
4894 	 * was created, we know there is no need to update back refs
4895 	 * for the subtree
4896 	 */
4897 	if (wc->stage == UPDATE_BACKREF &&
4898 	    generation <= root->root_key.offset) {
4899 		*lookup_info = 1;
4900 		return 1;
4901 	}
4902 
4903 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
4904 	btrfs_node_key_to_cpu(path->nodes[level], &first_key,
4905 			      path->slots[level]);
4906 
4907 	next = find_extent_buffer(fs_info, bytenr);
4908 	if (!next) {
4909 		next = btrfs_find_create_tree_block(fs_info, bytenr);
4910 		if (IS_ERR(next))
4911 			return PTR_ERR(next);
4912 
4913 		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
4914 					       level - 1);
4915 		reada = 1;
4916 	}
4917 	btrfs_tree_lock(next);
4918 	btrfs_set_lock_blocking_write(next);
4919 
4920 	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
4921 				       &wc->refs[level - 1],
4922 				       &wc->flags[level - 1]);
4923 	if (ret < 0)
4924 		goto out_unlock;
4925 
4926 	if (unlikely(wc->refs[level - 1] == 0)) {
4927 		btrfs_err(fs_info, "Missing references.");
4928 		ret = -EIO;
4929 		goto out_unlock;
4930 	}
4931 	*lookup_info = 0;
4932 
4933 	if (wc->stage == DROP_REFERENCE) {
4934 		if (wc->refs[level - 1] > 1) {
4935 			need_account = true;
4936 			if (level == 1 &&
4937 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
4938 				goto skip;
4939 
4940 			if (!wc->update_ref ||
4941 			    generation <= root->root_key.offset)
4942 				goto skip;
4943 
4944 			btrfs_node_key_to_cpu(path->nodes[level], &key,
4945 					      path->slots[level]);
4946 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
4947 			if (ret < 0)
4948 				goto skip;
4949 
4950 			wc->stage = UPDATE_BACKREF;
4951 			wc->shared_level = level - 1;
4952 		}
4953 	} else {
4954 		if (level == 1 &&
4955 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
4956 			goto skip;
4957 	}
4958 
4959 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
4960 		btrfs_tree_unlock(next);
4961 		free_extent_buffer(next);
4962 		next = NULL;
4963 		*lookup_info = 1;
4964 	}
4965 
4966 	if (!next) {
4967 		if (reada && level == 1)
4968 			reada_walk_down(trans, root, wc, path);
4969 		next = read_tree_block(fs_info, bytenr, generation, level - 1,
4970 				       &first_key);
4971 		if (IS_ERR(next)) {
4972 			return PTR_ERR(next);
4973 		} else if (!extent_buffer_uptodate(next)) {
4974 			free_extent_buffer(next);
4975 			return -EIO;
4976 		}
4977 		btrfs_tree_lock(next);
4978 		btrfs_set_lock_blocking_write(next);
4979 	}
4980 
4981 	level--;
4982 	ASSERT(level == btrfs_header_level(next));
4983 	if (level != btrfs_header_level(next)) {
4984 		btrfs_err(root->fs_info, "mismatched level");
4985 		ret = -EIO;
4986 		goto out_unlock;
4987 	}
4988 	path->nodes[level] = next;
4989 	path->slots[level] = 0;
4990 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
4991 	wc->level = level;
4992 	if (wc->level == 1)
4993 		wc->reada_slot = 0;
4994 	return 0;
4995 skip:
4996 	wc->refs[level - 1] = 0;
4997 	wc->flags[level - 1] = 0;
4998 	if (wc->stage == DROP_REFERENCE) {
4999 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5000 			parent = path->nodes[level]->start;
5001 		} else {
5002 			ASSERT(root->root_key.objectid ==
5003 			       btrfs_header_owner(path->nodes[level]));
5004 			if (root->root_key.objectid !=
5005 			    btrfs_header_owner(path->nodes[level])) {
5006 				btrfs_err(root->fs_info,
5007 						"mismatched block owner");
5008 				ret = -EIO;
5009 				goto out_unlock;
5010 			}
5011 			parent = 0;
5012 		}
5013 
5014 		/*
5015 		 * If we had a drop_progress we need to verify the refs are set
5016 		 * as expected.  If we find our ref then we know that from here
5017 		 * on out everything should be correct, and we can clear the
5018 		 * ->restarted flag.
5019 		 */
5020 		if (wc->restarted) {
5021 			ret = check_ref_exists(trans, root, bytenr, parent,
5022 					       level - 1);
5023 			if (ret < 0)
5024 				goto out_unlock;
5025 			if (ret == 0)
5026 				goto no_delete;
5027 			ret = 0;
5028 			wc->restarted = 0;
5029 		}
5030 
5031 		/*
5032 		 * Reloc tree doesn't contribute to qgroup numbers, and we have
5033 		 * already accounted them at merge time (replace_path),
5034 		 * thus we could skip expensive subtree trace here.
5035 		 */
5036 		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
5037 		    need_account) {
5038 			ret = btrfs_qgroup_trace_subtree(trans, next,
5039 							 generation, level - 1);
5040 			if (ret) {
5041 				btrfs_err_rl(fs_info,
5042 					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
5043 					     ret);
5044 			}
5045 		}
5046 
5047 		/*
5048 		 * We need to update the next key in our walk control so we can
5049 		 * update the drop_progress key accordingly.  We don't care if
5050 		 * find_next_key doesn't find a key because that means we're at
5051 		 * the end and are going to clean up now.
5052 		 */
5053 		wc->drop_level = level;
5054 		find_next_key(path, level, &wc->drop_progress);
5055 
5056 		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
5057 				       fs_info->nodesize, parent);
5058 		btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid);
5059 		ret = btrfs_free_extent(trans, &ref);
5060 		if (ret)
5061 			goto out_unlock;
5062 	}
5063 no_delete:
5064 	*lookup_info = 1;
5065 	ret = 1;
5066 
5067 out_unlock:
5068 	btrfs_tree_unlock(next);
5069 	free_extent_buffer(next);
5070 
5071 	return ret;
5072 }
5073 
5074 /*
5075  * helper to process tree block while walking up the tree.
5076  *
5077  * when wc->stage == DROP_REFERENCE, this function drops
5078  * reference count on the block.
5079  *
5080  * when wc->stage == UPDATE_BACKREF, this function changes
5081  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5082  * to UPDATE_BACKREF previously while processing the block.
5083  *
5084  * NOTE: return value 1 means we should stop walking up.
5085  */
5086 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5087 				 struct btrfs_root *root,
5088 				 struct btrfs_path *path,
5089 				 struct walk_control *wc)
5090 {
5091 	struct btrfs_fs_info *fs_info = root->fs_info;
5092 	int ret;
5093 	int level = wc->level;
5094 	struct extent_buffer *eb = path->nodes[level];
5095 	u64 parent = 0;
5096 
5097 	if (wc->stage == UPDATE_BACKREF) {
5098 		BUG_ON(wc->shared_level < level);
5099 		if (level < wc->shared_level)
5100 			goto out;
5101 
5102 		ret = find_next_key(path, level + 1, &wc->update_progress);
5103 		if (ret > 0)
5104 			wc->update_ref = 0;
5105 
5106 		wc->stage = DROP_REFERENCE;
5107 		wc->shared_level = -1;
5108 		path->slots[level] = 0;
5109 
5110 		/*
5111 		 * check the reference count again if the block isn't locked.
5112 		 * we should start walking down the tree again if the
5113 		 * reference count is one.
5114 		 */
5115 		if (!path->locks[level]) {
5116 			BUG_ON(level == 0);
5117 			btrfs_tree_lock(eb);
5118 			btrfs_set_lock_blocking_write(eb);
5119 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
5120 
5121 			ret = btrfs_lookup_extent_info(trans, fs_info,
5122 						       eb->start, level, 1,
5123 						       &wc->refs[level],
5124 						       &wc->flags[level]);
5125 			if (ret < 0) {
5126 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5127 				path->locks[level] = 0;
5128 				return ret;
5129 			}
5130 			BUG_ON(wc->refs[level] == 0);
5131 			if (wc->refs[level] == 1) {
5132 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5133 				path->locks[level] = 0;
5134 				return 1;
5135 			}
5136 		}
5137 	}
5138 
5139 	/* wc->stage == DROP_REFERENCE */
5140 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5141 
5142 	if (wc->refs[level] == 1) {
5143 		if (level == 0) {
5144 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5145 				ret = btrfs_dec_ref(trans, root, eb, 1);
5146 			else
5147 				ret = btrfs_dec_ref(trans, root, eb, 0);
5148 			BUG_ON(ret); /* -ENOMEM */
5149 			if (is_fstree(root->root_key.objectid)) {
5150 				ret = btrfs_qgroup_trace_leaf_items(trans, eb);
5151 				if (ret) {
5152 					btrfs_err_rl(fs_info,
5153 	"error %d accounting leaf items, quota is out of sync, rescan required",
5154 					     ret);
5155 				}
5156 			}
5157 		}
5158 		/* make block locked assertion in btrfs_clean_tree_block happy */
5159 		if (!path->locks[level] &&
5160 		    btrfs_header_generation(eb) == trans->transid) {
5161 			btrfs_tree_lock(eb);
5162 			btrfs_set_lock_blocking_write(eb);
5163 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
5164 		}
5165 		btrfs_clean_tree_block(eb);
5166 	}
5167 
5168 	if (eb == root->node) {
5169 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5170 			parent = eb->start;
5171 		else if (root->root_key.objectid != btrfs_header_owner(eb))
5172 			goto owner_mismatch;
5173 	} else {
5174 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5175 			parent = path->nodes[level + 1]->start;
5176 		else if (root->root_key.objectid !=
5177 			 btrfs_header_owner(path->nodes[level + 1]))
5178 			goto owner_mismatch;
5179 	}
5180 
5181 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
5182 out:
5183 	wc->refs[level] = 0;
5184 	wc->flags[level] = 0;
5185 	return 0;
5186 
5187 owner_mismatch:
5188 	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
5189 		     btrfs_header_owner(eb), root->root_key.objectid);
5190 	return -EUCLEAN;
5191 }
5192 
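/*
 * Walk down one level at a time via walk_down_proc() and do_walk_down()
 * until we reach a leaf, a block we must not descend into, or the end of
 * the current node.
 */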
5193 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5194 				   struct btrfs_root *root,
5195 				   struct btrfs_path *path,
5196 				   struct walk_control *wc)
5197 {
5198 	int level = wc->level;
5199 	int lookup_info = 1;
5200 	int ret;
5201 
5202 	while (level >= 0) {
5203 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
5204 		if (ret > 0)
5205 			break;
5206 
5207 		if (level == 0)
5208 			break;
5209 
5210 		if (path->slots[level] >=
5211 		    btrfs_header_nritems(path->nodes[level]))
5212 			break;
5213 
5214 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
5215 		if (ret > 0) {
5216 			path->slots[level]++;
5217 			continue;
5218 		} else if (ret < 0)
5219 			return ret;
5220 		level = wc->level;
5221 	}
5222 	return 0;
5223 }
5224 
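/*
 * Walk back up the tree, processing (and possibly freeing) each node via
 * walk_up_proc().  Returns 0 when there is another slot to descend into
 * and 1 once the walk has climbed past @max_level.
 */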
5225 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5226 				 struct btrfs_root *root,
5227 				 struct btrfs_path *path,
5228 				 struct walk_control *wc, int max_level)
5229 {
5230 	int level = wc->level;
5231 	int ret;
5232 
5233 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5234 	while (level < max_level && path->nodes[level]) {
5235 		wc->level = level;
5236 		if (path->slots[level] + 1 <
5237 		    btrfs_header_nritems(path->nodes[level])) {
5238 			path->slots[level]++;
5239 			return 0;
5240 		} else {
5241 			ret = walk_up_proc(trans, root, path, wc);
5242 			if (ret > 0)
5243 				return 0;
5244 			if (ret < 0)
5245 				return ret;
5246 
5247 			if (path->locks[level]) {
5248 				btrfs_tree_unlock_rw(path->nodes[level],
5249 						     path->locks[level]);
5250 				path->locks[level] = 0;
5251 			}
5252 			free_extent_buffer(path->nodes[level]);
5253 			path->nodes[level] = NULL;
5254 			level++;
5255 		}
5256 	}
5257 	return 1;
5258 }
5259 
5260 /*
5261  * drop a subvolume tree.
5262  *
5263  * this function traverses the tree freeing any blocks that are only
5264  * referenced by the tree.
5265  *
5266  * when a shared tree block is found, this function decreases its
5267  * reference count by one. If update_ref is true, this function
5268  * also makes sure backrefs for the shared block and all lower level
5269  * blocks are properly updated.
5270  *
5271  * If called with for_reloc == 0, may exit early with -EAGAIN
5272  */
5273 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
5274 {
5275 	struct btrfs_fs_info *fs_info = root->fs_info;
5276 	struct btrfs_path *path;
5277 	struct btrfs_trans_handle *trans;
5278 	struct btrfs_root *tree_root = fs_info->tree_root;
5279 	struct btrfs_root_item *root_item = &root->root_item;
5280 	struct walk_control *wc;
5281 	struct btrfs_key key;
5282 	int err = 0;
5283 	int ret;
5284 	int level;
5285 	bool root_dropped = false;
5286 
5287 	btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
5288 
5289 	path = btrfs_alloc_path();
5290 	if (!path) {
5291 		err = -ENOMEM;
5292 		goto out;
5293 	}
5294 
5295 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
5296 	if (!wc) {
5297 		btrfs_free_path(path);
5298 		err = -ENOMEM;
5299 		goto out;
5300 	}
5301 
5302 	/*
5303 	 * Use join to avoid potential EINTR from transaction start. See
5304 	 * wait_reserve_ticket and the whole reservation callchain.
5305 	 */
5306 	if (for_reloc)
5307 		trans = btrfs_join_transaction(tree_root);
5308 	else
5309 		trans = btrfs_start_transaction(tree_root, 0);
5310 	if (IS_ERR(trans)) {
5311 		err = PTR_ERR(trans);
5312 		goto out_free;
5313 	}
5314 
5315 	err = btrfs_run_delayed_items(trans);
5316 	if (err)
5317 		goto out_end_trans;
5318 
5319 	/*
5320 	 * This will help us catch people modifying the fs tree while we're
5321 	 * dropping it.  It is unsafe to mess with the fs tree while it's being
5322 	 * dropped as we unlock the root node and parent nodes as we walk down
5323 	 * the tree, assuming nothing will change.  If something does change
5324 	 * then we'll have stale information and drop references to blocks we've
5325 	 * already dropped.
5326 	 */
5327 	set_bit(BTRFS_ROOT_DELETING, &root->state);
5328 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5329 		level = btrfs_header_level(root->node);
5330 		path->nodes[level] = btrfs_lock_root_node(root);
5331 		btrfs_set_lock_blocking_write(path->nodes[level]);
5332 		path->slots[level] = 0;
5333 		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
5334 		memset(&wc->update_progress, 0,
5335 		       sizeof(wc->update_progress));
5336 	} else {
5337 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5338 		memcpy(&wc->update_progress, &key,
5339 		       sizeof(wc->update_progress));
5340 
5341 		level = root_item->drop_level;
5342 		BUG_ON(level == 0);
5343 		path->lowest_level = level;
5344 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5345 		path->lowest_level = 0;
5346 		if (ret < 0) {
5347 			err = ret;
5348 			goto out_end_trans;
5349 		}
5350 		WARN_ON(ret > 0);
5351 
5352 		/*
5353 		 * Unlock our path; this is safe because only this
5354 		 * function is allowed to delete this snapshot.
5355 		 */
5356 		btrfs_unlock_up_safe(path, 0);
5357 
5358 		level = btrfs_header_level(root->node);
5359 		while (1) {
5360 			btrfs_tree_lock(path->nodes[level]);
5361 			btrfs_set_lock_blocking_write(path->nodes[level]);
5362 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
5363 
5364 			ret = btrfs_lookup_extent_info(trans, fs_info,
5365 						path->nodes[level]->start,
5366 						level, 1, &wc->refs[level],
5367 						&wc->flags[level]);
5368 			if (ret < 0) {
5369 				err = ret;
5370 				goto out_end_trans;
5371 			}
5372 			BUG_ON(wc->refs[level] == 0);
5373 
5374 			if (level == root_item->drop_level)
5375 				break;
5376 
5377 			btrfs_tree_unlock(path->nodes[level]);
5378 			path->locks[level] = 0;
5379 			WARN_ON(wc->refs[level] != 1);
5380 			level--;
5381 		}
5382 	}
5383 
5384 	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
5385 	wc->level = level;
5386 	wc->shared_level = -1;
5387 	wc->stage = DROP_REFERENCE;
5388 	wc->update_ref = update_ref;
5389 	wc->keep_locks = 0;
5390 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
5391 
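	/*
	 * The main drop loop: repeatedly walk down as far as possible, then
	 * back up one level.  After each pass the current position is saved
	 * in root_item->drop_progress / drop_level, so a drop interrupted by
	 * a transaction end (or a crash) can resume where it left off via
	 * the restart path above.
	 */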
5392 	while (1) {
5393 
5394 		ret = walk_down_tree(trans, root, path, wc);
5395 		if (ret < 0) {
5396 			err = ret;
5397 			break;
5398 		}
5399 
5400 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5401 		if (ret < 0) {
5402 			err = ret;
5403 			break;
5404 		}
5405 
5406 		if (ret > 0) {
5407 			BUG_ON(wc->stage != DROP_REFERENCE);
5408 			break;
5409 		}
5410 
5411 		if (wc->stage == DROP_REFERENCE) {
5412 			wc->drop_level = wc->level;
5413 			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
5414 					      &wc->drop_progress,
5415 					      path->slots[wc->drop_level]);
5416 		}
5417 		btrfs_cpu_key_to_disk(&root_item->drop_progress,
5418 				      &wc->drop_progress);
5419 		root_item->drop_level = wc->drop_level;
5420 
5421 		BUG_ON(wc->level == 0);
5422 		if (btrfs_should_end_transaction(trans) ||
5423 		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
5424 			ret = btrfs_update_root(trans, tree_root,
5425 						&root->root_key,
5426 						root_item);
5427 			if (ret) {
5428 				btrfs_abort_transaction(trans, ret);
5429 				err = ret;
5430 				goto out_end_trans;
5431 			}
5432 
5433 			btrfs_end_transaction_throttle(trans);
5434 			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
5435 				btrfs_debug(fs_info,
5436 					    "drop snapshot early exit");
5437 				err = -EAGAIN;
5438 				goto out_free;
5439 			}
5440 
5441 			trans = btrfs_start_transaction(tree_root, 0);
5442 			if (IS_ERR(trans)) {
5443 				err = PTR_ERR(trans);
5444 				goto out_free;
5445 			}
5446 		}
5447 	}
5448 	btrfs_release_path(path);
5449 	if (err)
5450 		goto out_end_trans;
5451 
5452 	ret = btrfs_del_root(trans, &root->root_key);
5453 	if (ret) {
5454 		btrfs_abort_transaction(trans, ret);
5455 		err = ret;
5456 		goto out_end_trans;
5457 	}
5458 
5459 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5460 		ret = btrfs_find_root(tree_root, &root->root_key, path,
5461 				      NULL, NULL);
5462 		if (ret < 0) {
5463 			btrfs_abort_transaction(trans, ret);
5464 			err = ret;
5465 			goto out_end_trans;
5466 		} else if (ret > 0) {
5467 			/*
5468 			 * If we fail to delete the orphan item this time
5469 			 * around, it'll get picked up the next time.
5470 			 * The most common failure here is just -ENOENT.
5471 			 */
5472 			btrfs_del_orphan_item(trans, tree_root,
5473 					      root->root_key.objectid);
5474 		}
5475 	}
5476 
5477 	/*
5478 	 * This subvolume is going to be completely dropped, and won't be
5479 	 * recorded as a dirty root, thus its pertrans meta rsv will not be
5480 	 * freed at transaction commit time.  So free it here manually.
5481 	 */
5482 	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
5483 	btrfs_qgroup_free_meta_all_pertrans(root);
5484 
5485 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
5486 		btrfs_add_dropped_root(trans, root);
5487 	else
5488 		btrfs_put_root(root);
5489 	root_dropped = true;
5490 out_end_trans:
5491 	btrfs_end_transaction_throttle(trans);
5492 out_free:
5493 	kfree(wc);
5494 	btrfs_free_path(path);
5495 out:
5496 	/*
5497 	 * So if we need to stop dropping the snapshot for whatever reason we
5498 	 * need to make sure to add it back to the dead root list so that we
5499 	 * keep trying to do the work later.  This also cleans up the root if
5500 	 * we don't have it in the radix tree (like when we recover after a
5501 	 * power failure or unmount), so we don't leak memory.
5502 	 */
5503 	if (!for_reloc && !root_dropped)
5504 		btrfs_add_dead_root(root);
5505 	return err;
5506 }
5507 
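/*
 * Usage sketch (illustrative only; see the callers for the real details):
 * the common path is the cleaner thread, which picks roots off
 * fs_info->dead_roots and calls, roughly:
 *
 *	btrfs_drop_snapshot(root, 1, 0);
 *
 * With for_reloc == 0 the drop may bail out with -EAGAIN when the cleaner
 * needs to sleep; the root is then re-added to the dead root list (see the
 * out: label above) so the work is retried later.
 */
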
5508 /*
5509  * Drop the subtree rooted at tree block 'node'.
5510  *
5511  * NOTE: this function will unlock and release tree block 'node'.
5512  * Only used by the relocation code.
5513  */
5514 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5515 			struct btrfs_root *root,
5516 			struct extent_buffer *node,
5517 			struct extent_buffer *parent)
5518 {
5519 	struct btrfs_fs_info *fs_info = root->fs_info;
5520 	struct btrfs_path *path;
5521 	struct walk_control *wc;
5522 	int level;
5523 	int parent_level;
5524 	int ret = 0;
5525 	int wret;
5526 
5527 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5528 
5529 	path = btrfs_alloc_path();
5530 	if (!path)
5531 		return -ENOMEM;
5532 
5533 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
5534 	if (!wc) {
5535 		btrfs_free_path(path);
5536 		return -ENOMEM;
5537 	}
5538 
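	/*
	 * Pin 'parent' at parent_level with its slot past the last item, so
	 * walk_up_tree() (capped at parent_level below) can never climb
	 * beyond it; the actual walk starts at 'node'.  wc is set up as if
	 * parent had already been visited, holding a single full-backref
	 * reference.
	 */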
5539 	btrfs_assert_tree_locked(parent);
5540 	parent_level = btrfs_header_level(parent);
5541 	atomic_inc(&parent->refs);
5542 	path->nodes[parent_level] = parent;
5543 	path->slots[parent_level] = btrfs_header_nritems(parent);
5544 
5545 	btrfs_assert_tree_locked(node);
5546 	level = btrfs_header_level(node);
5547 	path->nodes[level] = node;
5548 	path->slots[level] = 0;
5549 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
5550 
5551 	wc->refs[parent_level] = 1;
5552 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5553 	wc->level = level;
5554 	wc->shared_level = -1;
5555 	wc->stage = DROP_REFERENCE;
5556 	wc->update_ref = 0;
5557 	wc->keep_locks = 1;
5558 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
5559 
5560 	while (1) {
5561 		wret = walk_down_tree(trans, root, path, wc);
5562 		if (wret < 0) {
5563 			ret = wret;
5564 			break;
5565 		}
5566 
5567 		wret = walk_up_tree(trans, root, path, wc, parent_level);
5568 		if (wret < 0)
5569 			ret = wret;
5570 		if (wret != 0)
5571 			break;
5572 	}
5573 
5574 	kfree(wc);
5575 	btrfs_free_path(path);
5576 	return ret;
5577 }
5578 
5579 /*
5580  * Helper to account the unused space of all the readonly block groups in
5581  * the space_info.  Takes mirrors into account.
5582  */
5583 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
5584 {
5585 	struct btrfs_block_group *block_group;
5586 	u64 free_bytes = 0;
5587 	int factor;
5588 
5589 	/* It's df, we don't care if it's racy */
5590 	if (list_empty(&sinfo->ro_bgs))
5591 		return 0;
5592 
5593 	spin_lock(&sinfo->lock);
5594 	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
5595 		spin_lock(&block_group->lock);
5596 
5597 		if (!block_group->ro) {
5598 			spin_unlock(&block_group->lock);
5599 			continue;
5600 		}
5601 
5602 		factor = btrfs_bg_type_to_factor(block_group->flags);
5603 		free_bytes += (block_group->length -
5604 			       block_group->used) * factor;
5605 
5606 		spin_unlock(&block_group->lock);
5607 	}
5608 	spin_unlock(&sinfo->lock);
5609 
5610 	return free_bytes;
5611 }
5612 
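/*
 * Unpin a range on the error/abort path.  The 'false' argument asks
 * unpin_extent_range() not to return the space to the free space cache,
 * since the caller is cleaning up after an aborted transaction rather than
 * finishing a commit.
 */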
5613 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
5614 				   u64 start, u64 end)
5615 {
5616 	return unpin_extent_range(fs_info, start, end, false);
5617 }
5618 
5619 /*
5620  * It used to be that old block groups would be left around forever.
5621  * Iterating over them would be enough to trim unused space.  Since we
5622  * now automatically remove them, we also need to iterate over unallocated
5623  * space.
5624  *
5625  * We don't want a transaction for this since the discard may take a
5626  * substantial amount of time.  We don't require that a transaction be
5627  * running, but we do need to take a running transaction into account
5628  * to ensure that we're not discarding chunks that were released or
5629  * allocated in the current transaction.
5630  *
5631  * Holding the chunks lock will prevent other threads from allocating
5632  * or releasing chunks, but it won't prevent a running transaction
5633  * from committing and releasing the memory that the pending chunks
5634  * list head uses.  For that, we need to take a reference to the
5635  * transaction and hold the commit root sem.  We only need to hold
5636  * it while performing the free space search since we have already
5637  * held back allocations.
5638  */
5639 static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
5640 {
5641 	u64 start = SZ_1M, len = 0, end = 0;
5642 	int ret;
5643 
5644 	*trimmed = 0;
5645 
5646 	/* Discard not supported = nothing to do. */
5647 	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
5648 		return 0;
5649 
5650 	/* Not writable = nothing to do. */
5651 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
5652 		return 0;
5653 
5654 	/* No free space = nothing to do. */
5655 	if (device->total_bytes <= device->bytes_used)
5656 		return 0;
5657 
5658 	ret = 0;
5659 
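	/*
	 * Each iteration: under chunk_mutex, find the next range that has
	 * neither CHUNK_TRIMMED nor CHUNK_ALLOCATED set, clamp it to the
	 * device (and past the reserved first 1M), issue the discard, and
	 * mark the range CHUNK_TRIMMED so it is skipped on later passes.
	 */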
5660 	while (1) {
5661 		struct btrfs_fs_info *fs_info = device->fs_info;
5662 		u64 bytes;
5663 
5664 		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
5665 		if (ret)
5666 			break;
5667 
5668 		find_first_clear_extent_bit(&device->alloc_state, start,
5669 					    &start, &end,
5670 					    CHUNK_TRIMMED | CHUNK_ALLOCATED);
5671 
5672 		/* Check if there are any CHUNK_* bits left */
5673 		if (start > device->total_bytes) {
5674 			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
5675 			btrfs_warn_in_rcu(fs_info,
5676 "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
5677 					  start, end - start + 1,
5678 					  rcu_str_deref(device->name),
5679 					  device->total_bytes);
5680 			mutex_unlock(&fs_info->chunk_mutex);
5681 			ret = 0;
5682 			break;
5683 		}
5684 
5685 		/* Ensure we skip the reserved area in the first 1M */
5686 		start = max_t(u64, start, SZ_1M);
5687 
5688 		/*
5689 		 * If find_first_clear_extent_bit() finds a range that spans the
5690 		 * end of the device, it will set end to -1; in this case it's up
5691 		 * to the caller to trim the value to the size of the device.
5692 		 */
5693 		end = min(end, device->total_bytes - 1);
5694 
5695 		len = end - start + 1;
5696 
5697 		/* We didn't find any extents */
5698 		if (!len) {
5699 			mutex_unlock(&fs_info->chunk_mutex);
5700 			ret = 0;
5701 			break;
5702 		}
5703 
5704 		ret = btrfs_issue_discard(device->bdev, start, len,
5705 					  &bytes);
5706 		if (!ret)
5707 			set_extent_bits(&device->alloc_state, start,
5708 					start + bytes - 1,
5709 					CHUNK_TRIMMED);
5710 		mutex_unlock(&fs_info->chunk_mutex);
5711 
5712 		if (ret)
5713 			break;
5714 
5715 		start += len;
5716 		*trimmed += bytes;
5717 
5718 		if (fatal_signal_pending(current)) {
5719 			ret = -ERESTARTSYS;
5720 			break;
5721 		}
5722 
5723 		cond_resched();
5724 	}
5725 
5726 	return ret;
5727 }
5728 
5729 /*
5730  * Trim the whole filesystem by:
5731  * 1) trimming the free space in each block group
5732  * 2) trimming the unallocated space on each device
5733  *
5734  * Trimming continues even if a block group or device encounters an error;
5735  * the return value will be the last such error, or 0 if nothing bad
5736  * happened.
5737  */
5738 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
5739 {
5740 	struct btrfs_block_group *cache = NULL;
5741 	struct btrfs_device *device;
5742 	struct list_head *devices;
5743 	u64 group_trimmed;
5744 	u64 range_end = U64_MAX;
5745 	u64 start;
5746 	u64 end;
5747 	u64 trimmed = 0;
5748 	u64 bg_failed = 0;
5749 	u64 dev_failed = 0;
5750 	int bg_ret = 0;
5751 	int dev_ret = 0;
5752 	int ret = 0;
5753 
5754 	/*
5755 	 * Check range overflow if range->len is set.
5756 	 * The default range->len is U64_MAX.
5757 	 */
5758 	if (range->len != U64_MAX &&
5759 	    check_add_overflow(range->start, range->len, &range_end))
5760 		return -EINVAL;
5761 
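	/*
	 * Walk every block group overlapping [range->start, range_end): make
	 * sure its free space is cached, then trim it.  Failures are counted
	 * and remembered, but the loop keeps going so one bad block group
	 * does not stop the whole trim.
	 */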
5762 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
5763 	for (; cache; cache = btrfs_next_block_group(cache)) {
5764 		if (cache->start >= range_end) {
5765 			btrfs_put_block_group(cache);
5766 			break;
5767 		}
5768 
5769 		start = max(range->start, cache->start);
5770 		end = min(range_end, cache->start + cache->length);
5771 
5772 		if (end - start >= range->minlen) {
5773 			if (!btrfs_block_group_done(cache)) {
5774 				ret = btrfs_cache_block_group(cache, 0);
5775 				if (ret) {
5776 					bg_failed++;
5777 					bg_ret = ret;
5778 					continue;
5779 				}
5780 				ret = btrfs_wait_block_group_cache_done(cache);
5781 				if (ret) {
5782 					bg_failed++;
5783 					bg_ret = ret;
5784 					continue;
5785 				}
5786 			}
5787 			ret = btrfs_trim_block_group(cache,
5788 						     &group_trimmed,
5789 						     start,
5790 						     end,
5791 						     range->minlen);
5792 
5793 			trimmed += group_trimmed;
5794 			if (ret) {
5795 				bg_failed++;
5796 				bg_ret = ret;
5797 				continue;
5798 			}
5799 		}
5800 	}
5801 
5802 	if (bg_failed)
5803 		btrfs_warn(fs_info,
5804 			"failed to trim %llu block group(s), last error %d",
5805 			bg_failed, bg_ret);
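	/*
	 * Now trim the unallocated space on each device, under the
	 * device_list_mutex.  Unlike the block group pass above, this loop
	 * stops at the first device error.
	 */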
5806 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
5807 	devices = &fs_info->fs_devices->devices;
5808 	list_for_each_entry(device, devices, dev_list) {
5809 		ret = btrfs_trim_free_extents(device, &group_trimmed);
5810 		if (ret) {
5811 			dev_failed++;
5812 			dev_ret = ret;
5813 			break;
5814 		}
5815 
5816 		trimmed += group_trimmed;
5817 	}
5818 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5819 
5820 	if (dev_failed)
5821 		btrfs_warn(fs_info,
5822 			"failed to trim %llu device(s), last error %d",
5823 			dev_failed, dev_ret);
5824 	range->len = trimmed;
5825 	if (bg_ret)
5826 		return bg_ret;
5827 	return dev_ret;
5828 }
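
/*
 * Usage sketch (illustrative, from user space): btrfs_trim_fs() is reached
 * via the FITRIM ioctl, with the range supplied in a struct fstrim_range:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,	(the whole filesystem)
 *		.minlen = 0,
 *	};
 *	ioctl(fd, FITRIM, &range);
 *
 * On return, range.len is overwritten with the total number of bytes
 * actually trimmed (the assignment to range->len above).
 */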
5829