// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "ctree.h"
#include "extent-tree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"
#include "fs.h"
#include "accessors.h"
#include "root-tree.h"
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_delayed_ref_node *node, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_delayed_ref_node *node,
                                     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
        struct btrfs_root *root = btrfs_extent_root(fs_info, start);
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node of a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head node may
 * also store the extent flags to set. This way you can check to see what
 * the reference count and extent flags would be once all of the delayed
 * refs are processed.
 */
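/*
 * Illustrative usage (editorial sketch, not taken from a specific caller):
 * outside of a transaction, pass trans == NULL and the lookup runs locklessly
 * against the commit root. For skinny metadata, @offset is the tree level of
 * the block being looked up:
 *
 *	u64 refs, flags;
 *	int ret;
 *
 *	ret = btrfs_lookup_extent_info(NULL, fs_info, bytenr, level, 1,
 *				       &refs, &flags);
 */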
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_root *extent_root;
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
                offset = fs_info->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        extent_root = btrfs_extent_root(fs_info, bytenr);
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == fs_info->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
                        ret = -EUCLEAN;
                        btrfs_err(fs_info,
                        "unexpected extent item size, has %u expect >= %zu",
                                  item_size, sizeof(*ei));
                        if (trans)
                                btrfs_abort_transaction(trans, ret);
                        else
                                btrfs_handle_fs_error(fs_info, ret, NULL);

                        goto out_free;
                }

                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        refcount_inc(&head->refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref_head(head);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back ref is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back ref is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back ref is generic, and
 * can be used in all cases where the implicit back ref is used. The major
 * shortcoming of the full back ref is its overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that the tree block loses its owner tree's reference and do the
 * back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
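/*
 * Worked example (editorial, values chosen arbitrarily): a data extent at
 * bytenr 136708096, written by inode 257 at file offset 0 in subvolume 5,
 * gets an implicit back ref keyed as
 *
 *     (136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(5, 257, 0))
 *
 * If the leaf holding the file extent item is later shared (e.g. through
 * relocation), the ref is converted to a full back ref keyed by the parent
 * leaf's bytenr instead:
 *
 *     (136708096, BTRFS_SHARED_DATA_REF_KEY, parent_leaf_bytenr)
 */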

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
                                     struct btrfs_extent_inline_ref *iref,
                                     enum btrfs_inline_ref_type is_data)
{
        int type = btrfs_extent_inline_ref_type(eb, iref);
        u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

        if (type == BTRFS_TREE_BLOCK_REF_KEY ||
            type == BTRFS_SHARED_BLOCK_REF_KEY ||
            type == BTRFS_SHARED_DATA_REF_KEY ||
            type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (is_data == BTRFS_REF_TYPE_BLOCK) {
                        if (type == BTRFS_TREE_BLOCK_REF_KEY)
                                return type;
                        if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                                ASSERT(eb->fs_info);
                                /*
                                 * Every shared ref has a parent tree block,
                                 * which must be aligned to the sector size.
                                 */
                                if (offset &&
                                    IS_ALIGNED(offset, eb->fs_info->sectorsize))
                                        return type;
                        }
                } else if (is_data == BTRFS_REF_TYPE_DATA) {
                        if (type == BTRFS_EXTENT_DATA_REF_KEY)
                                return type;
                        if (type == BTRFS_SHARED_DATA_REF_KEY) {
                                ASSERT(eb->fs_info);
                                /*
                                 * Every shared ref has a parent tree block,
                                 * which must be aligned to the sector size.
                                 */
                                if (offset &&
                                    IS_ALIGNED(offset, eb->fs_info->sectorsize))
                                        return type;
                        }
                } else {
                        ASSERT(is_data == BTRFS_REF_TYPE_ANY);
                        return type;
                }
        }

        WARN_ON(1);
        btrfs_print_leaf(eb);
        btrfs_err(eb->fs_info,
                  "eb %llu iref 0x%lx invalid extent inline ref type %d",
                  eb->start, (unsigned long)iref, type);

        return BTRFS_REF_TYPE_INVALID;
}

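/*
 * Hash the (root, objectid, offset) triple into the key offset used for
 * implicit data back refs. Editorial note: the high half is shifted by 31
 * rather than 32 bits; since the resulting values are stored as key offsets
 * on disk (see the comment above), this asymmetry is part of the established
 * format and must not be "fixed".
 */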
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(trans, leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
        } else {
                btrfs_err(trans->fs_info,
                          "unrecognized backref key (%llu %u %llu)",
                          key.objectid, key.type, key.offset);
                btrfs_abort_transaction(trans, -EUCLEAN);
                return -EUCLEAN;
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
                btrfs_mark_buffer_dirty(trans, leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;
        int type;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (iref) {
                /*
                 * If type is invalid, we should have bailed out earlier than
                 * this call.
                 */
                type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
                ASSERT(type != BTRFS_REF_TYPE_INVALID);
                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);
        return ret;
}

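/*
 * Summary of the mapping implemented below (editorial note, derived from the
 * code): metadata owners are tree levels, which are always below
 * BTRFS_FIRST_FREE_OBJECTID.
 *
 *	owner < FIRST_FREE, parent set    -> BTRFS_SHARED_BLOCK_REF_KEY
 *	owner < FIRST_FREE, parent unset  -> BTRFS_TREE_BLOCK_REF_KEY
 *	owner >= FIRST_FREE, parent set   -> BTRFS_SHARED_DATA_REF_KEY
 *	owner >= FIRST_FREE, parent unset -> BTRFS_EXTENT_DATA_REF_KEY
 */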
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * Look for an inline back ref. If the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        int err = 0;
        bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
        int needed;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->search_for_extension = 1;
                path->keep_locks = 1;
        } else
                extra_size = -1;

        /*
         * Owner is our level, so we can just add one to get the level for the
         * block we are interested in.
         */
        if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = owner;
        }

again:
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }

        /*
         * We may be a newly converted file system which still has the old fat
         * extent entries for metadata, so try and see if we have one of those.
         */
        if (ret > 0 && skinny_metadata) {
                skinny_metadata = false;
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == num_bytes)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = num_bytes;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret && !insert) {
                err = -ENOENT;
                goto out;
        } else if (WARN_ON(ret)) {
                btrfs_print_leaf(path->nodes[0]);
                btrfs_err(fs_info,
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
                          bytenr, num_bytes, parent, root_objectid, owner,
                          offset);
                err = -EIO;
                goto out;
        }

        leaf = path->nodes[0];
        item_size = btrfs_item_size(leaf, path->slots[0]);
        if (unlikely(item_size < sizeof(*ei))) {
                err = -EUCLEAN;
                btrfs_err(fs_info,
                          "unexpected extent item size, has %llu expect >= %zu",
                          item_size, sizeof(*ei));
                btrfs_abort_transaction(trans, err);
                goto out;
        }

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        }

        if (owner >= BTRFS_FIRST_FREE_OBJECTID)
                needed = BTRFS_REF_TYPE_DATA;
        else
                needed = BTRFS_REF_TYPE_BLOCK;

        err = -ENOENT;
        while (1) {
                if (ptr >= end) {
                        if (ptr > end) {
                                err = -EUCLEAN;
                                btrfs_print_leaf(path->nodes[0]);
                                btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
                                           path->slots[0], root_objectid, owner, offset, parent);
                        }
                        break;
                }
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
                if (type == BTRFS_REF_TYPE_INVALID) {
                        err = -EUCLEAN;
                        goto out;
                }

                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                err = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < root_objectid)
                                        break;
                        }
                }
                ptr += btrfs_extent_inline_ref_size(type);
        }
        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                        err = -EAGAIN;
                        goto out;
                }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        err = -EAGAIN;
                        goto out;
                }
        }
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
        if (insert) {
                path->keep_locks = 0;
                path->search_for_extension = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
}

/*
 * Helper to add a new inline back ref.
 */
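/*
 * Layout sketch (editorial, illustrative): the extent item is first grown by
 * the size of the new ref, then everything from the insertion offset to the
 * old end of the item is shifted right so the new ref lands in sorted
 * position:
 *
 *	[ extent item | ref A | ref C ]           before
 *	[ extent item | ref A | ref C | .... ]    after btrfs_extend_item()
 *	[ extent item | ref A | .... | ref C ]    after memmove_extent_buffer()
 *	[ extent item | ref A | ref B | ref C ]   after writing the new ref
 */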
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int refs_to_add,
                                 struct btrfs_delayed_extent_op *extent_op)
{
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        unsigned long ptr;
        unsigned long end;
        unsigned long item_offset;
        u64 refs;
        int size;
        int type;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        item_offset = (unsigned long)iref - (unsigned long)ei;

        type = extent_ref_type(parent, owner);
        size = btrfs_extent_inline_ref_size(type);

        btrfs_extend_item(trans, path, size);

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        refs += refs_to_add;
        btrfs_set_extent_refs(leaf, ei, refs);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, ei);

        ptr = (unsigned long)ei + item_offset;
        end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
        if (ptr < end - size)
                memmove_extent_buffer(leaf, ptr + size, ptr,
                                      end - size - ptr);

        iref = (struct btrfs_extent_inline_ref *)ptr;
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                struct btrfs_extent_data_ref *dref;
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
                btrfs_set_extent_data_ref_offset(leaf, dref, offset);
                btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                struct btrfs_shared_data_ref *sref;
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else {
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        }
        btrfs_mark_buffer_dirty(trans, leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        int ret;

        ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
                                           num_bytes, parent, root_objectid,
                                           owner, offset, 0);
        if (ret != -ENOENT)
                return ret;

        btrfs_release_path(path);
        *ref_ret = NULL;

        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                ret = lookup_tree_block_ref(trans, path, bytenr, parent,
                                            root_objectid);
        } else {
                ret = lookup_extent_data_ref(trans, path, bytenr, parent,
                                             root_objectid, owner, offset);
        }
        return ret;
}

/*
 * Helper to update/remove an inline back ref.
 */
static noinline_for_stack int update_inline_extent_backref(
                                  struct btrfs_trans_handle *trans,
                                  struct btrfs_path *path,
                                  struct btrfs_extent_inline_ref *iref,
                                  int refs_to_mod,
                                  struct btrfs_delayed_extent_op *extent_op)
{
        struct extent_buffer *leaf = path->nodes[0];
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_data_ref *dref = NULL;
        struct btrfs_shared_data_ref *sref = NULL;
        unsigned long ptr;
        unsigned long end;
        u32 item_size;
        int size;
        int type;
        u64 refs;

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
                struct btrfs_key key;
                u32 extent_size;

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.type == BTRFS_METADATA_ITEM_KEY)
                        extent_size = fs_info->nodesize;
                else
                        extent_size = key.offset;
                btrfs_print_leaf(leaf);
                btrfs_err(fs_info,
        "invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
                          key.objectid, extent_size, refs_to_mod, refs);
                return -EUCLEAN;
        }
        refs += refs_to_mod;
        btrfs_set_extent_refs(leaf, ei, refs);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, ei);

        type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
        /*
         * Function btrfs_get_extent_inline_ref_type() has already printed
         * error messages.
         */
        if (unlikely(type == BTRFS_REF_TYPE_INVALID))
                return -EUCLEAN;

        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                refs = btrfs_extent_data_ref_count(leaf, dref);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                refs = btrfs_shared_data_ref_count(leaf, sref);
        } else {
                refs = 1;
                /*
                 * For tree blocks we can only drop one ref for it, and tree
                 * blocks should not have refs > 1.
                 *
                 * Furthermore if we're inserting a new inline backref, we
                 * won't reach this path either. That would be
                 * setup_inline_extent_backref().
                 */
                if (unlikely(refs_to_mod != -1)) {
                        struct btrfs_key key;

                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                        btrfs_print_leaf(leaf);
                        btrfs_err(fs_info,
                        "invalid refs_to_mod for tree block %llu, has %d expect -1",
                                  key.objectid, refs_to_mod);
                        return -EUCLEAN;
                }
        }

        if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
                struct btrfs_key key;
                u32 extent_size;

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.type == BTRFS_METADATA_ITEM_KEY)
                        extent_size = fs_info->nodesize;
                else
                        extent_size = key.offset;
                btrfs_print_leaf(leaf);
                btrfs_err(fs_info,
"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
                          (unsigned long)iref, key.objectid, extent_size,
                          refs_to_mod, refs);
                return -EUCLEAN;
        }
        refs += refs_to_mod;

        if (refs > 0) {
                if (type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, dref, refs);
                else
                        btrfs_set_shared_data_ref_count(leaf, sref, refs);
        } else {
                size = btrfs_extent_inline_ref_size(type);
                item_size = btrfs_item_size(leaf, path->slots[0]);
                ptr = (unsigned long)iref;
                end = (unsigned long)ei + item_size;
                if (ptr + size < end)
                        memmove_extent_buffer(leaf, ptr, ptr + size,
                                              end - ptr - size);
                item_size -= size;
                btrfs_truncate_item(trans, path, item_size, 1);
        }
        btrfs_mark_buffer_dirty(trans, leaf);
        return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner,
                                 u64 offset, int refs_to_add,
                                 struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_extent_inline_ref *iref;
        int ret;

        ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
                                           num_bytes, parent, root_objectid,
                                           owner, offset, 1);
        if (ret == 0) {
                /*
                 * We're adding refs to a tree block we already own, this
                 * should not happen at all.
                 */
                if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                        btrfs_print_leaf(path->nodes[0]);
                        btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
                                   bytenr, num_bytes, root_objectid, path->slots[0]);
                        return -EUCLEAN;
                }
                ret = update_inline_extent_backref(trans, path, iref,
                                                   refs_to_add, extent_op);
        } else if (ret == -ENOENT) {
                setup_inline_extent_backref(trans, path, iref, parent,
                                            root_objectid, owner, offset,
                                            refs_to_add, extent_op);
                ret = 0;
        }
        return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 int refs_to_drop, int is_data)
{
        int ret = 0;

        BUG_ON(!is_data && refs_to_drop != 1);
        if (iref)
                ret = update_inline_extent_backref(trans, path, iref,
                                                   -refs_to_drop, NULL);
        else if (is_data)
                ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
        else
                ret = btrfs_del_item(trans, root, path);
        return ret;
}

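/*
 * Issue a discard for [start, start + len) on @bdev, skipping any of the
 * fixed superblock mirror locations that fall inside the range. Example
 * (editorial, illustrative): discarding a range that covers the primary
 * superblock at 64KiB is split so that the BTRFS_SUPER_INFO_SIZE bytes at
 * btrfs_sb_offset(0) are left untouched.
 */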
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
                               u64 *discarded_bytes)
{
        int j, ret = 0;
        u64 bytes_left, end;
        u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);

        /* Adjust the range to be aligned to 512B sectors if necessary. */
        if (start != aligned_start) {
                len -= aligned_start - start;
                len = round_down(len, 1 << SECTOR_SHIFT);
                start = aligned_start;
        }

        *discarded_bytes = 0;

        if (!len)
                return 0;

        end = start + len;
        bytes_left = len;

        /* Skip any superblocks on this device. */
        for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
                u64 sb_start = btrfs_sb_offset(j);
                u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
                u64 size = sb_start - start;

                if (!in_range(sb_start, start, bytes_left) &&
                    !in_range(sb_end, start, bytes_left) &&
                    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
                        continue;

                /*
                 * Superblock spans beginning of range. Adjust start and
                 * try again.
                 */
                if (sb_start <= start) {
                        start += sb_end - start;
                        if (start > end) {
                                bytes_left = 0;
                                break;
                        }
                        bytes_left = end - start;
                        continue;
                }

                if (size) {
                        ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                                   size >> SECTOR_SHIFT,
                                                   GFP_NOFS);
                        if (!ret)
                                *discarded_bytes += size;
                        else if (ret != -EOPNOTSUPP)
                                return ret;
                }

                start = sb_end;
                if (start > end) {
                        bytes_left = 0;
                        break;
                }
                bytes_left = end - start;
        }

        while (bytes_left) {
                u64 bytes_to_discard = min(BTRFS_MAX_DISCARD_CHUNK_SIZE, bytes_left);

                ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                           bytes_to_discard >> SECTOR_SHIFT,
                                           GFP_NOFS);

                if (ret) {
                        if (ret != -EOPNOTSUPP)
                                break;
                        continue;
                }

                start += bytes_to_discard;
                bytes_left -= bytes_to_discard;
                *discarded_bytes += bytes_to_discard;
        }

        return ret;
}

static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
        struct btrfs_device *dev = stripe->dev;
        struct btrfs_fs_info *fs_info = dev->fs_info;
        struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
        u64 phys = stripe->physical;
        u64 len = stripe->length;
        u64 discarded = 0;
        int ret = 0;

        /* Zone reset on a zoned filesystem */
        if (btrfs_can_zone_reset(dev, phys, len)) {
                u64 src_disc;

                ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
                if (ret)
                        goto out;

                if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
                    dev != dev_replace->srcdev)
                        goto out;

                src_disc = discarded;

                /* Send to replace target as well */
                ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
                                              &discarded);
                discarded += src_disc;
        } else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
                ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
        } else {
                ret = 0;
                *bytes = 0;
        }

out:
        *bytes = discarded;
        return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 num_bytes, u64 *actual_bytes)
{
        int ret = 0;
        u64 discarded_bytes = 0;
        u64 end = bytenr + num_bytes;
        u64 cur = bytenr;

        /*
         * Avoid races with device replace and make sure the devices in the
         * stripes don't go away while we are discarding.
         */
        btrfs_bio_counter_inc_blocked(fs_info);
        while (cur < end) {
                struct btrfs_discard_stripe *stripes;
                unsigned int num_stripes;
                int i;

                num_bytes = end - cur;
                stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
                if (IS_ERR(stripes)) {
                        ret = PTR_ERR(stripes);
                        if (ret == -EOPNOTSUPP)
                                ret = 0;
                        break;
                }

                for (i = 0; i < num_stripes; i++) {
                        struct btrfs_discard_stripe *stripe = stripes + i;
                        u64 bytes;

                        if (!stripe->dev->bdev) {
                                ASSERT(btrfs_test_opt(fs_info, DEGRADED));
                                continue;
                        }

                        if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
                                      &stripe->dev->dev_state))
                                continue;

                        ret = do_discard_extent(stripe, &bytes);
                        if (ret) {
                                /*
                                 * Keep going if discard is not supported by the
                                 * device.
                                 */
                                if (ret != -EOPNOTSUPP)
                                        break;
                                ret = 0;
                        } else {
                                discarded_bytes += bytes;
                        }
                }
                kfree(stripes);
                if (ret)
                        break;
                cur += num_bytes;
        }
        btrfs_bio_counter_dec(fs_info);
        if (actual_bytes)
                *actual_bytes = discarded_bytes;
        return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_ref *generic_ref)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret;

        ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
               generic_ref->action);
        BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
               generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);

        if (generic_ref->type == BTRFS_REF_METADATA)
                ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
        else
                ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

        btrfs_ref_tree_mod(fs_info, generic_ref);

        return ret;
}

/*
 * __btrfs_inc_extent_ref - insert a backreference for a given extent
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more details
 * on how it works.
 *
 * @trans:          Handle of transaction
 *
 * @node:           The delayed ref node used to get the bytenr/length for the
 *                  extent whose references are incremented.
 *
 * @parent:         If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *                  BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *                  bytenr of the parent block. Since new extents are always
 *                  created with indirect references, this will only be the
 *                  case when relocating a shared extent. In that case,
 *                  root_objectid will be BTRFS_TREE_RELOC_OBJECTID. Otherwise,
 *                  parent must be 0.
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *                  this can be either one of the well-known metadata trees or
 *                  the subvolume id which references this extent.
 *
 * @owner:          For data extents it is the inode number of the owning file.
 *                  For metadata extents this parameter holds the level in the
 *                  tree of the extent.
 *
 * @offset:         For metadata extents the offset is ignored and is currently
 *                  always passed as 0. For data extents it is the file offset
 *                  this extent belongs to.
 *
 * @refs_to_add:    Number of references to add.
 *
 * @extent_op:      Pointer to a structure holding information necessary when
 *                  updating a tree block's flags.
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_delayed_ref_node *node,
                                  u64 parent, u64 root_objectid,
                                  u64 owner, u64 offset, int refs_to_add,
                                  struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *item;
        struct btrfs_key key;
        u64 bytenr = node->bytenr;
        u64 num_bytes = node->num_bytes;
        u64 refs;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* This will setup the path even if it fails to insert the back ref. */
        ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
                                           parent, root_objectid, owner,
                                           offset, refs_to_add, extent_op);
        if ((ret < 0 && ret != -EAGAIN) || !ret)
                goto out;

        /*
         * OK, we had -EAGAIN which means we didn't have space to insert an
         * inline extent ref, so just update the reference count and add a
         * normal backref.
         */
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, item);
        btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, item);

        btrfs_mark_buffer_dirty(trans, leaf);
        btrfs_release_path(path);

        /* Now insert the actual backref. */
        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                ret = insert_tree_block_ref(trans, path, bytenr, parent,
                                            root_objectid);
        else
                ret = insert_extent_data_ref(trans, path, bytenr, parent,
                                             root_objectid, owner, offset,
                                             refs_to_add);

        if (ret)
                btrfs_abort_transaction(trans, ret);
out:
        btrfs_free_path(path);
        return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
                                struct btrfs_delayed_ref_node *node,
                                struct btrfs_delayed_extent_op *extent_op,
                                bool insert_reserved)
{
        int ret = 0;
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_key ins;
        u64 parent = 0;
        u64 ref_root = 0;
        u64 flags = 0;

        ins.objectid = node->bytenr;
        ins.offset = node->num_bytes;
        ins.type = BTRFS_EXTENT_ITEM_KEY;

        ref = btrfs_delayed_node_to_data_ref(node);
        trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

        if (node->type == BTRFS_SHARED_DATA_REF_KEY)
                parent = ref->parent;
        ref_root = ref->root;

        if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
                if (extent_op)
                        flags |= extent_op->flags_to_set;
                ret = alloc_reserved_file_extent(trans, parent, ref_root,
                                                 flags, ref->objectid,
                                                 ref->offset, &ins,
                                                 node->ref_mod);
        } else if (node->action == BTRFS_ADD_DELAYED_REF) {
                ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
                                             ref->objectid, ref->offset,
                                             node->ref_mod, extent_op);
        } else if (node->action == BTRFS_DROP_DELAYED_REF) {
                ret = __btrfs_free_extent(trans, node, parent,
                                          ref_root, ref->objectid,
                                          ref->offset, node->ref_mod,
                                          extent_op);
        } else {
                BUG();
        }
        return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei)
{
        u64 flags = btrfs_extent_flags(leaf, ei);
        if (extent_op->update_flags) {
                flags |= extent_op->flags_to_set;
                btrfs_set_extent_flags(leaf, ei, flags);
        }

        if (extent_op->update_key) {
                struct btrfs_tree_block_info *bi;
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
                bi = (struct btrfs_tree_block_info *)(ei + 1);
                btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
        }
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
                                 struct btrfs_delayed_ref_head *head,
                                 struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root;
        struct btrfs_key key;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        u32 item_size;
        int ret;
        int err = 0;
        int metadata = 1;

        if (TRANS_ABORTED(trans))
                return 0;

        if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
                metadata = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = head->bytenr;

        if (metadata) {
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = extent_op->level;
        } else {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = head->num_bytes;
        }

        root = btrfs_extent_root(fs_info, key.objectid);
again:
        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        if (ret > 0) {
                if (metadata) {
                        if (path->slots[0] > 0) {
                                path->slots[0]--;
                                btrfs_item_key_to_cpu(path->nodes[0], &key,
                                                      path->slots[0]);
                                if (key.objectid == head->bytenr &&
                                    key.type == BTRFS_EXTENT_ITEM_KEY &&
                                    key.offset == head->num_bytes)
                                        ret = 0;
                        }
                        if (ret > 0) {
                                btrfs_release_path(path);
                                metadata = 0;

                                key.objectid = head->bytenr;
                                key.offset = head->num_bytes;
                                key.type = BTRFS_EXTENT_ITEM_KEY;
                                goto again;
                        }
                } else {
                        err = -EUCLEAN;
                        btrfs_err(fs_info,
                  "missing extent item for extent %llu num_bytes %llu level %d",
                                  head->bytenr, head->num_bytes, extent_op->level);
                        goto out;
                }
        }

        leaf = path->nodes[0];
        item_size = btrfs_item_size(leaf, path->slots[0]);

        if (unlikely(item_size < sizeof(*ei))) {
                err = -EUCLEAN;
                btrfs_err(fs_info,
                          "unexpected extent item size, has %u expect >= %zu",
                          item_size, sizeof(*ei));
                btrfs_abort_transaction(trans, err);
                goto out;
        }

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        __run_delayed_extent_op(extent_op, leaf, ei);

        btrfs_mark_buffer_dirty(trans, leaf);
out:
        btrfs_free_path(path);
        return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
                                struct btrfs_delayed_ref_node *node,
                                struct btrfs_delayed_extent_op *extent_op,
                                bool insert_reserved)
{
        int ret = 0;
        struct btrfs_delayed_tree_ref *ref;
        u64 parent = 0;
        u64 ref_root = 0;

        ref = btrfs_delayed_node_to_tree_ref(node);
        trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

        if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
                parent = ref->parent;
        ref_root = ref->root;

        if (unlikely(node->ref_mod != 1)) {
                btrfs_err(trans->fs_info,
        "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
                          node->bytenr, node->ref_mod, node->action, ref_root,
                          parent);
                return -EUCLEAN;
        }
        if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
                BUG_ON(!extent_op || !extent_op->update_flags);
                ret = alloc_reserved_tree_block(trans, node, extent_op);
        } else if (node->action == BTRFS_ADD_DELAYED_REF) {
                ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
                                             ref->level, 0, 1, extent_op);
        } else if (node->action == BTRFS_DROP_DELAYED_REF) {
                ret = __btrfs_free_extent(trans, node, parent, ref_root,
                                          ref->level, 0, 1, extent_op);
        } else {
                BUG();
        }
        return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_delayed_ref_node *node,
                               struct btrfs_delayed_extent_op *extent_op,
                               bool insert_reserved)
{
        int ret = 0;

        if (TRANS_ABORTED(trans)) {
                if (insert_reserved)
                        btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
                return 0;
        }

        if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
            node->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = run_delayed_tree_ref(trans, node, extent_op,
                                           insert_reserved);
        else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
                 node->type == BTRFS_SHARED_DATA_REF_KEY)
                ret = run_delayed_data_ref(trans, node, extent_op,
                                           insert_reserved);
        else
                BUG();
        if (ret && insert_reserved)
                btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
        if (ret < 0)
                btrfs_err(trans->fs_info,
"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
                          node->bytenr, node->num_bytes, node->type,
                          node->action, node->ref_mod, ret);
        return ret;
}

1776 static inline struct btrfs_delayed_ref_node *
1777 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1778 {
1779 struct btrfs_delayed_ref_node *ref;
1780
1781 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
1782 return NULL;
1783
1784 /*
1785 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
1786 * This is to prevent a ref count from going down to zero, which deletes
1787 * the extent item from the extent tree, when there still are references
1788 * to add, which would fail because they would not find the extent item.
1789 */
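/*
 * Illustrative note (editorial, not in the original source): with an
 * extent at refs == 1 and a queued drop followed by a queued add,
 * running the drop first would take refs 1 -> 0 and delete the extent
 * item, making the later add fail; running the add first goes
 * 1 -> 2 -> 1 and both refs succeed.
 */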
1790 if (!list_empty(&head->ref_add_list))
1791 return list_first_entry(&head->ref_add_list,
1792 struct btrfs_delayed_ref_node, add_list);
1793
1794 ref = rb_entry(rb_first_cached(&head->ref_tree),
1795 struct btrfs_delayed_ref_node, ref_node);
1796 ASSERT(list_empty(&ref->add_list));
1797 return ref;
1798 }
1799
1800 static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
1801 struct btrfs_delayed_ref_head *head)
1802 {
1803 spin_lock(&delayed_refs->lock);
1804 head->processing = false;
1805 delayed_refs->num_heads_ready++;
1806 spin_unlock(&delayed_refs->lock);
1807 btrfs_delayed_ref_unlock(head);
1808 }
1809
1810 static struct btrfs_delayed_extent_op *cleanup_extent_op(
1811 struct btrfs_delayed_ref_head *head)
1812 {
1813 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
1814
1815 if (!extent_op)
1816 return NULL;
1817
1818 if (head->must_insert_reserved) {
1819 head->extent_op = NULL;
1820 btrfs_free_delayed_extent_op(extent_op);
1821 return NULL;
1822 }
1823 return extent_op;
1824 }
1825
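/*
 * Descriptive comment (added editorially): run the head's pending extent
 * op, if any, and free it. Returns 0 when there was no extent op to run,
 * a negative errno if running it failed, or 1 on success. Whenever an
 * extent op was present, head->lock has been dropped by the time this
 * returns.
 */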
1826 static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
1827 struct btrfs_delayed_ref_head *head)
1828 {
1829 struct btrfs_delayed_extent_op *extent_op;
1830 int ret;
1831
1832 extent_op = cleanup_extent_op(head);
1833 if (!extent_op)
1834 return 0;
1835 head->extent_op = NULL;
1836 spin_unlock(&head->lock);
1837 ret = run_delayed_extent_op(trans, head, extent_op);
1838 btrfs_free_delayed_extent_op(extent_op);
1839 return ret ? ret : 1;
1840 }
1841
1842 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
1843 struct btrfs_delayed_ref_root *delayed_refs,
1844 struct btrfs_delayed_ref_head *head)
1845 {
1846 int nr_items = 1; /* Dropping this ref head update. */
1847
1848 /*
1849 * We had csum deletions accounted for in our delayed refs rsv, so we need
1850 * to drop the csum leaves for this update from our delayed_refs_rsv.
1851 */
1852 if (head->total_ref_mod < 0 && head->is_data) {
1853 spin_lock(&delayed_refs->lock);
1854 delayed_refs->pending_csums -= head->num_bytes;
1855 spin_unlock(&delayed_refs->lock);
1856 nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
1857 }
1858
1859 btrfs_delayed_refs_rsv_release(fs_info, nr_items);
1860 }
1861
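/*
 * Descriptive comment (added editorially): final processing of a ref head
 * whose ref tree has been emptied. Returns 0 when the head was deleted and
 * cleaned up, a value > 0 when the caller must retry because the head's
 * lock was dropped (a pending extent op was run, or new refs were added in
 * the meantime), or a negative errno on failure.
 */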
1862 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
1863 struct btrfs_delayed_ref_head *head)
1864 {
1866 struct btrfs_fs_info *fs_info = trans->fs_info;
1867 struct btrfs_delayed_ref_root *delayed_refs;
1868 int ret;
1869
1870 delayed_refs = &trans->transaction->delayed_refs;
1871
1872 ret = run_and_cleanup_extent_op(trans, head);
1873 if (ret < 0) {
1874 unselect_delayed_ref_head(delayed_refs, head);
1875 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
1876 return ret;
1877 } else if (ret) {
1878 return ret;
1879 }
1880
1881 /*
1882 * Need to drop our head ref lock and re-acquire the delayed ref lock
1883 * and then re-check to make sure no new refs got added.
1884 */
1885 spin_unlock(&head->lock);
1886 spin_lock(&delayed_refs->lock);
1887 spin_lock(&head->lock);
1888 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
1889 spin_unlock(&head->lock);
1890 spin_unlock(&delayed_refs->lock);
1891 return 1;
1892 }
1893 btrfs_delete_ref_head(delayed_refs, head);
1894 spin_unlock(&head->lock);
1895 spin_unlock(&delayed_refs->lock);
1896
1897 if (head->must_insert_reserved) {
1898 btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
1899 if (head->is_data) {
1900 struct btrfs_root *csum_root;
1901
1902 csum_root = btrfs_csum_root(fs_info, head->bytenr);
1903 ret = btrfs_del_csums(trans, csum_root, head->bytenr,
1904 head->num_bytes);
1905 }
1906 }
1907
1908 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1909
1910 trace_run_delayed_ref_head(fs_info, head, 0);
1911 btrfs_delayed_ref_unlock(head);
1912 btrfs_put_delayed_ref_head(head);
1913 return ret;
1914 }
1915
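/*
 * Descriptive comment (added editorially): pick the next ref head to
 * process and take its mutex. Returns NULL when no head is ready,
 * ERR_PTR(-EAGAIN) when the chosen head was freed while we waited for its
 * mutex, or the locked head otherwise.
 */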
1916 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
1917 struct btrfs_trans_handle *trans)
1918 {
1919 struct btrfs_delayed_ref_root *delayed_refs =
1920 &trans->transaction->delayed_refs;
1921 struct btrfs_delayed_ref_head *head = NULL;
1922 int ret;
1923
1924 spin_lock(&delayed_refs->lock);
1925 head = btrfs_select_ref_head(delayed_refs);
1926 if (!head) {
1927 spin_unlock(&delayed_refs->lock);
1928 return head;
1929 }
1930
1931 /*
1932 * Grab the lock that says we are going to process all the refs for
1933 * this head
1934 */
1935 ret = btrfs_delayed_ref_lock(delayed_refs, head);
1936 spin_unlock(&delayed_refs->lock);
1937
1938 /*
1939 * We may have dropped the spin lock to get the head mutex lock, and
1940 * that might have given someone else time to free the head. If that's
1941 * true, it has been removed from our list and we can move on.
1942 */
1943 if (ret == -EAGAIN)
1944 head = ERR_PTR(-EAGAIN);
1945
1946 return head;
1947 }
1948
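/*
 * Descriptive comment (added editorially): run all currently queued refs
 * of @locked_ref. Expects the head's mutex and spinlock to be held.
 * Returns 0 with locked_ref->lock still held once the ref tree is drained,
 * -EAGAIN if a ref is blocked on the tree mod log sequence, or another
 * negative errno if running a ref failed; in the error cases the head has
 * been unselected and its locks released.
 */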
1949 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
1950 struct btrfs_delayed_ref_head *locked_ref)
1951 {
1952 struct btrfs_fs_info *fs_info = trans->fs_info;
1953 struct btrfs_delayed_ref_root *delayed_refs;
1954 struct btrfs_delayed_extent_op *extent_op;
1955 struct btrfs_delayed_ref_node *ref;
1956 bool must_insert_reserved;
1957 int ret;
1958
1959 delayed_refs = &trans->transaction->delayed_refs;
1960
1961 lockdep_assert_held(&locked_ref->mutex);
1962 lockdep_assert_held(&locked_ref->lock);
1963
1964 while ((ref = select_delayed_ref(locked_ref))) {
1965 if (ref->seq &&
1966 btrfs_check_delayed_seq(fs_info, ref->seq)) {
1967 spin_unlock(&locked_ref->lock);
1968 unselect_delayed_ref_head(delayed_refs, locked_ref);
1969 return -EAGAIN;
1970 }
1971
1972 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
1973 RB_CLEAR_NODE(&ref->ref_node);
1974 if (!list_empty(&ref->add_list))
1975 list_del(&ref->add_list);
1976 /*
1977 * When we play the delayed ref, also correct the ref_mod on
1978 * head
1979 */
1980 switch (ref->action) {
1981 case BTRFS_ADD_DELAYED_REF:
1982 case BTRFS_ADD_DELAYED_EXTENT:
1983 locked_ref->ref_mod -= ref->ref_mod;
1984 break;
1985 case BTRFS_DROP_DELAYED_REF:
1986 locked_ref->ref_mod += ref->ref_mod;
1987 break;
1988 default:
1989 WARN_ON(1);
1990 }
1991 atomic_dec(&delayed_refs->num_entries);
1992
1993 /*
1994 * Record the must_insert_reserved flag before we drop the
1995 * spin lock.
1996 */
1997 must_insert_reserved = locked_ref->must_insert_reserved;
1998 locked_ref->must_insert_reserved = false;
1999
2000 extent_op = locked_ref->extent_op;
2001 locked_ref->extent_op = NULL;
2002 spin_unlock(&locked_ref->lock);
2003
2004 ret = run_one_delayed_ref(trans, ref, extent_op,
2005 must_insert_reserved);
2006
2007 btrfs_free_delayed_extent_op(extent_op);
2008 if (ret) {
2009 unselect_delayed_ref_head(delayed_refs, locked_ref);
2010 btrfs_put_delayed_ref(ref);
2011 return ret;
2012 }
2013
2014 btrfs_put_delayed_ref(ref);
2015 cond_resched();
2016
2017 spin_lock(&locked_ref->lock);
2018 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
2019 }
2020
2021 return 0;
2022 }
2023
2024 /*
2025 * Returns 0 on success or if called with an already aborted transaction.
2026 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2027 */
2028 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2029 unsigned long nr)
2030 {
2031 struct btrfs_fs_info *fs_info = trans->fs_info;
2032 struct btrfs_delayed_ref_root *delayed_refs;
2033 struct btrfs_delayed_ref_head *locked_ref = NULL;
2034 int ret;
2035 unsigned long count = 0;
2036
2037 delayed_refs = &trans->transaction->delayed_refs;
2038 do {
2039 if (!locked_ref) {
2040 locked_ref = btrfs_obtain_ref_head(trans);
2041 if (IS_ERR_OR_NULL(locked_ref)) {
2042 if (PTR_ERR(locked_ref) == -EAGAIN) {
2043 continue;
2044 } else {
2045 break;
2046 }
2047 }
2048 count++;
2049 }
2050 /*
2051 * We need to try to merge add/drops of the same ref since we
2052 * can run into issues with relocate dropping the implicit ref
2053 * and then it being added back again before the drop can
2054 * finish. If we merged anything we need to re-loop so we can
2055 * get a good ref.
2056 * Or we can get node references of the same type that weren't
2057 * merged when created due to bumps in the tree mod seq, and
2058 * we need to merge them to prevent adding an inline extent
2059 * backref before dropping it (triggering a BUG_ON at
2060 * insert_inline_extent_backref()).
2061 */
2062 spin_lock(&locked_ref->lock);
2063 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
2064
2065 ret = btrfs_run_delayed_refs_for_head(trans, locked_ref);
2066 if (ret < 0 && ret != -EAGAIN) {
2067 /*
2068 * Error, btrfs_run_delayed_refs_for_head already
2069 * unlocked everything so just bail out
2070 */
2071 return ret;
2072 } else if (!ret) {
2073 /*
2074 * Success, perform the usual cleanup of a processed
2075 * head
2076 */
2077 ret = cleanup_ref_head(trans, locked_ref);
2078 if (ret > 0) {
2079 /* We dropped our lock, we need to loop. */
2080 ret = 0;
2081 continue;
2082 } else if (ret) {
2083 return ret;
2084 }
2085 }
2086
2087 /*
2088 * Either success case or btrfs_run_delayed_refs_for_head
2089 * returned -EAGAIN, meaning we need to select another head
2090 */
2091
2092 locked_ref = NULL;
2093 cond_resched();
2094 } while ((nr != -1 && count < nr) || locked_ref);
2095
2096 return 0;
2097 }
2098
2099 #ifdef SCRAMBLE_DELAYED_REFS
2100 /*
2101 * Normally delayed refs get processed in ascending bytenr order. This
2102 * correlates in most cases to the order added. To expose dependencies on this
2103 * order, we start to process the tree in the middle instead of the beginning.
2104 */
2105 static u64 find_middle(struct rb_root *root)
2106 {
2107 struct rb_node *n = root->rb_node;
2108 struct btrfs_delayed_ref_node *entry;
2109 int alt = 1;
2110 u64 middle;
2111 u64 first = 0, last = 0;
2112
2113 n = rb_first(root);
2114 if (n) {
2115 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2116 first = entry->bytenr;
2117 }
2118 n = rb_last(root);
2119 if (n) {
2120 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2121 last = entry->bytenr;
2122 }
2123 n = root->rb_node;
2124
2125 while (n) {
2126 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2127 WARN_ON(!entry->in_tree);
2128
2129 middle = entry->bytenr;
2130
2131 if (alt)
2132 n = n->rb_left;
2133 else
2134 n = n->rb_right;
2135
2136 alt = 1 - alt;
2137 }
2138 return middle;
2139 }
2140 #endif
2141
2142 /*
2143 * this starts processing the delayed reference count updates and
2144 * extent insertions we have queued up so far. count can be
2145 * 0, which means to process everything in the tree at the start
2146 * of the run (but not newly added entries), or it can be some target
2147 * number you'd like to process.
2148 *
2149 * Returns 0 on success or if called with an aborted transaction
2150 * Returns <0 on error and aborts the transaction
2151 */
2152 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2153 unsigned long count)
2154 {
2155 struct btrfs_fs_info *fs_info = trans->fs_info;
2156 struct rb_node *node;
2157 struct btrfs_delayed_ref_root *delayed_refs;
2158 struct btrfs_delayed_ref_head *head;
2159 int ret;
2160 int run_all = count == (unsigned long)-1;
2161
2162 /* We'll clean this up in btrfs_cleanup_transaction */
2163 if (TRANS_ABORTED(trans))
2164 return 0;
2165
2166 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2167 return 0;
2168
2169 delayed_refs = &trans->transaction->delayed_refs;
2170 if (count == 0)
2171 count = delayed_refs->num_heads_ready;
2172
2173 again:
2174 #ifdef SCRAMBLE_DELAYED_REFS
2175 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2176 #endif
2177 ret = __btrfs_run_delayed_refs(trans, count);
2178 if (ret < 0) {
2179 btrfs_abort_transaction(trans, ret);
2180 return ret;
2181 }
2182
2183 if (run_all) {
2184 btrfs_create_pending_block_groups(trans);
2185
2186 spin_lock(&delayed_refs->lock);
2187 node = rb_first_cached(&delayed_refs->href_root);
2188 if (!node) {
2189 spin_unlock(&delayed_refs->lock);
2190 goto out;
2191 }
2192 head = rb_entry(node, struct btrfs_delayed_ref_head,
2193 href_node);
2194 refcount_inc(&head->refs);
2195 spin_unlock(&delayed_refs->lock);
2196
2197 /* Mutex was contended, block until it's released and retry. */
2198 mutex_lock(&head->mutex);
2199 mutex_unlock(&head->mutex);
2200
2201 btrfs_put_delayed_ref_head(head);
2202 cond_resched();
2203 goto again;
2204 }
2205 out:
2206 return 0;
2207 }
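
/*
 * Example (editorial sketch, not part of the original file): a commit path
 * that wants to flush every queued update before writing the trees could
 * do something like:
 *
 *	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
 *	if (ret)
 *		return ret;
 *
 * Passing (unsigned long)-1 keeps looping until the delayed ref rbtree is
 * completely empty, including heads added while running.
 */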
2208
2209 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2210 struct extent_buffer *eb, u64 flags)
2211 {
2212 struct btrfs_delayed_extent_op *extent_op;
2213 int level = btrfs_header_level(eb);
2214 int ret;
2215
2216 extent_op = btrfs_alloc_delayed_extent_op();
2217 if (!extent_op)
2218 return -ENOMEM;
2219
2220 extent_op->flags_to_set = flags;
2221 extent_op->update_flags = true;
2222 extent_op->update_key = false;
2223 extent_op->level = level;
2224
2225 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
2226 if (ret)
2227 btrfs_free_delayed_extent_op(extent_op);
2228 return ret;
2229 }
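
/*
 * Example (editorial sketch): a COW path that needs to convert a shared
 * btree block to use full backrefs could queue the flag update as:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, buf,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF);
 *
 * The flag is not written immediately; it is stored in a delayed extent op
 * and applied to the extent item when the delayed refs are run.
 */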
2230
2231 static noinline int check_delayed_ref(struct btrfs_root *root,
2232 struct btrfs_path *path,
2233 u64 objectid, u64 offset, u64 bytenr)
2234 {
2235 struct btrfs_delayed_ref_head *head;
2236 struct btrfs_delayed_ref_node *ref;
2237 struct btrfs_delayed_data_ref *data_ref;
2238 struct btrfs_delayed_ref_root *delayed_refs;
2239 struct btrfs_transaction *cur_trans;
2240 struct rb_node *node;
2241 int ret = 0;
2242
2243 spin_lock(&root->fs_info->trans_lock);
2244 cur_trans = root->fs_info->running_transaction;
2245 if (cur_trans)
2246 refcount_inc(&cur_trans->use_count);
2247 spin_unlock(&root->fs_info->trans_lock);
2248 if (!cur_trans)
2249 return 0;
2250
2251 delayed_refs = &cur_trans->delayed_refs;
2252 spin_lock(&delayed_refs->lock);
2253 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
2254 if (!head) {
2255 spin_unlock(&delayed_refs->lock);
2256 btrfs_put_transaction(cur_trans);
2257 return 0;
2258 }
2259
2260 if (!mutex_trylock(&head->mutex)) {
2261 if (path->nowait) {
2262 spin_unlock(&delayed_refs->lock);
2263 btrfs_put_transaction(cur_trans);
2264 return -EAGAIN;
2265 }
2266
2267 refcount_inc(&head->refs);
2268 spin_unlock(&delayed_refs->lock);
2269
2270 btrfs_release_path(path);
2271
2272 /*
2273 * Mutex was contended, block until it's released and let
2274 * caller try again
2275 */
2276 mutex_lock(&head->mutex);
2277 mutex_unlock(&head->mutex);
2278 btrfs_put_delayed_ref_head(head);
2279 btrfs_put_transaction(cur_trans);
2280 return -EAGAIN;
2281 }
2282 spin_unlock(&delayed_refs->lock);
2283
2284 spin_lock(&head->lock);
2285 /*
2286 * XXX: We should replace this with a proper search function in the
2287 * future.
2288 */
2289 for (node = rb_first_cached(&head->ref_tree); node;
2290 node = rb_next(node)) {
2291 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
2292 /* If it's a shared ref we know a cross reference exists */
2293 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2294 ret = 1;
2295 break;
2296 }
2297
2298 data_ref = btrfs_delayed_node_to_data_ref(ref);
2299
2300 /*
2301 * If our ref doesn't match the one we're currently looking at
2302 * then we have a cross reference.
2303 */
2304 if (data_ref->root != root->root_key.objectid ||
2305 data_ref->objectid != objectid ||
2306 data_ref->offset != offset) {
2307 ret = 1;
2308 break;
2309 }
2310 }
2311 spin_unlock(&head->lock);
2312 mutex_unlock(&head->mutex);
2313 btrfs_put_transaction(cur_trans);
2314 return ret;
2315 }
2316
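/*
 * Descriptive comment (added editorially): check the commit root for
 * references other than (root, objectid, offset) on the data extent at
 * @bytenr. Returns 1 if a cross reference may exist, 0 if the extent is
 * provably only referenced by us, -ENOENT when no matching extent item was
 * found, or another negative errno on search failure.
 */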
2317 static noinline int check_committed_ref(struct btrfs_root *root,
2318 struct btrfs_path *path,
2319 u64 objectid, u64 offset, u64 bytenr,
2320 bool strict)
2321 {
2322 struct btrfs_fs_info *fs_info = root->fs_info;
2323 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2324 struct extent_buffer *leaf;
2325 struct btrfs_extent_data_ref *ref;
2326 struct btrfs_extent_inline_ref *iref;
2327 struct btrfs_extent_item *ei;
2328 struct btrfs_key key;
2329 u32 item_size;
2330 int type;
2331 int ret;
2332
2333 key.objectid = bytenr;
2334 key.offset = (u64)-1;
2335 key.type = BTRFS_EXTENT_ITEM_KEY;
2336
2337 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2338 if (ret < 0)
2339 goto out;
2340 BUG_ON(ret == 0); /* Corruption */
2341
2342 ret = -ENOENT;
2343 if (path->slots[0] == 0)
2344 goto out;
2345
2346 path->slots[0]--;
2347 leaf = path->nodes[0];
2348 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2349
2350 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2351 goto out;
2352
2353 ret = 1;
2354 item_size = btrfs_item_size(leaf, path->slots[0]);
2355 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2356
2357 /* If extent item has more than 1 inline ref then it's shared */
2358 if (item_size != sizeof(*ei) +
2359 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2360 goto out;
2361
2362 /*
2363 * If extent created before last snapshot => it's shared unless the
2364 * snapshot has been deleted. Use the heuristic if strict is false.
2365 */
2366 if (!strict &&
2367 (btrfs_extent_generation(leaf, ei) <=
2368 btrfs_root_last_snapshot(&root->root_item)))
2369 goto out;
2370
2371 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2372
2373 /* If this extent has SHARED_DATA_REF then it's shared */
2374 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2375 if (type != BTRFS_EXTENT_DATA_REF_KEY)
2376 goto out;
2377
2378 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2379 if (btrfs_extent_refs(leaf, ei) !=
2380 btrfs_extent_data_ref_count(leaf, ref) ||
2381 btrfs_extent_data_ref_root(leaf, ref) !=
2382 root->root_key.objectid ||
2383 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2384 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2385 goto out;
2386
2387 ret = 0;
2388 out:
2389 return ret;
2390 }
2391
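/*
 * Descriptive comment (added editorially): check whether the data extent
 * at @bytenr may be referenced by anything other than (root, objectid,
 * offset), consulting both the commit root and the current delayed refs.
 * Returns 0 when no cross reference exists, a positive value when one does
 * (or may), and a negative errno on error; -EAGAIN from the delayed ref
 * check is retried internally.
 */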
2392 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
2393 u64 bytenr, bool strict, struct btrfs_path *path)
2394 {
2395 int ret;
2396
2397 do {
2398 ret = check_committed_ref(root, path, objectid,
2399 offset, bytenr, strict);
2400 if (ret && ret != -ENOENT)
2401 goto out;
2402
2403 ret = check_delayed_ref(root, path, objectid, offset, bytenr);
2404 } while (ret == -EAGAIN);
2405
2406 out:
2407 btrfs_release_path(path);
2408 if (btrfs_is_data_reloc_root(root))
2409 WARN_ON(ret > 0);
2410 return ret;
2411 }
2412
2413 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2414 struct btrfs_root *root,
2415 struct extent_buffer *buf,
2416 int full_backref, int inc)
2417 {
2418 struct btrfs_fs_info *fs_info = root->fs_info;
2419 u64 bytenr;
2420 u64 num_bytes;
2421 u64 parent;
2422 u64 ref_root;
2423 u32 nritems;
2424 struct btrfs_key key;
2425 struct btrfs_file_extent_item *fi;
2426 struct btrfs_ref generic_ref = { 0 };
2427 bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
2428 int i;
2429 int action;
2430 int level;
2431 int ret = 0;
2432
2433 if (btrfs_is_testing(fs_info))
2434 return 0;
2435
2436 ref_root = btrfs_header_owner(buf);
2437 nritems = btrfs_header_nritems(buf);
2438 level = btrfs_header_level(buf);
2439
2440 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0)
2441 return 0;
2442
2443 if (full_backref)
2444 parent = buf->start;
2445 else
2446 parent = 0;
2447 if (inc)
2448 action = BTRFS_ADD_DELAYED_REF;
2449 else
2450 action = BTRFS_DROP_DELAYED_REF;
2451
2452 for (i = 0; i < nritems; i++) {
2453 if (level == 0) {
2454 btrfs_item_key_to_cpu(buf, &key, i);
2455 if (key.type != BTRFS_EXTENT_DATA_KEY)
2456 continue;
2457 fi = btrfs_item_ptr(buf, i,
2458 struct btrfs_file_extent_item);
2459 if (btrfs_file_extent_type(buf, fi) ==
2460 BTRFS_FILE_EXTENT_INLINE)
2461 continue;
2462 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2463 if (bytenr == 0)
2464 continue;
2465
2466 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2467 key.offset -= btrfs_file_extent_offset(buf, fi);
2468 btrfs_init_generic_ref(&generic_ref, action, bytenr,
2469 num_bytes, parent);
2470 btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
2471 key.offset, root->root_key.objectid,
2472 for_reloc);
2473 if (inc)
2474 ret = btrfs_inc_extent_ref(trans, &generic_ref);
2475 else
2476 ret = btrfs_free_extent(trans, &generic_ref);
2477 if (ret)
2478 goto fail;
2479 } else {
2480 bytenr = btrfs_node_blockptr(buf, i);
2481 num_bytes = fs_info->nodesize;
2482 btrfs_init_generic_ref(&generic_ref, action, bytenr,
2483 num_bytes, parent);
2484 btrfs_init_tree_ref(&generic_ref, level - 1, ref_root,
2485 root->root_key.objectid, for_reloc);
2486 if (inc)
2487 ret = btrfs_inc_extent_ref(trans, &generic_ref);
2488 else
2489 ret = btrfs_free_extent(trans, &generic_ref);
2490 if (ret)
2491 goto fail;
2492 }
2493 }
2494 return 0;
2495 fail:
2496 return ret;
2497 }
2498
2499 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2500 struct extent_buffer *buf, int full_backref)
2501 {
2502 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2503 }
2504
2505 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2506 struct extent_buffer *buf, int full_backref)
2507 {
2508 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2509 }
2510
2511 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
2512 {
2513 struct btrfs_fs_info *fs_info = root->fs_info;
2514 u64 flags;
2515 u64 ret;
2516
2517 if (data)
2518 flags = BTRFS_BLOCK_GROUP_DATA;
2519 else if (root == fs_info->chunk_root)
2520 flags = BTRFS_BLOCK_GROUP_SYSTEM;
2521 else
2522 flags = BTRFS_BLOCK_GROUP_METADATA;
2523
2524 ret = btrfs_get_alloc_profile(fs_info, flags);
2525 return ret;
2526 }
2527
2528 static u64 first_logical_byte(struct btrfs_fs_info *fs_info)
2529 {
2530 struct rb_node *leftmost;
2531 u64 bytenr = 0;
2532
2533 read_lock(&fs_info->block_group_cache_lock);
2534 /* Get the block group with the lowest logical start address. */
2535 leftmost = rb_first_cached(&fs_info->block_group_cache_tree);
2536 if (leftmost) {
2537 struct btrfs_block_group *bg;
2538
2539 bg = rb_entry(leftmost, struct btrfs_block_group, cache_node);
2540 bytenr = bg->start;
2541 }
2542 read_unlock(&fs_info->block_group_cache_lock);
2543
2544 return bytenr;
2545 }
2546
2547 static int pin_down_extent(struct btrfs_trans_handle *trans,
2548 struct btrfs_block_group *cache,
2549 u64 bytenr, u64 num_bytes, int reserved)
2550 {
2551 struct btrfs_fs_info *fs_info = cache->fs_info;
2552
2553 spin_lock(&cache->space_info->lock);
2554 spin_lock(&cache->lock);
2555 cache->pinned += num_bytes;
2556 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
2557 num_bytes);
2558 if (reserved) {
2559 cache->reserved -= num_bytes;
2560 cache->space_info->bytes_reserved -= num_bytes;
2561 }
2562 spin_unlock(&cache->lock);
2563 spin_unlock(&cache->space_info->lock);
2564
2565 set_extent_bit(&trans->transaction->pinned_extents, bytenr,
2566 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
2567 return 0;
2568 }
2569
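/*
 * Descriptive comment (added editorially): pin the given range so it
 * cannot be reused until the transaction commits. When @reserved is set
 * the bytes are also moved out of the block group's reserved counters,
 * since the caller owned a reservation for them.
 */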
2570 int btrfs_pin_extent(struct btrfs_trans_handle *trans,
2571 u64 bytenr, u64 num_bytes, int reserved)
2572 {
2573 struct btrfs_block_group *cache;
2574
2575 cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2576 BUG_ON(!cache); /* Logic error */
2577
2578 pin_down_extent(trans, cache, bytenr, num_bytes, reserved);
2579
2580 btrfs_put_block_group(cache);
2581 return 0;
2582 }
2583
2584 /*
2585 * this function must be called within a transaction
2586 */
2587 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
2588 u64 bytenr, u64 num_bytes)
2589 {
2590 struct btrfs_block_group *cache;
2591 int ret;
2592
2593 cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2594 if (!cache)
2595 return -EINVAL;
2596
2597 /*
2598 * Fully cache the free space first so that our pin removes the free space
2599 * from the cache.
2600 */
2601 ret = btrfs_cache_block_group(cache, true);
2602 if (ret)
2603 goto out;
2604
2605 pin_down_extent(trans, cache, bytenr, num_bytes, 0);
2606
2607 /* remove us from the free space cache (if we're there at all) */
2608 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
2609 out:
2610 btrfs_put_block_group(cache);
2611 return ret;
2612 }
2613
2614 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
2615 u64 start, u64 num_bytes)
2616 {
2617 int ret;
2618 struct btrfs_block_group *block_group;
2619
2620 block_group = btrfs_lookup_block_group(fs_info, start);
2621 if (!block_group)
2622 return -EINVAL;
2623
2624 ret = btrfs_cache_block_group(block_group, true);
2625 if (ret)
2626 goto out;
2627
2628 ret = btrfs_remove_free_space(block_group, start, num_bytes);
2629 out:
2630 btrfs_put_block_group(block_group);
2631 return ret;
2632 }
2633
2634 int btrfs_exclude_logged_extents(struct extent_buffer *eb)
2635 {
2636 struct btrfs_fs_info *fs_info = eb->fs_info;
2637 struct btrfs_file_extent_item *item;
2638 struct btrfs_key key;
2639 int found_type;
2640 int i;
2641 int ret = 0;
2642
2643 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
2644 return 0;
2645
2646 for (i = 0; i < btrfs_header_nritems(eb); i++) {
2647 btrfs_item_key_to_cpu(eb, &key, i);
2648 if (key.type != BTRFS_EXTENT_DATA_KEY)
2649 continue;
2650 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2651 found_type = btrfs_file_extent_type(eb, item);
2652 if (found_type == BTRFS_FILE_EXTENT_INLINE)
2653 continue;
2654 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
2655 continue;
2656 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
2657 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
2658 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
2659 if (ret)
2660 break;
2661 }
2662
2663 return ret;
2664 }
2665
2666 static void
2667 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
2668 {
2669 atomic_inc(&bg->reservations);
2670 }
2671
2672 /*
2673 * Returns the free cluster for the given space info and sets empty_cluster to
2674 * what it should be based on the mount options.
2675 */
2676 static struct btrfs_free_cluster *
2677 fetch_cluster_info(struct btrfs_fs_info *fs_info,
2678 struct btrfs_space_info *space_info, u64 *empty_cluster)
2679 {
2680 struct btrfs_free_cluster *ret = NULL;
2681
2682 *empty_cluster = 0;
2683 if (btrfs_mixed_space_info(space_info))
2684 return ret;
2685
2686 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
2687 ret = &fs_info->meta_alloc_cluster;
2688 if (btrfs_test_opt(fs_info, SSD))
2689 *empty_cluster = SZ_2M;
2690 else
2691 *empty_cluster = SZ_64K;
2692 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
2693 btrfs_test_opt(fs_info, SSD_SPREAD)) {
2694 *empty_cluster = SZ_2M;
2695 ret = &fs_info->data_alloc_cluster;
2696 }
2697
2698 return ret;
2699 }
2700
2701 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
2702 u64 start, u64 end,
2703 const bool return_free_space)
2704 {
2705 struct btrfs_block_group *cache = NULL;
2706 struct btrfs_space_info *space_info;
2707 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
2708 struct btrfs_free_cluster *cluster = NULL;
2709 u64 len;
2710 u64 total_unpinned = 0;
2711 u64 empty_cluster = 0;
2712 bool readonly;
2713
2714 while (start <= end) {
2715 readonly = false;
2716 if (!cache ||
2717 start >= cache->start + cache->length) {
2718 if (cache)
2719 btrfs_put_block_group(cache);
2720 total_unpinned = 0;
2721 cache = btrfs_lookup_block_group(fs_info, start);
2722 BUG_ON(!cache); /* Logic error */
2723
2724 cluster = fetch_cluster_info(fs_info,
2725 cache->space_info,
2726 &empty_cluster);
2727 empty_cluster <<= 1;
2728 }
2729
2730 len = cache->start + cache->length - start;
2731 len = min(len, end + 1 - start);
2732
2733 if (return_free_space)
2734 btrfs_add_free_space(cache, start, len);
2735
2736 start += len;
2737 total_unpinned += len;
2738 space_info = cache->space_info;
2739
2740 /*
2741 * If this space cluster has been marked as fragmented and we've
2742 * unpinned enough in this block group to potentially allow a
2743 * cluster to be created inside of it, go ahead and clear the
2744 * fragmented check.
2745 */
2746 if (cluster && cluster->fragmented &&
2747 total_unpinned > empty_cluster) {
2748 spin_lock(&cluster->lock);
2749 cluster->fragmented = 0;
2750 spin_unlock(&cluster->lock);
2751 }
2752
2753 spin_lock(&space_info->lock);
2754 spin_lock(&cache->lock);
2755 cache->pinned -= len;
2756 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
2757 space_info->max_extent_size = 0;
2758 if (cache->ro) {
2759 space_info->bytes_readonly += len;
2760 readonly = true;
2761 } else if (btrfs_is_zoned(fs_info)) {
2762 /* Need reset before reusing in a zoned block group */
2763 btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
2764 len);
2765 readonly = true;
2766 }
2767 spin_unlock(&cache->lock);
2768 if (!readonly && return_free_space &&
2769 global_rsv->space_info == space_info) {
2770 spin_lock(&global_rsv->lock);
2771 if (!global_rsv->full) {
2772 u64 to_add = min(len, global_rsv->size -
2773 global_rsv->reserved);
2774
2775 global_rsv->reserved += to_add;
2776 btrfs_space_info_update_bytes_may_use(fs_info,
2777 space_info, to_add);
2778 if (global_rsv->reserved >= global_rsv->size)
2779 global_rsv->full = 1;
2780 len -= to_add;
2781 }
2782 spin_unlock(&global_rsv->lock);
2783 }
2784 /* Add to any tickets we may have */
2785 if (!readonly && return_free_space && len)
2786 btrfs_try_granting_tickets(fs_info, space_info);
2787 spin_unlock(&space_info->lock);
2788 }
2789
2790 if (cache)
2791 btrfs_put_block_group(cache);
2792 return 0;
2793 }
2794
2795 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
2796 {
2797 struct btrfs_fs_info *fs_info = trans->fs_info;
2798 struct btrfs_block_group *block_group, *tmp;
2799 struct list_head *deleted_bgs;
2800 struct extent_io_tree *unpin;
2801 u64 start;
2802 u64 end;
2803 int ret;
2804
2805 unpin = &trans->transaction->pinned_extents;
2806
2807 while (!TRANS_ABORTED(trans)) {
2808 struct extent_state *cached_state = NULL;
2809
2810 mutex_lock(&fs_info->unused_bg_unpin_mutex);
2811 if (!find_first_extent_bit(unpin, 0, &start, &end,
2812 EXTENT_DIRTY, &cached_state)) {
2813 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2814 break;
2815 }
2816
2817 if (btrfs_test_opt(fs_info, DISCARD_SYNC))
2818 ret = btrfs_discard_extent(fs_info, start,
2819 end + 1 - start, NULL);
2820
2821 clear_extent_dirty(unpin, start, end, &cached_state);
2822 unpin_extent_range(fs_info, start, end, true);
2823 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2824 free_extent_state(cached_state);
2825 cond_resched();
2826 }
2827
2828 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
2829 btrfs_discard_calc_delay(&fs_info->discard_ctl);
2830 btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
2831 }
2832
2833 /*
2834 * Transaction is finished. We don't need the lock anymore. We
2835 * do need to clean up the block groups in case of a transaction
2836 * abort.
2837 */
2838 deleted_bgs = &trans->transaction->deleted_bgs;
2839 list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
2840 u64 trimmed = 0;
2841
2842 ret = -EROFS;
2843 if (!TRANS_ABORTED(trans))
2844 ret = btrfs_discard_extent(fs_info,
2845 block_group->start,
2846 block_group->length,
2847 &trimmed);
2848
2849 list_del_init(&block_group->bg_list);
2850 btrfs_unfreeze_block_group(block_group);
2851 btrfs_put_block_group(block_group);
2852
2853 if (ret) {
2854 const char *errstr = btrfs_decode_error(ret);
2855 btrfs_warn(fs_info,
2856 "discard failed while removing blockgroup: errno=%d %s",
2857 ret, errstr);
2858 }
2859 }
2860
2861 return 0;
2862 }
2863
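/*
 * Descriptive comment (added editorially): bookkeeping once the last ref
 * of an extent is gone: drop its csum items (data only), return the range
 * to the free space tree and update the block group counters. Any failure
 * aborts the transaction and is returned to the caller.
 */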
2864 static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
2865 u64 bytenr, u64 num_bytes, bool is_data)
2866 {
2867 int ret;
2868
2869 if (is_data) {
2870 struct btrfs_root *csum_root;
2871
2872 csum_root = btrfs_csum_root(trans->fs_info, bytenr);
2873 ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
2874 if (ret) {
2875 btrfs_abort_transaction(trans, ret);
2876 return ret;
2877 }
2878 }
2879
2880 ret = add_to_free_space_tree(trans, bytenr, num_bytes);
2881 if (ret) {
2882 btrfs_abort_transaction(trans, ret);
2883 return ret;
2884 }
2885
2886 ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
2887 if (ret)
2888 btrfs_abort_transaction(trans, ret);
2889
2890 return ret;
2891 }
2892
2893 #define abort_and_dump(trans, path, fmt, args...) \
2894 ({ \
2895 btrfs_abort_transaction(trans, -EUCLEAN); \
2896 btrfs_print_leaf(path->nodes[0]); \
2897 btrfs_crit(trans->fs_info, fmt, ##args); \
2898 })
2899
2900 /*
2901 * Drop one or more refs of @node.
2902 *
2903 * 1. Locate the extent refs.
2904 * It's either inline in the EXTENT/METADATA_ITEM or in a keyed SHARED_* item.
2905 * Locate it, then reduce the ref count or remove the backref item completely.
2906 *
2907 * 2. Update the refs count in EXTENT/METADATA_ITEM
2908 *
2909 * Inline backref case:
2910 *
2911 * in extent tree we have:
2912 *
2913 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
2914 * refs 2 gen 6 flags DATA
2915 * extent data backref root FS_TREE objectid 258 offset 0 count 1
2916 * extent data backref root FS_TREE objectid 257 offset 0 count 1
2917 *
2918 * This function gets called with:
2919 *
2920 * node->bytenr = 13631488
2921 * node->num_bytes = 1048576
2922 * root_objectid = FS_TREE
2923 * owner_objectid = 257
2924 * owner_offset = 0
2925 * refs_to_drop = 1
2926 *
2927 * Then we should get something like:
2928 *
2929 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
2930 * refs 1 gen 6 flags DATA
2931 * extent data backref root FS_TREE objectid 258 offset 0 count 1
2932 *
2933 * Keyed backref case:
2934 *
2935 * in extent tree we have:
2936 *
2937 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
2938 * refs 754 gen 6 flags DATA
2939 * [...]
2940 * item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28
2941 * extent data backref root FS_TREE objectid 866 offset 0 count 1
2942 *
2943 * This function gets called with:
2944 *
2945 * node->bytenr = 13631488
2946 * node->num_bytes = 1048576
2947 * root_objectid = FS_TREE
2948 * owner_objectid = 866
2949 * owner_offset = 0
2950 * refs_to_drop = 1
2951 *
2952 * Then we should get something like:
2953 *
2954 * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
2955 * refs 753 gen 6 flags DATA
2956 *
2957 * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
2958 */
2959 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2960 struct btrfs_delayed_ref_node *node, u64 parent,
2961 u64 root_objectid, u64 owner_objectid,
2962 u64 owner_offset, int refs_to_drop,
2963 struct btrfs_delayed_extent_op *extent_op)
2964 {
2965 struct btrfs_fs_info *info = trans->fs_info;
2966 struct btrfs_key key;
2967 struct btrfs_path *path;
2968 struct btrfs_root *extent_root;
2969 struct extent_buffer *leaf;
2970 struct btrfs_extent_item *ei;
2971 struct btrfs_extent_inline_ref *iref;
2972 int ret;
2973 int is_data;
2974 int extent_slot = 0;
2975 int found_extent = 0;
2976 int num_to_del = 1;
2977 u32 item_size;
2978 u64 refs;
2979 u64 bytenr = node->bytenr;
2980 u64 num_bytes = node->num_bytes;
2981 bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
2982
2983 extent_root = btrfs_extent_root(info, bytenr);
2984 ASSERT(extent_root);
2985
2986 path = btrfs_alloc_path();
2987 if (!path)
2988 return -ENOMEM;
2989
2990 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
2991
2992 if (!is_data && refs_to_drop != 1) {
2993 btrfs_crit(info,
2994 "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
2995 node->bytenr, refs_to_drop);
2996 ret = -EINVAL;
2997 btrfs_abort_transaction(trans, ret);
2998 goto out;
2999 }
3000
3001 if (is_data)
3002 skinny_metadata = false;
3003
3004 ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
3005 parent, root_objectid, owner_objectid,
3006 owner_offset);
3007 if (ret == 0) {
3008 /*
3009 * Either the inline backref or the SHARED_DATA_REF/
3010 * SHARED_BLOCK_REF is found
3011 *
3012 * Here is a quick path to locate EXTENT/METADATA_ITEM.
3013 * It's possible the EXTENT/METADATA_ITEM is near the current slot.
3014 */
3015 extent_slot = path->slots[0];
3016 while (extent_slot >= 0) {
3017 btrfs_item_key_to_cpu(path->nodes[0], &key,
3018 extent_slot);
3019 if (key.objectid != bytenr)
3020 break;
3021 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3022 key.offset == num_bytes) {
3023 found_extent = 1;
3024 break;
3025 }
3026 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3027 key.offset == owner_objectid) {
3028 found_extent = 1;
3029 break;
3030 }
3031
3032 /* Quick path didn't find the EXTENT/METADATA_ITEM */
3033 if (path->slots[0] - extent_slot > 5)
3034 break;
3035 extent_slot--;
3036 }
3037
3038 if (!found_extent) {
3039 if (iref) {
3040 abort_and_dump(trans, path,
3041 "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref",
3042 path->slots[0]);
3043 ret = -EUCLEAN;
3044 goto out;
3045 }
3046 /* Must be SHARED_* item, remove the backref first */
3047 ret = remove_extent_backref(trans, extent_root, path,
3048 NULL, refs_to_drop, is_data);
3049 if (ret) {
3050 btrfs_abort_transaction(trans, ret);
3051 goto out;
3052 }
3053 btrfs_release_path(path);
3054
3055 /* Slow path to locate EXTENT/METADATA_ITEM */
3056 key.objectid = bytenr;
3057 key.type = BTRFS_EXTENT_ITEM_KEY;
3058 key.offset = num_bytes;
3059
3060 if (!is_data && skinny_metadata) {
3061 key.type = BTRFS_METADATA_ITEM_KEY;
3062 key.offset = owner_objectid;
3063 }
3064
3065 ret = btrfs_search_slot(trans, extent_root,
3066 &key, path, -1, 1);
3067 if (ret > 0 && skinny_metadata && path->slots[0]) {
3068 /*
3069 * Couldn't find our skinny metadata item,
3070 * see if we have ye olde extent item.
3071 */
3072 path->slots[0]--;
3073 btrfs_item_key_to_cpu(path->nodes[0], &key,
3074 path->slots[0]);
3075 if (key.objectid == bytenr &&
3076 key.type == BTRFS_EXTENT_ITEM_KEY &&
3077 key.offset == num_bytes)
3078 ret = 0;
3079 }
3080
3081 if (ret > 0 && skinny_metadata) {
3082 skinny_metadata = false;
3083 key.objectid = bytenr;
3084 key.type = BTRFS_EXTENT_ITEM_KEY;
3085 key.offset = num_bytes;
3086 btrfs_release_path(path);
3087 ret = btrfs_search_slot(trans, extent_root,
3088 &key, path, -1, 1);
3089 }
3090
3091 if (ret) {
3092 if (ret > 0)
3093 btrfs_print_leaf(path->nodes[0]);
3094 btrfs_err(info,
3095 "umm, got %d back from search, was looking for %llu, slot %d",
3096 ret, bytenr, path->slots[0]);
3097 }
3098 if (ret < 0) {
3099 btrfs_abort_transaction(trans, ret);
3100 goto out;
3101 }
3102 extent_slot = path->slots[0];
3103 }
3104 } else if (WARN_ON(ret == -ENOENT)) {
3105 abort_and_dump(trans, path,
3106 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d",
3107 bytenr, parent, root_objectid, owner_objectid,
3108 owner_offset, path->slots[0]);
3109 goto out;
3110 } else {
3111 btrfs_abort_transaction(trans, ret);
3112 goto out;
3113 }
3114
3115 leaf = path->nodes[0];
3116 item_size = btrfs_item_size(leaf, extent_slot);
3117 if (unlikely(item_size < sizeof(*ei))) {
3118 ret = -EUCLEAN;
3119 btrfs_err(trans->fs_info,
3120 "unexpected extent item size, has %u expect >= %zu",
3121 item_size, sizeof(*ei));
3122 btrfs_abort_transaction(trans, ret);
3123 goto out;
3124 }
3125 ei = btrfs_item_ptr(leaf, extent_slot,
3126 struct btrfs_extent_item);
3127 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
3128 key.type == BTRFS_EXTENT_ITEM_KEY) {
3129 struct btrfs_tree_block_info *bi;
3130
3131 if (item_size < sizeof(*ei) + sizeof(*bi)) {
3132 abort_and_dump(trans, path,
3133 "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu",
3134 key.objectid, key.type, key.offset,
3135 path->slots[0], owner_objectid, item_size,
3136 sizeof(*ei) + sizeof(*bi));
3137 ret = -EUCLEAN;
3138 goto out;
3139 }
3140 bi = (struct btrfs_tree_block_info *)(ei + 1);
3141 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3142 }
3143
3144 refs = btrfs_extent_refs(leaf, ei);
3145 if (refs < refs_to_drop) {
3146 abort_and_dump(trans, path,
3147 "trying to drop %d refs but we only have %llu for bytenr %llu slot %u",
3148 refs_to_drop, refs, bytenr, path->slots[0]);
3149 ret = -EUCLEAN;
3150 goto out;
3151 }
3152 refs -= refs_to_drop;
3153
3154 if (refs > 0) {
3155 if (extent_op)
3156 __run_delayed_extent_op(extent_op, leaf, ei);
3157 /*
3158 * In the case of inline back ref, reference count will
3159 * be updated by remove_extent_backref
3160 */
3161 if (iref) {
3162 if (!found_extent) {
3163 abort_and_dump(trans, path,
3164 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u",
3165 path->slots[0]);
3166 ret = -EUCLEAN;
3167 goto out;
3168 }
3169 } else {
3170 btrfs_set_extent_refs(leaf, ei, refs);
3171 btrfs_mark_buffer_dirty(trans, leaf);
3172 }
3173 if (found_extent) {
3174 ret = remove_extent_backref(trans, extent_root, path,
3175 iref, refs_to_drop, is_data);
3176 if (ret) {
3177 btrfs_abort_transaction(trans, ret);
3178 goto out;
3179 }
3180 }
3181 } else {
3182 /* In this branch refs == 1 */
3183 if (found_extent) {
3184 if (is_data && refs_to_drop !=
3185 extent_data_ref_count(path, iref)) {
3186 abort_and_dump(trans, path,
3187 "invalid refs_to_drop, current refs %u refs_to_drop %u slot %u",
3188 extent_data_ref_count(path, iref),
3189 refs_to_drop, path->slots[0]);
3190 ret = -EUCLEAN;
3191 goto out;
3192 }
3193 if (iref) {
3194 if (path->slots[0] != extent_slot) {
3195 abort_and_dump(trans, path,
3196 "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref",
3197 key.objectid, key.type,
3198 key.offset, path->slots[0]);
3199 ret = -EUCLEAN;
3200 goto out;
3201 }
3202 } else {
3203 /*
3204 * No inline ref, we must be at a SHARED_* item,
3205 * and since it's a single ref the layout must be:
3206 * | extent_slot ||extent_slot + 1|
3207 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
3208 */
3209 if (path->slots[0] != extent_slot + 1) {
3210 abort_and_dump(trans, path,
3211 "invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM",
3212 path->slots[0]);
3213 ret = -EUCLEAN;
3214 goto out;
3215 }
3216 path->slots[0] = extent_slot;
3217 num_to_del = 2;
3218 }
3219 }
3220
3221 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3222 num_to_del);
3223 if (ret) {
3224 btrfs_abort_transaction(trans, ret);
3225 goto out;
3226 }
3227 btrfs_release_path(path);
3228
3229 ret = do_free_extent_accounting(trans, bytenr, num_bytes, is_data);
3230 }
3231 btrfs_release_path(path);
3232
3233 out:
3234 btrfs_free_path(path);
3235 return ret;
3236 }
3237
3238 /*
3239 * when we free a block, it is possible (and likely) that we free the last
3240 * delayed ref for that extent as well. This searches the delayed ref tree for
3241 * a given extent, and if there are no other delayed refs to be processed, it
3242 * removes it from the tree.
3243 */
3244 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3245 u64 bytenr)
3246 {
3247 struct btrfs_delayed_ref_head *head;
3248 struct btrfs_delayed_ref_root *delayed_refs;
3249 int ret = 0;
3250
3251 delayed_refs = &trans->transaction->delayed_refs;
3252 spin_lock(&delayed_refs->lock);
3253 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
3254 if (!head)
3255 goto out_delayed_unlock;
3256
3257 spin_lock(&head->lock);
3258 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
3259 goto out;
3260
3261 if (cleanup_extent_op(head) != NULL)
3262 goto out;
3263
3264 /*
3265 * waiting for the lock here would deadlock. If someone else has it
3266 * locked, they are already in the process of dropping it anyway.
3267 */
3268 if (!mutex_trylock(&head->mutex))
3269 goto out;
3270
3271 btrfs_delete_ref_head(delayed_refs, head);
3272 head->processing = false;
3273
3274 spin_unlock(&head->lock);
3275 spin_unlock(&delayed_refs->lock);
3276
3277 BUG_ON(head->extent_op);
3278 if (head->must_insert_reserved)
3279 ret = 1;
3280
3281 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
3282 mutex_unlock(&head->mutex);
3283 btrfs_put_delayed_ref_head(head);
3284 return ret;
3285 out:
3286 spin_unlock(&head->lock);
3287
3288 out_delayed_unlock:
3289 spin_unlock(&delayed_refs->lock);
3290 return 0;
3291 }
3292
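/*
 * Descriptive comment (added editorially): drop our ref on a btree block.
 * For non-log trees this queues a delayed ref; if that was the last ref
 * and the block was allocated in the current transaction and never
 * written, its space is released (or pinned, for zoned filesystems and
 * tree mod log users) right away instead of waiting for delayed ref
 * processing.
 */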
3293 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
3294 u64 root_id,
3295 struct extent_buffer *buf,
3296 u64 parent, int last_ref)
3297 {
3298 struct btrfs_fs_info *fs_info = trans->fs_info;
3299 struct btrfs_ref generic_ref = { 0 };
3300 int ret;
3301
3302 btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
3303 buf->start, buf->len, parent);
3304 btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
3305 root_id, 0, false);
3306
3307 if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3308 btrfs_ref_tree_mod(fs_info, &generic_ref);
3309 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
3310 BUG_ON(ret); /* -ENOMEM */
3311 }
3312
3313 if (last_ref && btrfs_header_generation(buf) == trans->transid) {
3314 struct btrfs_block_group *cache;
3315 bool must_pin = false;
3316
3317 if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3318 ret = check_ref_cleanup(trans, buf->start);
3319 if (!ret) {
3320 btrfs_redirty_list_add(trans->transaction, buf);
3321 goto out;
3322 }
3323 }
3324
3325 cache = btrfs_lookup_block_group(fs_info, buf->start);
3326
3327 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3328 pin_down_extent(trans, cache, buf->start, buf->len, 1);
3329 btrfs_put_block_group(cache);
3330 goto out;
3331 }
3332
3333 /*
3334 * If there are tree mod log users we may have recorded mod log
3335 * operations for this node. If we re-allocate this node we
3336 * could replay operations on this node that happened when it
3337 * existed in a completely different root. For example if it
3338 * was part of root A, then was reallocated to root B, and we
3339 * are doing a btrfs_old_search_slot(root b), we could replay
3340 * operations that happened when the block was part of root A,
3341 * giving us an inconsistent view of the btree.
3342 *
3343 * We are safe from races here because at this point no other
3344 * node or root points to this extent buffer, so if after this
3345 * check a new tree mod log user joins we will not have an
3346 * existing log of operations on this node that we have to
3347 * contend with.
3348 */
3349 if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
3350 must_pin = true;
3351
3352 if (must_pin || btrfs_is_zoned(fs_info)) {
3353 btrfs_redirty_list_add(trans->transaction, buf);
3354 pin_down_extent(trans, cache, buf->start, buf->len, 1);
3355 btrfs_put_block_group(cache);
3356 goto out;
3357 }
3358
3359 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
3360
3361 btrfs_add_free_space(cache, buf->start, buf->len);
3362 btrfs_free_reserved_bytes(cache, buf->len, 0);
3363 btrfs_put_block_group(cache);
3364 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
3365 }
3366 out:
3367 if (last_ref) {
3368 /*
3369 * Deleting the buffer, clear the corrupt flag since it doesn't
3370 * matter anymore.
3371 */
3372 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
3373 }
3374 }
3375
3376 /* Can return -ENOMEM */
3377 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
3378 {
3379 struct btrfs_fs_info *fs_info = trans->fs_info;
3380 int ret;
3381
3382 if (btrfs_is_testing(fs_info))
3383 return 0;
3384
3385 /*
3386 * tree log blocks never actually go into the extent allocation
3387 * tree, just update pinning info and exit early.
3388 */
3389 if ((ref->type == BTRFS_REF_METADATA &&
3390 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
3391 (ref->type == BTRFS_REF_DATA &&
3392 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) {
3393 /* unlocks the pinned mutex */
3394 btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
3395 ret = 0;
3396 } else if (ref->type == BTRFS_REF_METADATA) {
3397 ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
3398 } else {
3399 ret = btrfs_add_delayed_data_ref(trans, ref, 0);
3400 }
3401
3402 if (!((ref->type == BTRFS_REF_METADATA &&
3403 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
3404 (ref->type == BTRFS_REF_DATA &&
3405 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)))
3406 btrfs_ref_tree_mod(fs_info, ref);
3407
3408 return ret;
3409 }
3410
3411 enum btrfs_loop_type {
3412 /*
3413 * Start caching block groups but do not wait for progress or for them
3414 * to be done.
3415 */
3416 LOOP_CACHING_NOWAIT,
3417
3418 /*
3419 * If the block group isn't cached yet, wait until its free_space is at
3420 * least the amount of space we're waiting for.
3421 */
3422 LOOP_CACHING_WAIT,
3423
3424 /*
3425 * Allow allocations to happen from block groups that do not yet have a
3426 * size classification.
3427 */
3428 LOOP_UNSET_SIZE_CLASS,
3429
3430 /*
3431 * Allocate a chunk and then retry the allocation.
3432 */
3433 LOOP_ALLOC_CHUNK,
3434
3435 /*
3436 * Ignore the size class restrictions for this allocation.
3437 */
3438 LOOP_WRONG_SIZE_CLASS,
3439
3440 /*
3441 * Ignore the empty size, only try to allocate the number of bytes
3442 * needed for this allocation.
3443 */
3444 LOOP_NO_EMPTY_SIZE,
3445 };
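
/*
 * Editorial note: find_free_extent() walks these stages in the order they
 * are declared, moving to the next stage each time a full pass over the
 * block groups fails, so each stage progressively relaxes the allocation
 * constraints.
 */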
3446
3447 static inline void
3448 btrfs_lock_block_group(struct btrfs_block_group *cache,
3449 int delalloc)
3450 {
3451 if (delalloc)
3452 down_read(&cache->data_rwsem);
3453 }
3454
3455 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
3456 int delalloc)
3457 {
3458 btrfs_get_block_group(cache);
3459 if (delalloc)
3460 down_read(&cache->data_rwsem);
3461 }
3462
3463 static struct btrfs_block_group *btrfs_lock_cluster(
3464 struct btrfs_block_group *block_group,
3465 struct btrfs_free_cluster *cluster,
3466 int delalloc)
3467 __acquires(&cluster->refill_lock)
3468 {
3469 struct btrfs_block_group *used_bg = NULL;
3470
3471 spin_lock(&cluster->refill_lock);
3472 while (1) {
3473 used_bg = cluster->block_group;
3474 if (!used_bg)
3475 return NULL;
3476
3477 if (used_bg == block_group)
3478 return used_bg;
3479
3480 btrfs_get_block_group(used_bg);
3481
3482 if (!delalloc)
3483 return used_bg;
3484
3485 if (down_read_trylock(&used_bg->data_rwsem))
3486 return used_bg;
3487
3488 spin_unlock(&cluster->refill_lock);
3489
3490 /* We should only have one-level nested. */
3491 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
3492
3493 spin_lock(&cluster->refill_lock);
3494 if (used_bg == cluster->block_group)
3495 return used_bg;
3496
3497 up_read(&used_bg->data_rwsem);
3498 btrfs_put_block_group(used_bg);
3499 }
3500 }
3501
3502 static inline void
3503 btrfs_release_block_group(struct btrfs_block_group *cache,
3504 int delalloc)
3505 {
3506 if (delalloc)
3507 up_read(&cache->data_rwsem);
3508 btrfs_put_block_group(cache);
3509 }
3510
3511 /*
3512 * Helper function for find_free_extent().
3513 *
3514 * Return -ENOENT to inform caller that we need to fall back to unclustered mode.
3515 * Return >0 to inform caller that we found nothing.
3516 * Return 0 means we have found a location and set ffe_ctl->found_offset.
3517 */
3518 static int find_free_extent_clustered(struct btrfs_block_group *bg,
3519 struct find_free_extent_ctl *ffe_ctl,
3520 struct btrfs_block_group **cluster_bg_ret)
3521 {
3522 struct btrfs_block_group *cluster_bg;
3523 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3524 u64 aligned_cluster;
3525 u64 offset;
3526 int ret;
3527
3528 cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
3529 if (!cluster_bg)
3530 goto refill_cluster;
3531 if (cluster_bg != bg && (cluster_bg->ro ||
3532 !block_group_bits(cluster_bg, ffe_ctl->flags)))
3533 goto release_cluster;
3534
3535 offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
3536 ffe_ctl->num_bytes, cluster_bg->start,
3537 &ffe_ctl->max_extent_size);
3538 if (offset) {
3539 /* We have a block, we're done */
3540 spin_unlock(&last_ptr->refill_lock);
3541 trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl);
3542 *cluster_bg_ret = cluster_bg;
3543 ffe_ctl->found_offset = offset;
3544 return 0;
3545 }
3546 WARN_ON(last_ptr->block_group != cluster_bg);
3547
3548 release_cluster:
3549 /*
3550 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
3551 * let's just skip it and let the allocator find whatever block it can
3552 * find. If we reach this point, we will have tried the cluster
3553 * allocator plenty of times and not have found anything, so we are
3554 * likely way too fragmented for the clustering stuff to find anything.
3555 *
3556 * However, if the cluster is taken from the current block group,
3557 * release the cluster first, so that we stand a better chance of
3558 * succeeding in the unclustered allocation.
3559 */
3560 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
3561 spin_unlock(&last_ptr->refill_lock);
3562 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3563 return -ENOENT;
3564 }
3565
3566 /* This cluster didn't work out, free it and start over */
3567 btrfs_return_cluster_to_free_space(NULL, last_ptr);
3568
3569 if (cluster_bg != bg)
3570 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3571
3572 refill_cluster:
3573 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
3574 spin_unlock(&last_ptr->refill_lock);
3575 return -ENOENT;
3576 }
3577
3578 aligned_cluster = max_t(u64,
3579 ffe_ctl->empty_cluster + ffe_ctl->empty_size,
3580 bg->full_stripe_len);
3581 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
3582 ffe_ctl->num_bytes, aligned_cluster);
3583 if (ret == 0) {
3584 /* Now pull our allocation out of this cluster */
3585 offset = btrfs_alloc_from_cluster(bg, last_ptr,
3586 ffe_ctl->num_bytes, ffe_ctl->search_start,
3587 &ffe_ctl->max_extent_size);
3588 if (offset) {
3589 /* We found one, proceed */
3590 spin_unlock(&last_ptr->refill_lock);
3591 ffe_ctl->found_offset = offset;
3592 trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
3593 return 0;
3594 }
3595 }
3596 /*
3597 * At this point we either didn't find a cluster or we weren't able to
3598 * allocate a block from our cluster. Free the cluster we've been
3599 * trying to use, and go to the next block group.
3600 */
3601 btrfs_return_cluster_to_free_space(NULL, last_ptr);
3602 spin_unlock(&last_ptr->refill_lock);
3603 return 1;
3604 }
3605
3606 /*
3607 * Return >0 to inform the caller that we found nothing.
3608 * Return 0 when we found a free extent and set ffe_ctl->found_offset.
3609 */
3610 static int find_free_extent_unclustered(struct btrfs_block_group *bg,
3611 struct find_free_extent_ctl *ffe_ctl)
3612 {
3613 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3614 u64 offset;
3615
3616 /*
3617 * We are doing an unclustered allocation, set the fragmented flag so
3618 * we don't bother trying to set up a cluster again until we get more
3619 * space.
3620 */
3621 if (unlikely(last_ptr)) {
3622 spin_lock(&last_ptr->lock);
3623 last_ptr->fragmented = 1;
3624 spin_unlock(&last_ptr->lock);
3625 }
3626 if (ffe_ctl->cached) {
3627 struct btrfs_free_space_ctl *free_space_ctl;
3628
3629 free_space_ctl = bg->free_space_ctl;
3630 spin_lock(&free_space_ctl->tree_lock);
3631 if (free_space_ctl->free_space <
3632 ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
3633 ffe_ctl->empty_size) {
3634 ffe_ctl->total_free_space = max_t(u64,
3635 ffe_ctl->total_free_space,
3636 free_space_ctl->free_space);
3637 spin_unlock(&free_space_ctl->tree_lock);
3638 return 1;
3639 }
3640 spin_unlock(&free_space_ctl->tree_lock);
3641 }
3642
3643 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
3644 ffe_ctl->num_bytes, ffe_ctl->empty_size,
3645 &ffe_ctl->max_extent_size);
3646 if (!offset)
3647 return 1;
3648 ffe_ctl->found_offset = offset;
3649 return 0;
3650 }
3651
3652 static int do_allocation_clustered(struct btrfs_block_group *block_group,
3653 struct find_free_extent_ctl *ffe_ctl,
3654 struct btrfs_block_group **bg_ret)
3655 {
3656 int ret;
3657
3658 /* We want to try and use the cluster allocator, so let's look there */
3659 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
3660 ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
3661 if (ret >= 0)
3662 return ret;
3663 /* ret == -ENOENT case falls through */
3664 }
3665
3666 return find_free_extent_unclustered(block_group, ffe_ctl);
3667 }
3668
3669 /*
3670 * Tree-log block group locking
3671 * ============================
3672 *
3673 * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which
3674 * indicates the starting address of a block group, which is reserved only
3675 * for tree-log metadata.
3676 *
3677 * Lock nesting
3678 * ============
3679 *
3680 * space_info::lock
3681 * block_group::lock
3682 * fs_info::treelog_bg_lock
3683 */
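
/*
 * An illustrative sketch of the nesting documented above, mirroring what
 * do_allocation_zoned() below actually does (locks taken top-down and
 * released bottom-up):
 *
 *	spin_lock(&space_info->lock);
 *	spin_lock(&block_group->lock);
 *	spin_lock(&fs_info->treelog_bg_lock);
 *	...
 *	spin_unlock(&fs_info->treelog_bg_lock);
 *	spin_unlock(&block_group->lock);
 *	spin_unlock(&space_info->lock);
 */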
3684
3685 /*
3686 * Simple allocator for sequential-only block groups. It only allows
3687 * sequential allocation, so there is no need to play with trees. This
3688 * function also reserves the bytes as in btrfs_add_reserved_bytes.
3689 */
3690 static int do_allocation_zoned(struct btrfs_block_group *block_group,
3691 struct find_free_extent_ctl *ffe_ctl,
3692 struct btrfs_block_group **bg_ret)
3693 {
3694 struct btrfs_fs_info *fs_info = block_group->fs_info;
3695 struct btrfs_space_info *space_info = block_group->space_info;
3696 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3697 u64 start = block_group->start;
3698 u64 num_bytes = ffe_ctl->num_bytes;
3699 u64 avail;
3700 u64 bytenr = block_group->start;
3701 u64 log_bytenr;
3702 u64 data_reloc_bytenr;
3703 int ret = 0;
3704 bool skip = false;
3705
3706 ASSERT(btrfs_is_zoned(block_group->fs_info));
3707
3708 /*
3709 * Do not allow non-tree-log blocks in the dedicated tree-log block
3710 * group, and vice versa.
3711 */
3712 spin_lock(&fs_info->treelog_bg_lock);
3713 log_bytenr = fs_info->treelog_bg;
3714 if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) ||
3715 (!ffe_ctl->for_treelog && bytenr == log_bytenr)))
3716 skip = true;
3717 spin_unlock(&fs_info->treelog_bg_lock);
3718 if (skip)
3719 return 1;
3720
3721 /*
3722 * Do not allow non-relocation blocks in the dedicated relocation block
3723 * group, and vice versa.
3724 */
3725 spin_lock(&fs_info->relocation_bg_lock);
3726 data_reloc_bytenr = fs_info->data_reloc_bg;
3727 if (data_reloc_bytenr &&
3728 ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) ||
3729 (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr)))
3730 skip = true;
3731 spin_unlock(&fs_info->relocation_bg_lock);
3732 if (skip)
3733 return 1;
3734
3735 /* Check RO and no space case before trying to activate it */
3736 spin_lock(&block_group->lock);
3737 if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) {
3738 ret = 1;
3739 /*
3740 * May need to clear fs_info->{treelog,data_reloc}_bg.
3741 * Return the error after taking the locks.
3742 */
3743 }
3744 spin_unlock(&block_group->lock);
3745
3746 /* Metadata block group is activated at write time. */
3747 if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
3748 !btrfs_zone_activate(block_group)) {
3749 ret = 1;
3750 /*
3751 * May need to clear fs_info->{treelog,data_reloc}_bg.
3752 * Return the error after taking the locks.
3753 */
3754 }
3755
3756 spin_lock(&space_info->lock);
3757 spin_lock(&block_group->lock);
3758 spin_lock(&fs_info->treelog_bg_lock);
3759 spin_lock(&fs_info->relocation_bg_lock);
3760
3761 if (ret)
3762 goto out;
3763
3764 ASSERT(!ffe_ctl->for_treelog ||
3765 block_group->start == fs_info->treelog_bg ||
3766 fs_info->treelog_bg == 0);
3767 ASSERT(!ffe_ctl->for_data_reloc ||
3768 block_group->start == fs_info->data_reloc_bg ||
3769 fs_info->data_reloc_bg == 0);
3770
3771 if (block_group->ro ||
3772 (!ffe_ctl->for_data_reloc &&
3773 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
3774 ret = 1;
3775 goto out;
3776 }
3777
3778 /*
3779 * Do not allow a currently used block group to become the dedicated
3780 * tree-log block group.
3781 */
3782 if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
3783 (block_group->used || block_group->reserved)) {
3784 ret = 1;
3785 goto out;
3786 }
3787
3788 /*
3789 * Do not allow a currently used block group to become the dedicated
3790 * data relocation block group.
3791 */
3792 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
3793 (block_group->used || block_group->reserved)) {
3794 ret = 1;
3795 goto out;
3796 }
3797
3798 WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
3799 avail = block_group->zone_capacity - block_group->alloc_offset;
3800 if (avail < num_bytes) {
3801 if (ffe_ctl->max_extent_size < avail) {
3802 /*
3803 * With the sequential allocator, free space is
3804 * always contiguous.
3805 */
3806 ffe_ctl->max_extent_size = avail;
3807 ffe_ctl->total_free_space = avail;
3808 }
3809 ret = 1;
3810 goto out;
3811 }
3812
3813 if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
3814 fs_info->treelog_bg = block_group->start;
3815
3816 if (ffe_ctl->for_data_reloc) {
3817 if (!fs_info->data_reloc_bg)
3818 fs_info->data_reloc_bg = block_group->start;
3819 /*
3820 * Do not allow allocations from this block group, unless it is
3821 * for data relocation. Compared to increasing the ->ro, setting
3822 * the ->zoned_data_reloc_ongoing flag still allows nocow
3823 * writers to come in. See btrfs_inc_nocow_writers().
3824 *
3825 * We need to disable allocations here to avoid allocating a
3826 * regular (non-relocation data) extent. With a mix of relocation
3827 * extents and regular extents, we can dispatch WRITE commands
3828 * (for relocation extents) and ZONE APPEND commands (for
3829 * regular extents) at the same time to the same zone, which
3830 * easily breaks the write pointer.
3831 *
3832 * Also, this flag prevents this block group from being zone finished.
3833 */
3834 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
3835 }
3836
3837 ffe_ctl->found_offset = start + block_group->alloc_offset;
3838 block_group->alloc_offset += num_bytes;
3839 spin_lock(&ctl->tree_lock);
3840 ctl->free_space -= num_bytes;
3841 spin_unlock(&ctl->tree_lock);
3842
3843 /*
3844 * We do not check if found_offset is aligned to stripesize. The
3845 * address is rewritten anyway when using zone append writing.
3846 */
3847
3848 ffe_ctl->search_start = ffe_ctl->found_offset;
3849
3850 out:
3851 if (ret && ffe_ctl->for_treelog)
3852 fs_info->treelog_bg = 0;
3853 if (ret && ffe_ctl->for_data_reloc)
3854 fs_info->data_reloc_bg = 0;
3855 spin_unlock(&fs_info->relocation_bg_lock);
3856 spin_unlock(&fs_info->treelog_bg_lock);
3857 spin_unlock(&block_group->lock);
3858 spin_unlock(&space_info->lock);
3859 return ret;
3860 }
3861
3862 static int do_allocation(struct btrfs_block_group *block_group,
3863 struct find_free_extent_ctl *ffe_ctl,
3864 struct btrfs_block_group **bg_ret)
3865 {
3866 switch (ffe_ctl->policy) {
3867 case BTRFS_EXTENT_ALLOC_CLUSTERED:
3868 return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
3869 case BTRFS_EXTENT_ALLOC_ZONED:
3870 return do_allocation_zoned(block_group, ffe_ctl, bg_ret);
3871 default:
3872 BUG();
3873 }
3874 }
3875
3876 static void release_block_group(struct btrfs_block_group *block_group,
3877 struct find_free_extent_ctl *ffe_ctl,
3878 int delalloc)
3879 {
3880 switch (ffe_ctl->policy) {
3881 case BTRFS_EXTENT_ALLOC_CLUSTERED:
3882 ffe_ctl->retry_uncached = false;
3883 break;
3884 case BTRFS_EXTENT_ALLOC_ZONED:
3885 /* Nothing to do */
3886 break;
3887 default:
3888 BUG();
3889 }
3890
3891 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
3892 ffe_ctl->index);
3893 btrfs_release_block_group(block_group, delalloc);
3894 }
3895
3896 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
3897 struct btrfs_key *ins)
3898 {
3899 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3900
3901 if (!ffe_ctl->use_cluster && last_ptr) {
3902 spin_lock(&last_ptr->lock);
3903 last_ptr->window_start = ins->objectid;
3904 spin_unlock(&last_ptr->lock);
3905 }
3906 }
3907
3908 static void found_extent(struct find_free_extent_ctl *ffe_ctl,
3909 struct btrfs_key *ins)
3910 {
3911 switch (ffe_ctl->policy) {
3912 case BTRFS_EXTENT_ALLOC_CLUSTERED:
3913 found_extent_clustered(ffe_ctl, ins);
3914 break;
3915 case BTRFS_EXTENT_ALLOC_ZONED:
3916 /* Nothing to do */
3917 break;
3918 default:
3919 BUG();
3920 }
3921 }
3922
3923 static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
3924 struct find_free_extent_ctl *ffe_ctl)
3925 {
3926 /* A block group's activeness is not a requirement for METADATA block groups. */
3927 if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA))
3928 return 0;
3929
3930 /* If we can activate a new zone, just allocate a chunk and use it */
3931 if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
3932 return 0;
3933
3934 /*
3935 * We already reached the max active zones. Try to finish one block
3936 * group to make room for a new block group. This is only possible
3937 * for a data block group because btrfs_zone_finish() may need to wait
3938 * for a running transaction which can cause a deadlock for metadata
3939 * allocation.
3940 */
3941 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
3942 int ret = btrfs_zone_finish_one_bg(fs_info);
3943
3944 if (ret == 1)
3945 return 0;
3946 else if (ret < 0)
3947 return ret;
3948 }
3949
3950 /*
3951 * If we have enough free space left in an already active block group
3952 * and we can't activate any other zone now, do not allow allocating a
3953 * new chunk and let find_free_extent() retry with a smaller size.
3954 */
3955 if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size)
3956 return -ENOSPC;
3957
3958 /*
3959 * Not even min_alloc_size is left in any block group. Since we cannot
3960 * activate a new block group, allocating a new chunk may not help. Tell
3961 * the caller to try again and hope it makes progress by writing some
3962 * parts of the region. That is only possible for data block groups,
3963 * where a part of the region can be written.
3964 */
3965 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)
3966 return -EAGAIN;
3967
3968 /*
3969 * We cannot activate a new block group and there is not enough space
3970 * left in any block group. So, allocating a new block group may not
3971 * help. But there is nothing else to do anyway, so let's go with it.
3972 */
3973 return 0;
3974 }
3975
3976 static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
3977 struct find_free_extent_ctl *ffe_ctl)
3978 {
3979 switch (ffe_ctl->policy) {
3980 case BTRFS_EXTENT_ALLOC_CLUSTERED:
3981 return 0;
3982 case BTRFS_EXTENT_ALLOC_ZONED:
3983 return can_allocate_chunk_zoned(fs_info, ffe_ctl);
3984 default:
3985 BUG();
3986 }
3987 }
3988
3989 /*
3990 * Return >0 means caller needs to re-search for free extent
3991 * Return 0 means we have the needed free extent.
3992 * Return <0 means we failed to locate any free extent.
3993 */
3994 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
3995 struct btrfs_key *ins,
3996 struct find_free_extent_ctl *ffe_ctl,
3997 bool full_search)
3998 {
3999 struct btrfs_root *root = fs_info->chunk_root;
4000 int ret;
4001
4002 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
4003 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
4004 ffe_ctl->orig_have_caching_bg = true;
4005
4006 if (ins->objectid) {
4007 found_extent(ffe_ctl, ins);
4008 return 0;
4009 }
4010
4011 if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
4012 return 1;
4013
4014 ffe_ctl->index++;
4015 if (ffe_ctl->index < BTRFS_NR_RAID_TYPES)
4016 return 1;
4017
4018 /* See the comments for btrfs_loop_type for an explanation of the phases. */
4019 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
4020 ffe_ctl->index = 0;
4021 /*
4022 * We want to skip the LOOP_CACHING_WAIT step if we don't have
4023 * any uncached bgs and we've already done a full search
4024 * through.
4025 */
4026 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT &&
4027 (!ffe_ctl->orig_have_caching_bg && full_search))
4028 ffe_ctl->loop++;
4029 ffe_ctl->loop++;
4030
4031 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
4032 struct btrfs_trans_handle *trans;
4033 int exist = 0;
4034
4035 /* Check if the allocation policy allows creating a new chunk */
4036 ret = can_allocate_chunk(fs_info, ffe_ctl);
4037 if (ret)
4038 return ret;
4039
4040 trans = current->journal_info;
4041 if (trans)
4042 exist = 1;
4043 else
4044 trans = btrfs_join_transaction(root);
4045
4046 if (IS_ERR(trans)) {
4047 ret = PTR_ERR(trans);
4048 return ret;
4049 }
4050
4051 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
4052 CHUNK_ALLOC_FORCE_FOR_EXTENT);
4053
4054 /* Do not bail out on ENOSPC since we can do more. */
4055 if (ret == -ENOSPC) {
4056 ret = 0;
4057 ffe_ctl->loop++;
4058 } else if (ret < 0)
4060 btrfs_abort_transaction(trans, ret);
4061 else
4062 ret = 0;
4063 if (!exist)
4064 btrfs_end_transaction(trans);
4065 if (ret)
4066 return ret;
4067 }
4068
4069 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
4070 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED)
4071 return -ENOSPC;
4072
4073 /*
4074 * Don't loop again if we already have no empty_size and
4075 * no empty_cluster.
4076 */
4077 if (ffe_ctl->empty_size == 0 &&
4078 ffe_ctl->empty_cluster == 0)
4079 return -ENOSPC;
4080 ffe_ctl->empty_size = 0;
4081 ffe_ctl->empty_cluster = 0;
4082 }
4083 return 1;
4084 }
4085 return -ENOSPC;
4086 }
4087
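/*
 * Decide whether @bg's size class is acceptable for this allocation at the
 * current loop stage. Zoned allocation and block groups that don't use size
 * classes always match. Later loop stages progressively relax the check:
 * from LOOP_UNSET_SIZE_CLASS on, unclassified block groups are allowed too,
 * and from LOOP_WRONG_SIZE_CLASS on, the classification is ignored entirely.
 */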
4088 static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl,
4089 struct btrfs_block_group *bg)
4090 {
4091 if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
4092 return true;
4093 if (!btrfs_block_group_should_use_size_class(bg))
4094 return true;
4095 if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
4096 return true;
4097 if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
4098 bg->size_class == BTRFS_BG_SZ_NONE)
4099 return true;
4100 return ffe_ctl->size_class == bg->size_class;
4101 }
4102
4103 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
4104 struct find_free_extent_ctl *ffe_ctl,
4105 struct btrfs_space_info *space_info,
4106 struct btrfs_key *ins)
4107 {
4108 /*
4109 * If our free space is heavily fragmented we may not be able to make
4110 * big contiguous allocations, so instead of doing the expensive search
4111 * for free space, simply return ENOSPC with our max_extent_size so we
4112 * can go ahead and search for a more manageable chunk.
4113 *
4114 * If our max_extent_size is large enough for our allocation simply
4115 * disable clustering, since we will likely not be able to find enough
4116 * space to create a cluster and would only induce extra latency trying.
4117 */
4118 if (space_info->max_extent_size) {
4119 spin_lock(&space_info->lock);
4120 if (space_info->max_extent_size &&
4121 ffe_ctl->num_bytes > space_info->max_extent_size) {
4122 ins->offset = space_info->max_extent_size;
4123 spin_unlock(&space_info->lock);
4124 return -ENOSPC;
4125 } else if (space_info->max_extent_size) {
4126 ffe_ctl->use_cluster = false;
4127 }
4128 spin_unlock(&space_info->lock);
4129 }
4130
4131 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
4132 &ffe_ctl->empty_cluster);
4133 if (ffe_ctl->last_ptr) {
4134 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
4135
4136 spin_lock(&last_ptr->lock);
4137 if (last_ptr->block_group)
4138 ffe_ctl->hint_byte = last_ptr->window_start;
4139 if (last_ptr->fragmented) {
4140 /*
4141 * We still set window_start so we can keep track of the
4142 * last place we found an allocation to try and save
4143 * some time.
4144 */
4145 ffe_ctl->hint_byte = last_ptr->window_start;
4146 ffe_ctl->use_cluster = false;
4147 }
4148 spin_unlock(&last_ptr->lock);
4149 }
4150
4151 return 0;
4152 }
4153
4154 static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
4155 struct find_free_extent_ctl *ffe_ctl)
4156 {
4157 if (ffe_ctl->for_treelog) {
4158 spin_lock(&fs_info->treelog_bg_lock);
4159 if (fs_info->treelog_bg)
4160 ffe_ctl->hint_byte = fs_info->treelog_bg;
4161 spin_unlock(&fs_info->treelog_bg_lock);
4162 } else if (ffe_ctl->for_data_reloc) {
4163 spin_lock(&fs_info->relocation_bg_lock);
4164 if (fs_info->data_reloc_bg)
4165 ffe_ctl->hint_byte = fs_info->data_reloc_bg;
4166 spin_unlock(&fs_info->relocation_bg_lock);
4167 } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
4168 struct btrfs_block_group *block_group;
4169
4170 spin_lock(&fs_info->zone_active_bgs_lock);
4171 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
4172 /*
4173 * No lock is needed here because avail is monotonically
4174 * decreasing, and this is just a hint.
4175 */
4176 u64 avail = block_group->zone_capacity - block_group->alloc_offset;
4177
4178 if (block_group_bits(block_group, ffe_ctl->flags) &&
4179 avail >= ffe_ctl->num_bytes) {
4180 ffe_ctl->hint_byte = block_group->start;
4181 break;
4182 }
4183 }
4184 spin_unlock(&fs_info->zone_active_bgs_lock);
4185 }
4186
4187 return 0;
4188 }
4189
4190 static int prepare_allocation(struct btrfs_fs_info *fs_info,
4191 struct find_free_extent_ctl *ffe_ctl,
4192 struct btrfs_space_info *space_info,
4193 struct btrfs_key *ins)
4194 {
4195 switch (ffe_ctl->policy) {
4196 case BTRFS_EXTENT_ALLOC_CLUSTERED:
4197 return prepare_allocation_clustered(fs_info, ffe_ctl,
4198 space_info, ins);
4199 case BTRFS_EXTENT_ALLOC_ZONED:
4200 return prepare_allocation_zoned(fs_info, ffe_ctl);
4201 default:
4202 BUG();
4203 }
4204 }
4205
4206 /*
4207 * Walks the btree of allocated extents and finds a hole of a given size.
4208 * The key ins is changed to record the hole:
4209 * ins->objectid == start position
4210 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4211 * ins->offset == the size of the hole.
4212 * Any available blocks before search_start are skipped.
4213 *
4214 * If there is no suitable free space, we will record the max size of
4215 * the free space extent currently.
4216 *
4217 * The overall logic and call chain:
4218 *
4219 * find_free_extent()
4220 * |- Iterate through all block groups
4221 * | |- Get a valid block group
4222 * | |- Try to do clustered allocation in that block group
4223 * | |- Try to do unclustered allocation in that block group
4224 * | |- Check if the result is valid
4225 * | | |- If valid, then exit
4226 * | |- Jump to next block group
4227 * |
4228 * |- Push harder to find free extents
4229 * |- If not found, re-iterate all block groups
4230 */
4231 static noinline int find_free_extent(struct btrfs_root *root,
4232 struct btrfs_key *ins,
4233 struct find_free_extent_ctl *ffe_ctl)
4234 {
4235 struct btrfs_fs_info *fs_info = root->fs_info;
4236 int ret = 0;
4237 int cache_block_group_error = 0;
4238 struct btrfs_block_group *block_group = NULL;
4239 struct btrfs_space_info *space_info;
4240 bool full_search = false;
4241
4242 WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize);
4243
4244 ffe_ctl->search_start = 0;
4245 /* For clustered allocation */
4246 ffe_ctl->empty_cluster = 0;
4247 ffe_ctl->last_ptr = NULL;
4248 ffe_ctl->use_cluster = true;
4249 ffe_ctl->have_caching_bg = false;
4250 ffe_ctl->orig_have_caching_bg = false;
4251 ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
4252 ffe_ctl->loop = 0;
4253 ffe_ctl->retry_uncached = false;
4254 ffe_ctl->cached = 0;
4255 ffe_ctl->max_extent_size = 0;
4256 ffe_ctl->total_free_space = 0;
4257 ffe_ctl->found_offset = 0;
4258 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
4259 ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes);
4260
4261 if (btrfs_is_zoned(fs_info))
4262 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED;
4263
4264 ins->type = BTRFS_EXTENT_ITEM_KEY;
4265 ins->objectid = 0;
4266 ins->offset = 0;
4267
4268 trace_find_free_extent(root, ffe_ctl);
4269
4270 space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags);
4271 if (!space_info) {
4272 btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags);
4273 return -ENOSPC;
4274 }
4275
4276 ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins);
4277 if (ret < 0)
4278 return ret;
4279
4280 ffe_ctl->search_start = max(ffe_ctl->search_start,
4281 first_logical_byte(fs_info));
4282 ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte);
4283 if (ffe_ctl->search_start == ffe_ctl->hint_byte) {
4284 block_group = btrfs_lookup_block_group(fs_info,
4285 ffe_ctl->search_start);
4286 /*
4287 * we don't want to use the block group if it doesn't match our
4288 * allocation bits, or if it's not cached.
4289 *
4290 * However if we are re-searching with an ideal block group
4291 * picked out then we don't care that the block group is cached.
4292 */
4293 if (block_group && block_group_bits(block_group, ffe_ctl->flags) &&
4294 block_group->cached != BTRFS_CACHE_NO) {
4295 down_read(&space_info->groups_sem);
4296 if (list_empty(&block_group->list) ||
4297 block_group->ro) {
4298 /*
4299 * someone is removing this block group,
4300 * we can't jump into the have_block_group
4301 * target because our list pointers are not
4302 * valid
4303 */
4304 btrfs_put_block_group(block_group);
4305 up_read(&space_info->groups_sem);
4306 } else {
4307 ffe_ctl->index = btrfs_bg_flags_to_raid_index(
4308 block_group->flags);
4309 btrfs_lock_block_group(block_group,
4310 ffe_ctl->delalloc);
4311 ffe_ctl->hinted = true;
4312 goto have_block_group;
4313 }
4314 } else if (block_group) {
4315 btrfs_put_block_group(block_group);
4316 }
4317 }
4318 search:
4319 trace_find_free_extent_search_loop(root, ffe_ctl);
4320 ffe_ctl->have_caching_bg = false;
4321 if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) ||
4322 ffe_ctl->index == 0)
4323 full_search = true;
4324 down_read(&space_info->groups_sem);
4325 list_for_each_entry(block_group,
4326 &space_info->block_groups[ffe_ctl->index], list) {
4327 struct btrfs_block_group *bg_ret;
4328
4329 ffe_ctl->hinted = false;
4330 /* If the block group is read-only, we can skip it entirely. */
4331 if (unlikely(block_group->ro)) {
4332 if (ffe_ctl->for_treelog)
4333 btrfs_clear_treelog_bg(block_group);
4334 if (ffe_ctl->for_data_reloc)
4335 btrfs_clear_data_reloc_bg(block_group);
4336 continue;
4337 }
4338
4339 btrfs_grab_block_group(block_group, ffe_ctl->delalloc);
4340 ffe_ctl->search_start = block_group->start;
4341
4342 /*
4343 * this can happen if we end up cycling through all the
4344 * raid types, but we want to make sure we only allocate
4345 * for the proper type.
4346 */
4347 if (!block_group_bits(block_group, ffe_ctl->flags)) {
4348 u64 extra = BTRFS_BLOCK_GROUP_DUP |
4349 BTRFS_BLOCK_GROUP_RAID1_MASK |
4350 BTRFS_BLOCK_GROUP_RAID56_MASK |
4351 BTRFS_BLOCK_GROUP_RAID10;
4352
4353 /*
4354 * if they asked for extra copies and this block group
4355 * doesn't provide them, bail. This does allow us to
4356 * fill raid0 from raid1.
4357 */
4358 if ((ffe_ctl->flags & extra) && !(block_group->flags & extra))
4359 goto loop;
4360
4361 /*
4362 * This block group has different flags than we want.
4363 * It's possible that we have MIXED_GROUP flag but no
4364 * block group is mixed. Just skip such block group.
4365 */
4366 btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4367 continue;
4368 }
4369
4370 have_block_group:
4371 trace_find_free_extent_have_block_group(root, ffe_ctl, block_group);
4372 ffe_ctl->cached = btrfs_block_group_done(block_group);
4373 if (unlikely(!ffe_ctl->cached)) {
4374 ffe_ctl->have_caching_bg = true;
4375 ret = btrfs_cache_block_group(block_group, false);
4376
4377 /*
4378 * If we get ENOMEM here or something else we want to
4379 * try other block groups, because it may not be fatal.
4380 * However if we can't find anything else we need to
4381 * save our return here so that we return the actual
4382 * error that caused problems, not ENOSPC.
4383 */
4384 if (ret < 0) {
4385 if (!cache_block_group_error)
4386 cache_block_group_error = ret;
4387 ret = 0;
4388 goto loop;
4389 }
4390 ret = 0;
4391 }
4392
4393 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
4394 if (!cache_block_group_error)
4395 cache_block_group_error = -EIO;
4396 goto loop;
4397 }
4398
4399 if (!find_free_extent_check_size_class(ffe_ctl, block_group))
4400 goto loop;
4401
4402 bg_ret = NULL;
4403 ret = do_allocation(block_group, ffe_ctl, &bg_ret);
4404 if (ret > 0)
4405 goto loop;
4406
4407 if (bg_ret && bg_ret != block_group) {
4408 btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4409 block_group = bg_ret;
4410 }
4411
4412 /* Checks */
4413 ffe_ctl->search_start = round_up(ffe_ctl->found_offset,
4414 fs_info->stripesize);
4415
4416 /* move on to the next group */
4417 if (ffe_ctl->search_start + ffe_ctl->num_bytes >
4418 block_group->start + block_group->length) {
4419 btrfs_add_free_space_unused(block_group,
4420 ffe_ctl->found_offset,
4421 ffe_ctl->num_bytes);
4422 goto loop;
4423 }
4424
4425 if (ffe_ctl->found_offset < ffe_ctl->search_start)
4426 btrfs_add_free_space_unused(block_group,
4427 ffe_ctl->found_offset,
4428 ffe_ctl->search_start - ffe_ctl->found_offset);
4429
4430 ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes,
4431 ffe_ctl->num_bytes,
4432 ffe_ctl->delalloc,
4433 ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS);
4434 if (ret == -EAGAIN) {
4435 btrfs_add_free_space_unused(block_group,
4436 ffe_ctl->found_offset,
4437 ffe_ctl->num_bytes);
4438 goto loop;
4439 }
4440 btrfs_inc_block_group_reservations(block_group);
4441
4442 /* We are all good, let's return */
4443 ins->objectid = ffe_ctl->search_start;
4444 ins->offset = ffe_ctl->num_bytes;
4445
4446 trace_btrfs_reserve_extent(block_group, ffe_ctl);
4447 btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4448 break;
4449 loop:
4450 if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
4451 !ffe_ctl->retry_uncached) {
4452 ffe_ctl->retry_uncached = true;
4453 btrfs_wait_block_group_cache_progress(block_group,
4454 ffe_ctl->num_bytes +
4455 ffe_ctl->empty_cluster +
4456 ffe_ctl->empty_size);
4457 goto have_block_group;
4458 }
4459 release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
4460 cond_resched();
4461 }
4462 up_read(&space_info->groups_sem);
4463
4464 ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search);
4465 if (ret > 0)
4466 goto search;
4467
4468 if (ret == -ENOSPC && !cache_block_group_error) {
4469 /*
4470 * Use ffe_ctl->total_free_space as fallback if we can't find
4471 * any contiguous hole.
4472 */
4473 if (!ffe_ctl->max_extent_size)
4474 ffe_ctl->max_extent_size = ffe_ctl->total_free_space;
4475 spin_lock(&space_info->lock);
4476 space_info->max_extent_size = ffe_ctl->max_extent_size;
4477 spin_unlock(&space_info->lock);
4478 ins->offset = ffe_ctl->max_extent_size;
4479 } else if (ret == -ENOSPC) {
4480 ret = cache_block_group_error;
4481 }
4482 return ret;
4483 }
4484
4485 /*
4486 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
4487 * hole that is at least as big as @num_bytes.
4488 *
4489 * @root - The root that will contain this extent
4490 *
4491 * @ram_bytes - The amount of space in ram that @num_bytes take. This
4492 * is used for accounting purposes. This value differs
4493 * from @num_bytes only in the case of compressed extents.
4494 *
4495 * @num_bytes - Number of bytes to allocate on-disk.
4496 *
4497 * @min_alloc_size - Indicates the minimum amount of space that the
4498 * allocator should try to satisfy. In some cases
4499 * @num_bytes may be larger than what is required and if
4500 * the filesystem is fragmented then allocation fails.
4501 * However, the presence of @min_alloc_size gives a
4502 * chance to try and satisfy the smaller allocation.
4503 *
4504 * @empty_size - A hint that you plan on doing more COW. This is the
4505 * size in bytes the allocator should try to find free
4506 * next to the block it returns. This is just a hint and
4507 * may be ignored by the allocator.
4508 *
4509 * @hint_byte - Hint to the allocator to start searching above the byte
4510 * address passed. It might be ignored.
4511 *
4512 * @ins - This key is modified to record the found hole. It will
4513 * have the following values:
4514 * ins->objectid == start position
4515 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4516 * ins->offset == the size of the hole.
4517 *
4518 * @is_data - Boolean flag indicating whether an extent is
4519 * allocated for data (true) or metadata (false)
4520 *
4521 * @delalloc - Boolean flag indicating whether this allocation is for
4522 * delalloc or not. If 'true' data_rwsem of block groups
4523 * is going to be acquired.
4524 *
4525 *
4526 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
4527 * case -ENOSPC is returned then @ins->offset will contain the size of the
4528 * largest available hole the allocator managed to find.
4529 */
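/*
 * A minimal usage sketch (mirroring the metadata case in
 * btrfs_alloc_tree_block() below), where @ram_bytes, @num_bytes and
 * @min_alloc_size are all one block size:
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
 *				   empty_size, hint, &ins, 0, 0);
 *	if (ret)
 *		return ERR_PTR(ret);
 *
 * On success, ins.objectid holds the start of the reservation and
 * ins.offset its length.
 */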
4530 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
4531 u64 num_bytes, u64 min_alloc_size,
4532 u64 empty_size, u64 hint_byte,
4533 struct btrfs_key *ins, int is_data, int delalloc)
4534 {
4535 struct btrfs_fs_info *fs_info = root->fs_info;
4536 struct find_free_extent_ctl ffe_ctl = {};
4537 bool final_tried = num_bytes == min_alloc_size;
4538 u64 flags;
4539 int ret;
4540 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
4541 bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data);
4542
4543 flags = get_alloc_profile_by_root(root, is_data);
4544 again:
4545 WARN_ON(num_bytes < fs_info->sectorsize);
4546
4547 ffe_ctl.ram_bytes = ram_bytes;
4548 ffe_ctl.num_bytes = num_bytes;
4549 ffe_ctl.min_alloc_size = min_alloc_size;
4550 ffe_ctl.empty_size = empty_size;
4551 ffe_ctl.flags = flags;
4552 ffe_ctl.delalloc = delalloc;
4553 ffe_ctl.hint_byte = hint_byte;
4554 ffe_ctl.for_treelog = for_treelog;
4555 ffe_ctl.for_data_reloc = for_data_reloc;
4556
4557 ret = find_free_extent(root, ins, &ffe_ctl);
4558 if (!ret && !is_data) {
4559 btrfs_dec_block_group_reservations(fs_info, ins->objectid);
4560 } else if (ret == -ENOSPC) {
4561 if (!final_tried && ins->offset) {
4562 num_bytes = min(num_bytes >> 1, ins->offset);
4563 num_bytes = round_down(num_bytes,
4564 fs_info->sectorsize);
4565 num_bytes = max(num_bytes, min_alloc_size);
4566 ram_bytes = num_bytes;
4567 if (num_bytes == min_alloc_size)
4568 final_tried = true;
4569 goto again;
4570 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4571 struct btrfs_space_info *sinfo;
4572
4573 sinfo = btrfs_find_space_info(fs_info, flags);
4574 btrfs_err(fs_info,
4575 "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d",
4576 flags, num_bytes, for_treelog, for_data_reloc);
4577 if (sinfo)
4578 btrfs_dump_space_info(fs_info, sinfo,
4579 num_bytes, 1);
4580 }
4581 }
4582
4583 return ret;
4584 }
4585
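/*
 * Give a reserved extent back to its block group: re-add the range to the
 * free space cache and drop the reservation. Used e.g. on error paths after
 * btrfs_reserve_extent(), see out_free_reserved in btrfs_alloc_tree_block().
 */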
4586 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
4587 u64 start, u64 len, int delalloc)
4588 {
4589 struct btrfs_block_group *cache;
4590
4591 cache = btrfs_lookup_block_group(fs_info, start);
4592 if (!cache) {
4593 btrfs_err(fs_info, "Unable to find block group for %llu",
4594 start);
4595 return -ENOSPC;
4596 }
4597
4598 btrfs_add_free_space(cache, start, len);
4599 btrfs_free_reserved_bytes(cache, len, delalloc);
4600 trace_btrfs_reserved_extent_free(fs_info, start, len);
4601
4602 btrfs_put_block_group(cache);
4603 return 0;
4604 }
4605
4606 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
4607 u64 len)
4608 {
4609 struct btrfs_block_group *cache;
4610 int ret = 0;
4611
4612 cache = btrfs_lookup_block_group(trans->fs_info, start);
4613 if (!cache) {
4614 btrfs_err(trans->fs_info, "unable to find block group for %llu",
4615 start);
4616 return -ENOSPC;
4617 }
4618
4619 ret = pin_down_extent(trans, cache, start, len, 1);
4620 btrfs_put_block_group(cache);
4621 return ret;
4622 }
4623
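/*
 * Common tail of the two alloc_reserved_* helpers below: remove the freshly
 * allocated range from the free space tree and account it as used in its
 * block group.
 */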
4624 static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr,
4625 u64 num_bytes)
4626 {
4627 struct btrfs_fs_info *fs_info = trans->fs_info;
4628 int ret;
4629
4630 ret = remove_from_free_space_tree(trans, bytenr, num_bytes);
4631 if (ret)
4632 return ret;
4633
4634 ret = btrfs_update_block_group(trans, bytenr, num_bytes, true);
4635 if (ret) {
4636 ASSERT(!ret);
4637 btrfs_err(fs_info, "update block group failed for %llu %llu",
4638 bytenr, num_bytes);
4639 return ret;
4640 }
4641
4642 trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes);
4643 return 0;
4644 }
4645
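/*
 * Insert the extent item plus its inline reference for an already reserved
 * data extent: a shared data ref when @parent is set, a keyed data ref
 * otherwise. Finishes through alloc_reserved_extent() above.
 */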
4646 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4647 u64 parent, u64 root_objectid,
4648 u64 flags, u64 owner, u64 offset,
4649 struct btrfs_key *ins, int ref_mod)
4650 {
4651 struct btrfs_fs_info *fs_info = trans->fs_info;
4652 struct btrfs_root *extent_root;
4653 int ret;
4654 struct btrfs_extent_item *extent_item;
4655 struct btrfs_extent_inline_ref *iref;
4656 struct btrfs_path *path;
4657 struct extent_buffer *leaf;
4658 int type;
4659 u32 size;
4660
4661 if (parent > 0)
4662 type = BTRFS_SHARED_DATA_REF_KEY;
4663 else
4664 type = BTRFS_EXTENT_DATA_REF_KEY;
4665
4666 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4667
4668 path = btrfs_alloc_path();
4669 if (!path)
4670 return -ENOMEM;
4671
4672 extent_root = btrfs_extent_root(fs_info, ins->objectid);
4673 ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size);
4674 if (ret) {
4675 btrfs_free_path(path);
4676 return ret;
4677 }
4678
4679 leaf = path->nodes[0];
4680 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4681 struct btrfs_extent_item);
4682 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4683 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4684 btrfs_set_extent_flags(leaf, extent_item,
4685 flags | BTRFS_EXTENT_FLAG_DATA);
4686
4687 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4688 btrfs_set_extent_inline_ref_type(leaf, iref, type);
4689 if (parent > 0) {
4690 struct btrfs_shared_data_ref *ref;
4691 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4692 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4693 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4694 } else {
4695 struct btrfs_extent_data_ref *ref;
4696 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4697 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4698 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4699 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4700 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4701 }
4702
4703 btrfs_mark_buffer_dirty(trans, path->nodes[0]);
4704 btrfs_free_path(path);
4705
4706 return alloc_reserved_extent(trans, ins->objectid, ins->offset);
4707 }
4708
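/*
 * Insert the extent item for an already reserved tree block. With the
 * SKINNY_METADATA incompat feature the level is carried in the key offset
 * and no btrfs_tree_block_info is needed; otherwise the key offset is the
 * block size and the tree block info (first key and level) is written out.
 */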
4709 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4710 struct btrfs_delayed_ref_node *node,
4711 struct btrfs_delayed_extent_op *extent_op)
4712 {
4713 struct btrfs_fs_info *fs_info = trans->fs_info;
4714 struct btrfs_root *extent_root;
4715 int ret;
4716 struct btrfs_extent_item *extent_item;
4717 struct btrfs_key extent_key;
4718 struct btrfs_tree_block_info *block_info;
4719 struct btrfs_extent_inline_ref *iref;
4720 struct btrfs_path *path;
4721 struct extent_buffer *leaf;
4722 struct btrfs_delayed_tree_ref *ref;
4723 u32 size = sizeof(*extent_item) + sizeof(*iref);
4724 u64 flags = extent_op->flags_to_set;
4725 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4726
4727 ref = btrfs_delayed_node_to_tree_ref(node);
4728
4729 extent_key.objectid = node->bytenr;
4730 if (skinny_metadata) {
4731 extent_key.offset = ref->level;
4732 extent_key.type = BTRFS_METADATA_ITEM_KEY;
4733 } else {
4734 extent_key.offset = node->num_bytes;
4735 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4736 size += sizeof(*block_info);
4737 }
4738
4739 path = btrfs_alloc_path();
4740 if (!path)
4741 return -ENOMEM;
4742
4743 extent_root = btrfs_extent_root(fs_info, extent_key.objectid);
4744 ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key,
4745 size);
4746 if (ret) {
4747 btrfs_free_path(path);
4748 return ret;
4749 }
4750
4751 leaf = path->nodes[0];
4752 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4753 struct btrfs_extent_item);
4754 btrfs_set_extent_refs(leaf, extent_item, 1);
4755 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4756 btrfs_set_extent_flags(leaf, extent_item,
4757 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4758
4759 if (skinny_metadata) {
4760 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4761 } else {
4762 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4763 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
4764 btrfs_set_tree_block_level(leaf, block_info, ref->level);
4765 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4766 }
4767
4768 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
4769 btrfs_set_extent_inline_ref_type(leaf, iref,
4770 BTRFS_SHARED_BLOCK_REF_KEY);
4771 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
4772 } else {
4773 btrfs_set_extent_inline_ref_type(leaf, iref,
4774 BTRFS_TREE_BLOCK_REF_KEY);
4775 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
4776 }
4777
4778 btrfs_mark_buffer_dirty(trans, leaf);
4779 btrfs_free_path(path);
4780
4781 return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
4782 }
4783
4784 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4785 struct btrfs_root *root, u64 owner,
4786 u64 offset, u64 ram_bytes,
4787 struct btrfs_key *ins)
4788 {
4789 struct btrfs_ref generic_ref = { 0 };
4790
4791 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
4792
4793 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
4794 ins->objectid, ins->offset, 0);
4795 btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner,
4796 offset, 0, false);
4797 btrfs_ref_tree_mod(root->fs_info, &generic_ref);
4798
4799 return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
4800 }
4801
4802 /*
4803 * This is used by the tree logging recovery code. It records that
4804 * an extent has been allocated and makes sure to clear the free
4805 * space cache bits as well.
4806 */
4807 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4808 u64 root_objectid, u64 owner, u64 offset,
4809 struct btrfs_key *ins)
4810 {
4811 struct btrfs_fs_info *fs_info = trans->fs_info;
4812 int ret;
4813 struct btrfs_block_group *block_group;
4814 struct btrfs_space_info *space_info;
4815
4816 /*
4817 * Mixed block groups will exclude before processing the log so we only
4818 * need to do the exclude dance if this fs isn't mixed.
4819 */
4820 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
4821 ret = __exclude_logged_extent(fs_info, ins->objectid,
4822 ins->offset);
4823 if (ret)
4824 return ret;
4825 }
4826
4827 block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
4828 if (!block_group)
4829 return -EINVAL;
4830
4831 space_info = block_group->space_info;
4832 spin_lock(&space_info->lock);
4833 spin_lock(&block_group->lock);
4834 space_info->bytes_reserved += ins->offset;
4835 block_group->reserved += ins->offset;
4836 spin_unlock(&block_group->lock);
4837 spin_unlock(&space_info->lock);
4838
4839 ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
4840 offset, ins, 1);
4841 if (ret)
4842 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
4843 btrfs_put_block_group(block_group);
4844 return ret;
4845 }
4846
4847 static struct extent_buffer *
4848 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4849 u64 bytenr, int level, u64 owner,
4850 enum btrfs_lock_nesting nest)
4851 {
4852 struct btrfs_fs_info *fs_info = root->fs_info;
4853 struct extent_buffer *buf;
4854 u64 lockdep_owner = owner;
4855
4856 buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
4857 if (IS_ERR(buf))
4858 return buf;
4859
4860 /*
4861 * Extra safety check in case the extent tree is corrupted and the
4862 * extent allocator chooses to use a tree block which is already used and
4863 * locked.
4864 */
4865 if (buf->lock_owner == current->pid) {
4866 btrfs_err_rl(fs_info,
4867 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
4868 buf->start, btrfs_header_owner(buf), current->pid);
4869 free_extent_buffer(buf);
4870 return ERR_PTR(-EUCLEAN);
4871 }
4872
4873 /*
4874 * The reloc trees are just snapshots, so we need them to appear to be
4875 * just like any other fs tree WRT lockdep.
4876 *
4877 * The exception however is in replace_path() in relocation, where we
4878 * hold the lock on the original fs root and then search for the reloc
4879 * root. At that point we need to make sure any reloc root buffers are
4880 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
4881 * lockdep happy.
4882 */
4883 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
4884 !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
4885 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
4886
4887 /* btrfs_clear_buffer_dirty() accesses generation field. */
4888 btrfs_set_header_generation(buf, trans->transid);
4889
4890 /*
4891 * This needs to stay, because we could allocate a freed block from an
4892 * old tree into a new tree, so we need to make sure this new block is
4893 * set to the appropriate level and owner.
4894 */
4895 btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
4896
4897 __btrfs_tree_lock(buf, nest);
4898 btrfs_clear_buffer_dirty(trans, buf);
4899 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
4900 clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags);
4901
4902 set_extent_buffer_uptodate(buf);
4903
4904 memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
4905 btrfs_set_header_level(buf, level);
4906 btrfs_set_header_bytenr(buf, buf->start);
4907 btrfs_set_header_generation(buf, trans->transid);
4908 btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
4909 btrfs_set_header_owner(buf, owner);
4910 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
4911 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
4912 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4913 buf->log_index = root->log_transid % 2;
4914 /*
4915 * We allow two log transactions at a time, so use a different
4916 * EXTENT bit to differentiate the dirty pages.
4917 */
4918 if (buf->log_index == 0)
4919 set_extent_bit(&root->dirty_log_pages, buf->start,
4920 buf->start + buf->len - 1,
4921 EXTENT_DIRTY, NULL);
4922 else
4923 set_extent_bit(&root->dirty_log_pages, buf->start,
4924 buf->start + buf->len - 1,
4925 EXTENT_NEW, NULL);
4926 } else {
4927 buf->log_index = -1;
4928 set_extent_bit(&trans->transaction->dirty_pages, buf->start,
4929 buf->start + buf->len - 1, EXTENT_DIRTY, NULL);
4930 }
4931 /* this returns a buffer locked for blocking */
4932 return buf;
4933 }
4934
4935 /*
4936 * Finds a free extent and does all the dirty work required for allocation.
4937 * Returns the tree buffer or an ERR_PTR on error.
4938 */
4939 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
4940 struct btrfs_root *root,
4941 u64 parent, u64 root_objectid,
4942 const struct btrfs_disk_key *key,
4943 int level, u64 hint,
4944 u64 empty_size,
4945 enum btrfs_lock_nesting nest)
4946 {
4947 struct btrfs_fs_info *fs_info = root->fs_info;
4948 struct btrfs_key ins;
4949 struct btrfs_block_rsv *block_rsv;
4950 struct extent_buffer *buf;
4951 struct btrfs_delayed_extent_op *extent_op;
4952 struct btrfs_ref generic_ref = { 0 };
4953 u64 flags = 0;
4954 int ret;
4955 u32 blocksize = fs_info->nodesize;
4956 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4957
4958 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4959 if (btrfs_is_testing(fs_info)) {
4960 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
4961 level, root_objectid, nest);
4962 if (!IS_ERR(buf))
4963 root->alloc_bytenr += blocksize;
4964 return buf;
4965 }
4966 #endif
4967
4968 block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
4969 if (IS_ERR(block_rsv))
4970 return ERR_CAST(block_rsv);
4971
4972 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
4973 empty_size, hint, &ins, 0, 0);
4974 if (ret)
4975 goto out_unuse;
4976
4977 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
4978 root_objectid, nest);
4979 if (IS_ERR(buf)) {
4980 ret = PTR_ERR(buf);
4981 goto out_free_reserved;
4982 }
4983
4984 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4985 if (parent == 0)
4986 parent = ins.objectid;
4987 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4988 } else
4989 BUG_ON(parent > 0);
4990
4991 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4992 extent_op = btrfs_alloc_delayed_extent_op();
4993 if (!extent_op) {
4994 ret = -ENOMEM;
4995 goto out_free_buf;
4996 }
4997 if (key)
4998 memcpy(&extent_op->key, key, sizeof(extent_op->key));
4999 else
5000 memset(&extent_op->key, 0, sizeof(extent_op->key));
5001 extent_op->flags_to_set = flags;
5002 extent_op->update_key = skinny_metadata ? false : true;
5003 extent_op->update_flags = true;
5004 extent_op->level = level;
5005
5006 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
5007 ins.objectid, ins.offset, parent);
5008 btrfs_init_tree_ref(&generic_ref, level, root_objectid,
5009 root->root_key.objectid, false);
5010 btrfs_ref_tree_mod(fs_info, &generic_ref);
5011 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
5012 if (ret)
5013 goto out_free_delayed;
5014 }
5015 return buf;
5016
5017 out_free_delayed:
5018 btrfs_free_delayed_extent_op(extent_op);
5019 out_free_buf:
5020 btrfs_tree_unlock(buf);
5021 free_extent_buffer(buf);
5022 out_free_reserved:
5023 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
5024 out_unuse:
5025 btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
5026 return ERR_PTR(ret);
5027 }
5028
5029 struct walk_control {
5030 u64 refs[BTRFS_MAX_LEVEL];
5031 u64 flags[BTRFS_MAX_LEVEL];
5032 struct btrfs_key update_progress;
5033 struct btrfs_key drop_progress;
5034 int drop_level;
5035 int stage;
5036 int level;
5037 int shared_level;
5038 int update_ref;
5039 int keep_locks;
5040 int reada_slot;
5041 int reada_count;
5042 int restarted;
5043 };
5044
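/*
 * Stages for walk_control::stage: in DROP_REFERENCE we walk down the tree
 * dropping our references; in UPDATE_BACKREF we convert the implicit back
 * refs of a shared subtree to full back refs before the drop continues (see
 * walk_down_proc() and do_walk_down() below).
 */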
5045 #define DROP_REFERENCE 1
5046 #define UPDATE_BACKREF 2
5047
5048 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5049 struct btrfs_root *root,
5050 struct walk_control *wc,
5051 struct btrfs_path *path)
5052 {
5053 struct btrfs_fs_info *fs_info = root->fs_info;
5054 u64 bytenr;
5055 u64 generation;
5056 u64 refs;
5057 u64 flags;
5058 u32 nritems;
5059 struct btrfs_key key;
5060 struct extent_buffer *eb;
5061 int ret;
5062 int slot;
5063 int nread = 0;
5064
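/*
 * Adapt the readahead window: shrink it to 2/3 (but at least 2) while we
 * are still inside the previously read-ahead range, and grow it by 3/2
 * (capped at one node's worth of pointers) once the walk has moved past it.
 */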
5065 if (path->slots[wc->level] < wc->reada_slot) {
5066 wc->reada_count = wc->reada_count * 2 / 3;
5067 wc->reada_count = max(wc->reada_count, 2);
5068 } else {
5069 wc->reada_count = wc->reada_count * 3 / 2;
5070 wc->reada_count = min_t(int, wc->reada_count,
5071 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
5072 }
5073
5074 eb = path->nodes[wc->level];
5075 nritems = btrfs_header_nritems(eb);
5076
5077 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5078 if (nread >= wc->reada_count)
5079 break;
5080
5081 cond_resched();
5082 bytenr = btrfs_node_blockptr(eb, slot);
5083 generation = btrfs_node_ptr_generation(eb, slot);
5084
5085 if (slot == path->slots[wc->level])
5086 goto reada;
5087
5088 if (wc->stage == UPDATE_BACKREF &&
5089 generation <= root->root_key.offset)
5090 continue;
5091
5092 /* We don't lock the tree block, it's OK to be racy here */
5093 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
5094 wc->level - 1, 1, &refs,
5095 &flags);
5096 /* We don't care about errors in readahead. */
5097 if (ret < 0)
5098 continue;
5099
5100 /*
5101 * This could be racy; it's conceivable that we raced and ended
5102 * up with a bogus refs count. If that's the case just skip it; if
5103 * we are actually corrupt we will notice when we look up
5104 * everything again with our locks.
5105 */
5106 if (refs == 0)
5107 continue;
5108
5109 if (wc->stage == DROP_REFERENCE) {
5110 if (refs == 1)
5111 goto reada;
5112
5113 if (wc->level == 1 &&
5114 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5115 continue;
5116 if (!wc->update_ref ||
5117 generation <= root->root_key.offset)
5118 continue;
5119 btrfs_node_key_to_cpu(eb, &key, slot);
5120 ret = btrfs_comp_cpu_keys(&key,
5121 &wc->update_progress);
5122 if (ret < 0)
5123 continue;
5124 } else {
5125 if (wc->level == 1 &&
5126 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5127 continue;
5128 }
5129 reada:
5130 btrfs_readahead_node_child(eb, slot);
5131 nread++;
5132 }
5133 wc->reada_slot = slot;
5134 }
5135
5136 /*
5137 * helper to process tree block while walking down the tree.
5138 *
5139 * when wc->stage == UPDATE_BACKREF, this function updates
5140 * back refs for pointers in the block.
5141 *
5142 * NOTE: return value 1 means we should stop walking down.
5143 */
5144 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5145 struct btrfs_root *root,
5146 struct btrfs_path *path,
5147 struct walk_control *wc, int lookup_info)
5148 {
5149 struct btrfs_fs_info *fs_info = root->fs_info;
5150 int level = wc->level;
5151 struct extent_buffer *eb = path->nodes[level];
5152 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5153 int ret;
5154
5155 if (wc->stage == UPDATE_BACKREF &&
5156 btrfs_header_owner(eb) != root->root_key.objectid)
5157 return 1;
5158
5159 /*
5160 * When the reference count of a tree block is 1, it won't increase
5161 * again. Once the full backref flag is set, we never clear it.
5162 */
5163 if (lookup_info &&
5164 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5165 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5166 ASSERT(path->locks[level]);
5167 ret = btrfs_lookup_extent_info(trans, fs_info,
5168 eb->start, level, 1,
5169 &wc->refs[level],
5170 &wc->flags[level]);
5171 BUG_ON(ret == -ENOMEM);
5172 if (ret)
5173 return ret;
5174 if (unlikely(wc->refs[level] == 0)) {
5175 btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
5176 eb->start);
5177 return -EUCLEAN;
5178 }
5179 }
5180
5181 if (wc->stage == DROP_REFERENCE) {
5182 if (wc->refs[level] > 1)
5183 return 1;
5184
5185 if (path->locks[level] && !wc->keep_locks) {
5186 btrfs_tree_unlock_rw(eb, path->locks[level]);
5187 path->locks[level] = 0;
5188 }
5189 return 0;
5190 }
5191
5192 /* wc->stage == UPDATE_BACKREF */
5193 if (!(wc->flags[level] & flag)) {
5194 ASSERT(path->locks[level]);
5195 ret = btrfs_inc_ref(trans, root, eb, 1);
5196 BUG_ON(ret); /* -ENOMEM */
5197 ret = btrfs_dec_ref(trans, root, eb, 0);
5198 BUG_ON(ret); /* -ENOMEM */
5199 ret = btrfs_set_disk_extent_flags(trans, eb, flag);
5200 BUG_ON(ret); /* -ENOMEM */
5201 wc->flags[level] |= flag;
5202 }
5203
5204 /*
5205 * the block is shared by multiple trees, so it's not good to
5206 * keep the tree lock
5207 */
5208 if (path->locks[level] && level > 0) {
5209 btrfs_tree_unlock_rw(eb, path->locks[level]);
5210 path->locks[level] = 0;
5211 }
5212 return 0;
5213 }

/*
 * This is used to verify a ref exists for this root to deal with a bug where we
 * would have a drop_progress key that hadn't been updated properly.
 */
static int check_ref_exists(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr, u64 parent,
			    int level)
{
	struct btrfs_path *path;
	struct btrfs_extent_inline_ref *iref;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = lookup_extent_backref(trans, path, &iref, bytenr,
				    root->fs_info->nodesize, parent,
				    root->root_key.objectid, level, 0);
	btrfs_free_path(path);
	if (ret == -ENOENT)
		return 0;
	if (ret < 0)
		return ret;
	return 1;
}

/*
 * Helper to process a tree block pointer.
 *
 * When wc->stage == DROP_REFERENCE, this function checks the
 * reference count of the block pointed to. If the block is shared
 * and we need to update back refs for the subtree rooted at the
 * block, this function changes wc->stage to UPDATE_BACKREF. If the
 * block is shared and there is no need to update back refs, this
 * function drops the reference to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 parent;
	struct btrfs_tree_parent_check check = { 0 };
	struct btrfs_key key;
	struct btrfs_ref ref = { 0 };
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * If the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree.
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);

	check.level = level - 1;
	check.transid = generation;
	check.owner_root = root->root_key.objectid;
	check.has_first_key = true;
	btrfs_node_key_to_cpu(path->nodes[level], &check.first_key,
			      path->slots[level]);

	next = find_extent_buffer(fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(fs_info, bytenr,
				root->root_key.objectid, level - 1);
		if (IS_ERR(next))
			return PTR_ERR(next);
		reada = 1;
	}
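	/*
	 * reada == 1 means the buffer wasn't cached; if we end up having to
	 * read it from disk below, we will kick off readahead of its
	 * siblings first.
	 */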
	btrfs_tree_lock(next);

	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0)
		goto out_unlock;

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
			  bytenr);
		ret = -EUCLEAN;
		goto out_unlock;
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(fs_info, bytenr, &check);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
	}

	level--;
	ASSERT(level == btrfs_header_level(next));
	if (level != btrfs_header_level(next)) {
		btrfs_err(root->fs_info, "mismatched level");
		ret = -EIO;
		goto out_unlock;
	}
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
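	/*
	 * We are not walking into this child: in the DROP_REFERENCE stage,
	 * drop our reference on it without recursing into its subtree.
	 */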
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
			if (root->root_key.objectid !=
			    btrfs_header_owner(path->nodes[level])) {
				btrfs_err(root->fs_info,
					  "mismatched block owner");
				ret = -EIO;
				goto out_unlock;
			}
			parent = 0;
		}

		/*
		 * If we had a drop_progress we need to verify the refs are set
		 * as expected. If we find our ref then we know that from here
		 * on out everything should be correct, and we can clear the
		 * ->restarted flag.
		 */
		if (wc->restarted) {
			ret = check_ref_exists(trans, root, bytenr, parent,
					       level - 1);
			if (ret < 0)
				goto out_unlock;
			if (ret == 0)
				goto no_delete;
			ret = 0;
			wc->restarted = 0;
		}

		/*
		 * The reloc tree doesn't contribute to qgroup numbers, and we
		 * have already accounted for them at merge time (replace_path),
		 * thus we can skip the expensive subtree trace here.
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
		    need_account) {
			ret = btrfs_qgroup_trace_subtree(trans, next,
							 generation, level - 1);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "error %d accounting shared subtree, quota is out of sync, rescan required",
					     ret);
			}
		}

		/*
		 * We need to update the next key in our walk control so we can
		 * update the drop_progress key accordingly. We don't care if
		 * find_next_key doesn't find a key because that means we're at
		 * the end and are going to clean up now.
		 */
		wc->drop_level = level;
		find_next_key(path, level, &wc->drop_progress);

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       fs_info->nodesize, parent);
		btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid,
				    0, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret)
			goto out_unlock;
	}
no_delete:
	*lookup_info = 1;
	ret = 1;

out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);

	return ret;
}

/*
 * Helper to process a tree block while walking up the tree.
 *
 * When wc->stage == DROP_REFERENCE, this function drops the
 * reference count on the block.
 *
 * When wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * Check the reference count again if the block isn't locked.
		 * We should start walking down the tree again if the
		 * reference count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			path->locks[level] = BTRFS_WRITE_LOCK;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			if (unlikely(wc->refs[level] == 0)) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
					  eb->start);
				return -EUCLEAN;
			}
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

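	/*
	 * We hold the last reference on this block: account its leaf items
	 * for qgroups, make sure the buffer is locked, and clear its dirty
	 * state before it gets freed below.
	 */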
	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			if (is_fstree(root->root_key.objectid)) {
				ret = btrfs_qgroup_trace_leaf_items(trans, eb);
				if (ret) {
					btrfs_err_rl(fs_info,
						     "error %d accounting leaf items, quota is out of sync, rescan required",
						     ret);
				}
			}
		}
		/* Make the block locked assertion in btrfs_clear_buffer_dirty happy. */
		if (!path->locks[level]) {
			btrfs_tree_lock(eb);
			path->locks[level] = BTRFS_WRITE_LOCK;
		}
		btrfs_clear_buffer_dirty(trans, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else if (root->root_key.objectid != btrfs_header_owner(eb))
			goto owner_mismatch;
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else if (root->root_key.objectid !=
			 btrfs_header_owner(path->nodes[level + 1]))
			goto owner_mismatch;
	}

	btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
			      wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;

owner_mismatch:
	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
		     btrfs_header_owner(eb), root->root_key.objectid);
	return -EUCLEAN;
}

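/*
 * Walk down the tree as far as possible, calling walk_down_proc() at each
 * level and descending via do_walk_down(), until we either reach a leaf,
 * run out of slots, or one of the helpers tells us to stop.
 */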
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret = 0;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			break;
		level = wc->level;
	}
	return (ret == 1) ? 0 : ret;
}

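/*
 * Walk back up the tree, processing fully visited blocks with
 * walk_up_proc(). Returns 0 when there is another slot to descend into,
 * 1 once we have walked all the way up to max_level, or a negative errno
 * on failure.
 */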
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;
			if (ret < 0)
				return ret;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * Drop a subvolume tree.
 *
 * This function traverses the tree, freeing any blocks that are only
 * referenced by the tree.
 *
 * When a shared tree block is found, this function decreases its
 * reference count by one. If update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN.
 */
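/*
 * Illustrative caller sketch (not part of this file): the cleaner thread
 * typically picks a dead root and calls
 *
 *	ret = btrfs_drop_snapshot(root, 0, 0);
 *
 * and, because for_reloc is 0, must be prepared for an -EAGAIN early exit,
 * after which the root is re-queued on the dead roots list and the drop is
 * resumed later from root_item->drop_progress.
 */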
int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
{
	const bool is_reloc_root = (root->root_key.objectid ==
				    BTRFS_TREE_RELOC_OBJECTID);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;
	bool unfinished_drop = false;

	btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Use join to avoid potential EINTR from transaction start. See
	 * wait_reserve_ticket and the whole reservation callchain.
	 */
	if (for_reloc)
		trans = btrfs_join_transaction(tree_root);
	else
		trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	err = btrfs_run_delayed_items(trans);
	if (err)
		goto out_end_trans;

	/*
	 * This will help us catch people modifying the fs tree while we're
	 * dropping it. It is unsafe to mess with the fs tree while it's being
	 * dropped as we unlock the root node and parent nodes as we walk down
	 * the tree, assuming nothing will change. If something does change
	 * then we'll have stale information and drop references to blocks
	 * we've already dropped.
	 */
	set_bit(BTRFS_ROOT_DELETING, &root->state);
	unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

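		/*
		 * A previous drop was interrupted: resume the search from the
		 * recorded drop_progress key at the recorded drop level
		 * instead of starting over from the root.
		 */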
		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * Unlock our path; this is safe because only this
		 * function is allowed to delete this snapshot.
		 */
		btrfs_unlock_up_safe(path, 0);

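		/*
		 * Re-lock the nodes level by level from the root down to the
		 * resume level, re-reading each block's reference count and
		 * flags so the walk control state matches a fresh walk.
		 */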
		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       path->nodes[level]->start,
						       level, 1, &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == btrfs_root_drop_level(root_item))
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			wc->drop_level = wc->level;
			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
					      &wc->drop_progress,
					      path->slots[wc->drop_level]);
		}
		btrfs_cpu_key_to_disk(&root_item->drop_progress,
				      &wc->drop_progress);
		btrfs_set_root_drop_level(root_item, wc->drop_level);

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			if (!is_reloc_root)
				btrfs_set_last_root_drop_gen(fs_info, trans->transid);

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			/*
			 * Use join to avoid potential EINTR from transaction
			 * start. See wait_reserve_ticket and the whole
			 * reservation callchain.
			 */
			if (for_reloc)
				trans = btrfs_join_transaction(tree_root);
			else
				trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (!is_reloc_root) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/*
			 * If we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	/*
	 * This subvolume is going to be completely dropped, and won't be
	 * recorded as a dirty root, thus its pertrans meta rsv will not be
	 * freed at transaction commit time. So free it here manually.
	 */
	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
	btrfs_qgroup_free_meta_all_pertrans(root);

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		btrfs_add_dropped_root(trans, root);
	else
		btrfs_put_root(root);
	root_dropped = true;
out_end_trans:
	if (!is_reloc_root)
		btrfs_set_last_root_drop_gen(fs_info, trans->transid);

	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * If we were an unfinished drop root, check to see if there are any
	 * others pending, and if not, clear the flag and wake up any waiters.
	 */
	if (!err && unfinished_drop)
		btrfs_maybe_wake_unfinished_drop(fs_info);

	/*
	 * If we need to stop dropping the snapshot for whatever reason, we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later. This also cleans up roots if we
	 * don't have them in the radix tree (like when we recover after a
	 * power failure or unmount), so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	return err;
}

/*
 * Drop the subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * Only used by relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_write_locked(parent);
	parent_level = btrfs_header_level(parent);
	atomic_inc(&parent->refs);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_write_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space. Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time. We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses. For that, we need to take a reference to the
 * transaction and hold the commit root sem. We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!bdev_max_discard_sectors(device->bdev))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

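	/*
	 * Walk the device's allocation state, discarding each range that is
	 * neither allocated nor already trimmed. The chunk mutex is held
	 * across each search + discard step so chunk allocation can't race
	 * with us, and is dropped between iterations so we don't hog it.
	 */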
	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Check if there are any CHUNK_* bits left */
		if (start > device->total_bytes) {
			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
			btrfs_warn_in_rcu(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
					  start, end - start + 1,
					  btrfs_dev_name(device),
					  device->total_bytes);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		/* Ensure we skip the reserved space on each device. */
		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);

		/*
		 * If find_first_clear_extent_bit() finds a range that spans
		 * the end of the device, it will set end to -1, in which case
		 * it's up to the caller to trim the value to the size of the
		 * device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bit(&device->alloc_state, start,
				       start + bytes - 1, CHUNK_TRIMMED, NULL);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

/*
 * Trim the whole filesystem by:
 * 1) trimming the free space in each block group
 * 2) trimming the unallocated space on each device
 *
 * This will also continue trimming even if a block group or device encounters
 * an error. The return value will be the last error, or 0 if nothing bad
 * happens.
 */
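/*
 * Illustrative example (not part of this file): the FITRIM ioctl path ends
 * up here with a caller-supplied range, e.g.
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = U64_MAX,
 *		.minlen = 0,
 *	};
 *	ret = btrfs_trim_fs(fs_info, &range);
 *
 * which trims every block group and all unallocated device space, returning
 * the number of bytes trimmed in range.len.
 */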
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *cache = NULL;
	struct btrfs_device *device;
	u64 group_trimmed;
	u64 range_end = U64_MAX;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 bg_failed = 0;
	u64 dev_failed = 0;
	int bg_ret = 0;
	int dev_ret = 0;
	int ret = 0;

	if (range->start == U64_MAX)
		return -EINVAL;

	/*
	 * Check range overflow if range->len is set.
	 * The default range->len is U64_MAX.
	 */
	if (range->len != U64_MAX &&
	    check_add_overflow(range->start, range->len, &range_end))
		return -EINVAL;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		if (cache->start >= range_end) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->start);
		end = min(range_end, cache->start + cache->length);

		if (end - start >= range->minlen) {
			if (!btrfs_block_group_done(cache)) {
				ret = btrfs_cache_block_group(cache, true);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				bg_failed++;
				bg_ret = ret;
				continue;
			}
		}
	}

	if (bg_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu block group(s), last error %d",
			   bg_failed, bg_ret);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
			continue;

		ret = btrfs_trim_free_extents(device, &group_trimmed);

		trimmed += group_trimmed;
		if (ret) {
			dev_failed++;
			dev_ret = ret;
			break;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (dev_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu device(s), last error %d",
			   dev_failed, dev_ret);
	range->len = trimmed;
	if (bg_ret)
		return bg_ret;
	return dev_ret;
}