xref: /openbmc/linux/fs/btrfs/block-group.c (revision aa5b395b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "misc.h"
4 #include "ctree.h"
5 #include "block-group.h"
6 #include "space-info.h"
7 #include "disk-io.h"
8 #include "free-space-cache.h"
9 #include "free-space-tree.h"
10 #include "disk-io.h"
11 #include "volumes.h"
12 #include "transaction.h"
13 #include "ref-verify.h"
14 #include "sysfs.h"
15 #include "tree-log.h"
16 #include "delalloc-space.h"
17 #include "discard.h"
18 #include "raid56.h"
19 
20 /*
21  * Return target flags in extended format or 0 if restripe for this chunk_type
22  * is not in progress
23  *
24  * Should be called with balance_lock held
25  */
26 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
27 {
28 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
29 	u64 target = 0;
30 
31 	if (!bctl)
32 		return 0;
33 
34 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
35 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
36 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
37 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
38 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
39 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
40 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
41 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
42 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
43 	}
44 
45 	return target;
46 }
47 
48 /*
49  * @flags: available profiles in extended format (see ctree.h)
50  *
51  * Return reduced profile in chunk format.  If profile changing is in progress
52  * (either running or paused) picks the target profile (if it's already
53  * available), otherwise falls back to plain reducing.
54  */
55 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
56 {
57 	u64 num_devices = fs_info->fs_devices->rw_devices;
58 	u64 target;
59 	u64 raid_type;
60 	u64 allowed = 0;
61 
62 	/*
63 	 * See if restripe for this chunk_type is in progress; if so, try to
64 	 * reduce to the target profile
65 	 */
66 	spin_lock(&fs_info->balance_lock);
67 	target = get_restripe_target(fs_info, flags);
68 	if (target) {
69 		/* Pick target profile only if it's already available */
70 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
71 			spin_unlock(&fs_info->balance_lock);
72 			return extended_to_chunk(target);
73 		}
74 	}
75 	spin_unlock(&fs_info->balance_lock);
76 
77 	/* First, mask out the RAID levels which aren't possible */
78 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
79 		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
80 			allowed |= btrfs_raid_array[raid_type].bg_flag;
81 	}
82 	allowed &= flags;
83 
84 	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
85 		allowed = BTRFS_BLOCK_GROUP_RAID6;
86 	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
87 		allowed = BTRFS_BLOCK_GROUP_RAID5;
88 	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
89 		allowed = BTRFS_BLOCK_GROUP_RAID10;
90 	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
91 		allowed = BTRFS_BLOCK_GROUP_RAID1;
92 	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
93 		allowed = BTRFS_BLOCK_GROUP_RAID0;
94 
95 	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
96 
97 	return extended_to_chunk(flags | allowed);
98 }
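
/*
 * Worked example (editorial note, not part of the original file): with
 * num_devices = 2 and extended flags DATA | RAID0 | RAID1, both RAID0 and
 * RAID1 survive the devs_min mask above.  The priority chain then picks
 * the most redundant remaining profile, RAID1, so the function returns
 * DATA | RAID1 converted to chunk format.
 */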
99 
100 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
101 {
102 	unsigned seq;
103 	u64 flags;
104 
105 	do {
106 		flags = orig_flags;
107 		seq = read_seqbegin(&fs_info->profiles_lock);
108 
109 		if (flags & BTRFS_BLOCK_GROUP_DATA)
110 			flags |= fs_info->avail_data_alloc_bits;
111 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
112 			flags |= fs_info->avail_system_alloc_bits;
113 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
114 			flags |= fs_info->avail_metadata_alloc_bits;
115 	} while (read_seqretry(&fs_info->profiles_lock, seq));
116 
117 	return btrfs_reduce_alloc_profile(fs_info, flags);
118 }
119 
120 void btrfs_get_block_group(struct btrfs_block_group *cache)
121 {
122 	atomic_inc(&cache->count);
123 }
124 
125 void btrfs_put_block_group(struct btrfs_block_group *cache)
126 {
127 	if (atomic_dec_and_test(&cache->count)) {
128 		WARN_ON(cache->pinned > 0);
129 		WARN_ON(cache->reserved > 0);
130 
131 		/*
132 		 * A block_group shouldn't be on the discard_list anymore.
133 		 * Remove the block_group from the discard_list to prevent us
134 		 * from causing a panic due to NULL pointer dereference.
135 		 */
136 		if (WARN_ON(!list_empty(&cache->discard_list)))
137 			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
138 						  cache);
139 
140 		/*
141 		 * If the tree is not empty, some task is still holding a full
142 		 * stripe lock, which can only be released by that task.
143 		 * Freeing the block group now would cause a use-after-free
144 		 * when the holder tries to release the lock.
145 		 *
146 		 * There is no better way to resolve this than to warn.
147 		 */
148 		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
149 		kfree(cache->free_space_ctl);
150 		kfree(cache);
151 	}
152 }
153 
154 /*
155  * This adds the block group to the fs_info rb tree for the block group cache
156  */
157 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
158 				       struct btrfs_block_group *block_group)
159 {
160 	struct rb_node **p;
161 	struct rb_node *parent = NULL;
162 	struct btrfs_block_group *cache;
163 
164 	spin_lock(&info->block_group_cache_lock);
165 	p = &info->block_group_cache_tree.rb_node;
166 
167 	while (*p) {
168 		parent = *p;
169 		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
170 		if (block_group->start < cache->start) {
171 			p = &(*p)->rb_left;
172 		} else if (block_group->start > cache->start) {
173 			p = &(*p)->rb_right;
174 		} else {
175 			spin_unlock(&info->block_group_cache_lock);
176 			return -EEXIST;
177 		}
178 	}
179 
180 	rb_link_node(&block_group->cache_node, parent, p);
181 	rb_insert_color(&block_group->cache_node,
182 			&info->block_group_cache_tree);
183 
184 	if (info->first_logical_byte > block_group->start)
185 		info->first_logical_byte = block_group->start;
186 
187 	spin_unlock(&info->block_group_cache_lock);
188 
189 	return 0;
190 }
191 
192 /*
193  * This will return the block group at or after bytenr if contains is 0, else
194  * it will return the block group that contains the bytenr
195  */
196 static struct btrfs_block_group *block_group_cache_tree_search(
197 		struct btrfs_fs_info *info, u64 bytenr, int contains)
198 {
199 	struct btrfs_block_group *cache, *ret = NULL;
200 	struct rb_node *n;
201 	u64 end, start;
202 
203 	spin_lock(&info->block_group_cache_lock);
204 	n = info->block_group_cache_tree.rb_node;
205 
206 	while (n) {
207 		cache = rb_entry(n, struct btrfs_block_group, cache_node);
208 		end = cache->start + cache->length - 1;
209 		start = cache->start;
210 
211 		if (bytenr < start) {
212 			if (!contains && (!ret || start < ret->start))
213 				ret = cache;
214 			n = n->rb_left;
215 		} else if (bytenr > start) {
216 			if (contains && bytenr <= end) {
217 				ret = cache;
218 				break;
219 			}
220 			n = n->rb_right;
221 		} else {
222 			ret = cache;
223 			break;
224 		}
225 	}
226 	if (ret) {
227 		btrfs_get_block_group(ret);
228 		if (bytenr == 0 && info->first_logical_byte > ret->start)
229 			info->first_logical_byte = ret->start;
230 	}
231 	spin_unlock(&info->block_group_cache_lock);
232 
233 	return ret;
234 }
235 
236 /*
237  * Return the block group that starts at or after bytenr
238  */
239 struct btrfs_block_group *btrfs_lookup_first_block_group(
240 		struct btrfs_fs_info *info, u64 bytenr)
241 {
242 	return block_group_cache_tree_search(info, bytenr, 0);
243 }
244 
245 /*
246  * Return the block group that contains the given bytenr
247  */
248 struct btrfs_block_group *btrfs_lookup_block_group(
249 		struct btrfs_fs_info *info, u64 bytenr)
250 {
251 	return block_group_cache_tree_search(info, bytenr, 1);
252 }
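
/*
 * Editorial example (not in the original file, compiled out): the lookup
 * helpers above return the block group with an extra reference held, so
 * every successful lookup must be paired with a btrfs_put_block_group()
 * once the caller is done.  The function name is hypothetical.
 */
#if 0
static u64 example_block_group_used(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	u64 used;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return 0;

	spin_lock(&bg->lock);
	used = bg->used;
	spin_unlock(&bg->lock);

	/* Drop the reference taken by the lookup. */
	btrfs_put_block_group(bg);
	return used;
}
#endif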
253 
254 struct btrfs_block_group *btrfs_next_block_group(
255 		struct btrfs_block_group *cache)
256 {
257 	struct btrfs_fs_info *fs_info = cache->fs_info;
258 	struct rb_node *node;
259 
260 	spin_lock(&fs_info->block_group_cache_lock);
261 
262 	/* If our block group was removed, we need a full search. */
263 	if (RB_EMPTY_NODE(&cache->cache_node)) {
264 		const u64 next_bytenr = cache->start + cache->length;
265 
266 		spin_unlock(&fs_info->block_group_cache_lock);
267 		btrfs_put_block_group(cache);
268 		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
269 	}
270 	node = rb_next(&cache->cache_node);
271 	btrfs_put_block_group(cache);
272 	if (node) {
273 		cache = rb_entry(node, struct btrfs_block_group, cache_node);
274 		btrfs_get_block_group(cache);
275 	} else
276 		cache = NULL;
277 	spin_unlock(&fs_info->block_group_cache_lock);
278 	return cache;
279 }
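
/*
 * Editorial example (not in the original file, compiled out):
 * btrfs_next_block_group() consumes the reference on the block group it
 * is given and returns the next one with a new reference, so a full scan
 * only has to drop a reference if it stops early.  The function name is
 * hypothetical.
 */
#if 0
static void example_count_block_groups(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *bg;
	unsigned int nr = 0;

	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
	     bg = btrfs_next_block_group(bg))
		nr++;

	btrfs_info(fs_info, "found %u block groups", nr);
}
#endif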
280 
281 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
282 {
283 	struct btrfs_block_group *bg;
284 	bool ret = true;
285 
286 	bg = btrfs_lookup_block_group(fs_info, bytenr);
287 	if (!bg)
288 		return false;
289 
290 	spin_lock(&bg->lock);
291 	if (bg->ro)
292 		ret = false;
293 	else
294 		atomic_inc(&bg->nocow_writers);
295 	spin_unlock(&bg->lock);
296 
297 	/* No put on block group, done by btrfs_dec_nocow_writers */
298 	if (!ret)
299 		btrfs_put_block_group(bg);
300 
301 	return ret;
302 }
303 
304 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
305 {
306 	struct btrfs_block_group *bg;
307 
308 	bg = btrfs_lookup_block_group(fs_info, bytenr);
309 	ASSERT(bg);
310 	if (atomic_dec_and_test(&bg->nocow_writers))
311 		wake_up_var(&bg->nocow_writers);
312 	/*
313 	 * Once for our lookup and once for the lookup done by a previous call
314 	 * to btrfs_inc_nocow_writers()
315 	 */
316 	btrfs_put_block_group(bg);
317 	btrfs_put_block_group(bg);
318 }
319 
320 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
321 {
322 	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
323 }
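
/*
 * Editorial example (not in the original file, compiled out): the
 * intended pairing of the nocow writer helpers above.  A writer bumps the
 * counter before doing a NOCOW write into the block group and drops it
 * afterwards; setting the group read-only then only needs to wait for the
 * counter to reach zero.  do_nocow_write_at() is hypothetical.
 */
#if 0
static int example_nocow_write(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	int ret;

	/* Fails if the block group went (or is going) read-only. */
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return -EAGAIN;

	ret = do_nocow_write_at(fs_info, bytenr);

	/* Drops both the counter and the lookup references. */
	btrfs_dec_nocow_writers(fs_info, bytenr);
	return ret;
}
#endif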
324 
325 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
326 					const u64 start)
327 {
328 	struct btrfs_block_group *bg;
329 
330 	bg = btrfs_lookup_block_group(fs_info, start);
331 	ASSERT(bg);
332 	if (atomic_dec_and_test(&bg->reservations))
333 		wake_up_var(&bg->reservations);
334 	btrfs_put_block_group(bg);
335 }
336 
337 void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
338 {
339 	struct btrfs_space_info *space_info = bg->space_info;
340 
341 	ASSERT(bg->ro);
342 
343 	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
344 		return;
345 
346 	/*
347 	 * Our block group is read only but before we set it to read only,
348 	 * some task might have allocated an extent from it already, but it
349 	 * has not yet created a respective ordered extent (and added it to a
350 	 * root's list of ordered extents).
351 	 * Therefore wait for any task currently allocating extents, since the
352 	 * block group's reservations counter is incremented while a read lock
353 	 * on the groups' semaphore is held and decremented after releasing
354 	 * the read access on that semaphore and creating the ordered extent.
355 	 */
356 	down_write(&space_info->groups_sem);
357 	up_write(&space_info->groups_sem);
358 
359 	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
360 }
361 
362 struct btrfs_caching_control *btrfs_get_caching_control(
363 		struct btrfs_block_group *cache)
364 {
365 	struct btrfs_caching_control *ctl;
366 
367 	spin_lock(&cache->lock);
368 	if (!cache->caching_ctl) {
369 		spin_unlock(&cache->lock);
370 		return NULL;
371 	}
372 
373 	ctl = cache->caching_ctl;
374 	refcount_inc(&ctl->count);
375 	spin_unlock(&cache->lock);
376 	return ctl;
377 }
378 
379 void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
380 {
381 	if (refcount_dec_and_test(&ctl->count))
382 		kfree(ctl);
383 }
384 
385 /*
386  * When we wait for progress in the block group caching, it's because our
387  * allocation attempt failed at least once.  So, we must sleep and let some
388  * progress happen before we try again.
389  *
390  * This function will sleep at least once waiting for new free space to show
391  * up, and then it will check the block group free space numbers for our min
392  * num_bytes.  Another option is to have it go ahead and look in the rbtree for
393  * a free extent of a given size, but this is a good start.
394  *
395  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
396  * any of the information in this block group.
397  */
398 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
399 					   u64 num_bytes)
400 {
401 	struct btrfs_caching_control *caching_ctl;
402 
403 	caching_ctl = btrfs_get_caching_control(cache);
404 	if (!caching_ctl)
405 		return;
406 
407 	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
408 		   (cache->free_space_ctl->free_space >= num_bytes));
409 
410 	btrfs_put_caching_control(caching_ctl);
411 }
412 
413 int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
414 {
415 	struct btrfs_caching_control *caching_ctl;
416 	int ret = 0;
417 
418 	caching_ctl = btrfs_get_caching_control(cache);
419 	if (!caching_ctl)
420 		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
421 
422 	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
423 	if (cache->cached == BTRFS_CACHE_ERROR)
424 		ret = -EIO;
425 	btrfs_put_caching_control(caching_ctl);
426 	return ret;
427 }
428 
429 #ifdef CONFIG_BTRFS_DEBUG
430 static void fragment_free_space(struct btrfs_block_group *block_group)
431 {
432 	struct btrfs_fs_info *fs_info = block_group->fs_info;
433 	u64 start = block_group->start;
434 	u64 len = block_group->length;
435 	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
436 		fs_info->nodesize : fs_info->sectorsize;
437 	u64 step = chunk << 1;
438 
439 	while (len > chunk) {
440 		btrfs_remove_free_space(block_group, start, chunk);
441 		start += step;
442 		if (len < step)
443 			len = 0;
444 		else
445 			len -= step;
446 	}
447 }
448 #endif
449 
450 /*
451  * This is only called by btrfs_cache_block_group.  Since we could have freed
452  * extents, we need to check the pinned_extents for any extents that can't be
453  * used yet, since their free space will be released as soon as the transaction
454  * commits.
455  */
456 u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
457 {
458 	struct btrfs_fs_info *info = block_group->fs_info;
459 	u64 extent_start, extent_end, size, total_added = 0;
460 	int ret;
461 
462 	while (start < end) {
463 		ret = find_first_extent_bit(info->pinned_extents, start,
464 					    &extent_start, &extent_end,
465 					    EXTENT_DIRTY | EXTENT_UPTODATE,
466 					    NULL);
467 		if (ret)
468 			break;
469 
470 		if (extent_start <= start) {
471 			start = extent_end + 1;
472 		} else if (extent_start > start && extent_start < end) {
473 			size = extent_start - start;
474 			total_added += size;
475 			ret = btrfs_add_free_space_async_trimmed(block_group,
476 								 start, size);
477 			BUG_ON(ret); /* -ENOMEM or logic error */
478 			start = extent_end + 1;
479 		} else {
480 			break;
481 		}
482 	}
483 
484 	if (start < end) {
485 		size = end - start;
486 		total_added += size;
487 		ret = btrfs_add_free_space_async_trimmed(block_group, start,
488 							 size);
489 		BUG_ON(ret); /* -ENOMEM or logic error */
490 	}
491 
492 	return total_added;
493 }
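
/*
 * Worked example (editorial note, not part of the original file): for a
 * range [0, 100) with a single pinned extent covering bytes 30-40
 * inclusive, the loop adds [0, 30) as free space, skips to byte 41, and
 * the tail block after the loop adds [41, 100), for a total_added of
 * 89 bytes.
 */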
494 
495 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
496 {
497 	struct btrfs_block_group *block_group = caching_ctl->block_group;
498 	struct btrfs_fs_info *fs_info = block_group->fs_info;
499 	struct btrfs_root *extent_root = fs_info->extent_root;
500 	struct btrfs_path *path;
501 	struct extent_buffer *leaf;
502 	struct btrfs_key key;
503 	u64 total_found = 0;
504 	u64 last = 0;
505 	u32 nritems;
506 	int ret;
507 	bool wakeup = true;
508 
509 	path = btrfs_alloc_path();
510 	if (!path)
511 		return -ENOMEM;
512 
513 	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
514 
515 #ifdef CONFIG_BTRFS_DEBUG
516 	/*
517 	 * If we're fragmenting we don't want to make anybody think we can
518 	 * allocate from this block group until we've had a chance to fragment
519 	 * the free space.
520 	 */
521 	if (btrfs_should_fragment_free_space(block_group))
522 		wakeup = false;
523 #endif
524 	/*
525 	 * We don't want to deadlock with somebody trying to allocate a new
526 	 * extent for the extent root while also trying to search the extent
527 	 * root to add free space.  So we skip locking and search the commit
528 	 * root, since its read-only
529 	 */
530 	path->skip_locking = 1;
531 	path->search_commit_root = 1;
532 	path->reada = READA_FORWARD;
533 
534 	key.objectid = last;
535 	key.offset = 0;
536 	key.type = BTRFS_EXTENT_ITEM_KEY;
537 
538 next:
539 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
540 	if (ret < 0)
541 		goto out;
542 
543 	leaf = path->nodes[0];
544 	nritems = btrfs_header_nritems(leaf);
545 
546 	while (1) {
547 		if (btrfs_fs_closing(fs_info) > 1) {
548 			last = (u64)-1;
549 			break;
550 		}
551 
552 		if (path->slots[0] < nritems) {
553 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
554 		} else {
555 			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
556 			if (ret)
557 				break;
558 
559 			if (need_resched() ||
560 			    rwsem_is_contended(&fs_info->commit_root_sem)) {
561 				if (wakeup)
562 					caching_ctl->progress = last;
563 				btrfs_release_path(path);
564 				up_read(&fs_info->commit_root_sem);
565 				mutex_unlock(&caching_ctl->mutex);
566 				cond_resched();
567 				mutex_lock(&caching_ctl->mutex);
568 				down_read(&fs_info->commit_root_sem);
569 				goto next;
570 			}
571 
572 			ret = btrfs_next_leaf(extent_root, path);
573 			if (ret < 0)
574 				goto out;
575 			if (ret)
576 				break;
577 			leaf = path->nodes[0];
578 			nritems = btrfs_header_nritems(leaf);
579 			continue;
580 		}
581 
582 		if (key.objectid < last) {
583 			key.objectid = last;
584 			key.offset = 0;
585 			key.type = BTRFS_EXTENT_ITEM_KEY;
586 
587 			if (wakeup)
588 				caching_ctl->progress = last;
589 			btrfs_release_path(path);
590 			goto next;
591 		}
592 
593 		if (key.objectid < block_group->start) {
594 			path->slots[0]++;
595 			continue;
596 		}
597 
598 		if (key.objectid >= block_group->start + block_group->length)
599 			break;
600 
601 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
602 		    key.type == BTRFS_METADATA_ITEM_KEY) {
603 			total_found += add_new_free_space(block_group, last,
604 							  key.objectid);
605 			if (key.type == BTRFS_METADATA_ITEM_KEY)
606 				last = key.objectid +
607 					fs_info->nodesize;
608 			else
609 				last = key.objectid + key.offset;
610 
611 			if (total_found > CACHING_CTL_WAKE_UP) {
612 				total_found = 0;
613 				if (wakeup)
614 					wake_up(&caching_ctl->wait);
615 			}
616 		}
617 		path->slots[0]++;
618 	}
619 	ret = 0;
620 
621 	total_found += add_new_free_space(block_group, last,
622 				block_group->start + block_group->length);
623 	caching_ctl->progress = (u64)-1;
624 
625 out:
626 	btrfs_free_path(path);
627 	return ret;
628 }
629 
630 static noinline void caching_thread(struct btrfs_work *work)
631 {
632 	struct btrfs_block_group *block_group;
633 	struct btrfs_fs_info *fs_info;
634 	struct btrfs_caching_control *caching_ctl;
635 	int ret;
636 
637 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
638 	block_group = caching_ctl->block_group;
639 	fs_info = block_group->fs_info;
640 
641 	mutex_lock(&caching_ctl->mutex);
642 	down_read(&fs_info->commit_root_sem);
643 
644 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
645 		ret = load_free_space_tree(caching_ctl);
646 	else
647 		ret = load_extent_tree_free(caching_ctl);
648 
649 	spin_lock(&block_group->lock);
650 	block_group->caching_ctl = NULL;
651 	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
652 	spin_unlock(&block_group->lock);
653 
654 #ifdef CONFIG_BTRFS_DEBUG
655 	if (btrfs_should_fragment_free_space(block_group)) {
656 		u64 bytes_used;
657 
658 		spin_lock(&block_group->space_info->lock);
659 		spin_lock(&block_group->lock);
660 		bytes_used = block_group->length - block_group->used;
661 		block_group->space_info->bytes_used += bytes_used >> 1;
662 		spin_unlock(&block_group->lock);
663 		spin_unlock(&block_group->space_info->lock);
664 		fragment_free_space(block_group);
665 	}
666 #endif
667 
668 	caching_ctl->progress = (u64)-1;
669 
670 	up_read(&fs_info->commit_root_sem);
671 	btrfs_free_excluded_extents(block_group);
672 	mutex_unlock(&caching_ctl->mutex);
673 
674 	wake_up(&caching_ctl->wait);
675 
676 	btrfs_put_caching_control(caching_ctl);
677 	btrfs_put_block_group(block_group);
678 }
679 
680 int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
681 {
682 	DEFINE_WAIT(wait);
683 	struct btrfs_fs_info *fs_info = cache->fs_info;
684 	struct btrfs_caching_control *caching_ctl;
685 	int ret = 0;
686 
687 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
688 	if (!caching_ctl)
689 		return -ENOMEM;
690 
691 	INIT_LIST_HEAD(&caching_ctl->list);
692 	mutex_init(&caching_ctl->mutex);
693 	init_waitqueue_head(&caching_ctl->wait);
694 	caching_ctl->block_group = cache;
695 	caching_ctl->progress = cache->start;
696 	refcount_set(&caching_ctl->count, 1);
697 	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
698 
699 	spin_lock(&cache->lock);
700 	/*
701 	 * This should be a rare occasion, but this could happen I think in the
702 	 * case where one thread starts to load the space cache info, and then
703 	 * some other thread starts a transaction commit which tries to do an
704 	 * allocation while the other thread is still loading the space cache
705 	 * info.  The previous loop should have kept us from choosing this block
706 	 * group, but if we've moved to the state where we will wait on caching
707 	 * block groups we need to first check if we're doing a fast load here,
708 	 * so we can wait for it to finish, otherwise we could end up allocating
709 	 * from a block group who's cache gets evicted for one reason or
710 	 * another.
711 	 */
712 	while (cache->cached == BTRFS_CACHE_FAST) {
713 		struct btrfs_caching_control *ctl;
714 
715 		ctl = cache->caching_ctl;
716 		refcount_inc(&ctl->count);
717 		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
718 		spin_unlock(&cache->lock);
719 
720 		schedule();
721 
722 		finish_wait(&ctl->wait, &wait);
723 		btrfs_put_caching_control(ctl);
724 		spin_lock(&cache->lock);
725 	}
726 
727 	if (cache->cached != BTRFS_CACHE_NO) {
728 		spin_unlock(&cache->lock);
729 		kfree(caching_ctl);
730 		return 0;
731 	}
732 	WARN_ON(cache->caching_ctl);
733 	cache->caching_ctl = caching_ctl;
734 	cache->cached = BTRFS_CACHE_FAST;
735 	spin_unlock(&cache->lock);
736 
737 	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
738 		mutex_lock(&caching_ctl->mutex);
739 		ret = load_free_space_cache(cache);
740 
741 		spin_lock(&cache->lock);
742 		if (ret == 1) {
743 			cache->caching_ctl = NULL;
744 			cache->cached = BTRFS_CACHE_FINISHED;
745 			cache->last_byte_to_unpin = (u64)-1;
746 			caching_ctl->progress = (u64)-1;
747 		} else {
748 			if (load_cache_only) {
749 				cache->caching_ctl = NULL;
750 				cache->cached = BTRFS_CACHE_NO;
751 			} else {
752 				cache->cached = BTRFS_CACHE_STARTED;
753 				cache->has_caching_ctl = 1;
754 			}
755 		}
756 		spin_unlock(&cache->lock);
757 #ifdef CONFIG_BTRFS_DEBUG
758 		if (ret == 1 &&
759 		    btrfs_should_fragment_free_space(cache)) {
760 			u64 bytes_used;
761 
762 			spin_lock(&cache->space_info->lock);
763 			spin_lock(&cache->lock);
764 			bytes_used = cache->length - cache->used;
765 			cache->space_info->bytes_used += bytes_used >> 1;
766 			spin_unlock(&cache->lock);
767 			spin_unlock(&cache->space_info->lock);
768 			fragment_free_space(cache);
769 		}
770 #endif
771 		mutex_unlock(&caching_ctl->mutex);
772 
773 		wake_up(&caching_ctl->wait);
774 		if (ret == 1) {
775 			btrfs_put_caching_control(caching_ctl);
776 			btrfs_free_excluded_extents(cache);
777 			return 0;
778 		}
779 	} else {
780 		/*
781 		 * We're either using the free space tree or no caching at all.
782 		 * Set cached to the appropriate value and wakeup any waiters.
783 		 */
784 		spin_lock(&cache->lock);
785 		if (load_cache_only) {
786 			cache->caching_ctl = NULL;
787 			cache->cached = BTRFS_CACHE_NO;
788 		} else {
789 			cache->cached = BTRFS_CACHE_STARTED;
790 			cache->has_caching_ctl = 1;
791 		}
792 		spin_unlock(&cache->lock);
793 		wake_up(&caching_ctl->wait);
794 	}
795 
796 	if (load_cache_only) {
797 		btrfs_put_caching_control(caching_ctl);
798 		return 0;
799 	}
800 
801 	down_write(&fs_info->commit_root_sem);
802 	refcount_inc(&caching_ctl->count);
803 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
804 	up_write(&fs_info->commit_root_sem);
805 
806 	btrfs_get_block_group(cache);
807 
808 	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
809 
810 	return ret;
811 }
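
/*
 * Editorial example (not in the original file, compiled out): a caller
 * that needs the free space fully loaded before proceeding can kick off
 * caching and then block until it finishes or errors out.  The function
 * name is hypothetical.
 */
#if 0
static int example_cache_and_wait(struct btrfs_block_group *cache)
{
	int ret;

	/* Start a full (not load_cache_only) caching pass. */
	ret = btrfs_cache_block_group(cache, 0);
	if (ret)
		return ret;

	/* Returns -EIO if caching ended in BTRFS_CACHE_ERROR. */
	return btrfs_wait_block_group_cache_done(cache);
}
#endif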
812 
813 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
814 {
815 	u64 extra_flags = chunk_to_extended(flags) &
816 				BTRFS_EXTENDED_PROFILE_MASK;
817 
818 	write_seqlock(&fs_info->profiles_lock);
819 	if (flags & BTRFS_BLOCK_GROUP_DATA)
820 		fs_info->avail_data_alloc_bits &= ~extra_flags;
821 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
822 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
823 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
824 		fs_info->avail_system_alloc_bits &= ~extra_flags;
825 	write_sequnlock(&fs_info->profiles_lock);
826 }
827 
828 /*
829  * Clear incompat bits for the following feature(s):
830  *
831  * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
832  *            in the whole filesystem
833  *
834  * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
835  */
836 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
837 {
838 	bool found_raid56 = false;
839 	bool found_raid1c34 = false;
840 
841 	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
842 	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
843 	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
844 		struct list_head *head = &fs_info->space_info;
845 		struct btrfs_space_info *sinfo;
846 
847 		list_for_each_entry_rcu(sinfo, head, list) {
848 			down_read(&sinfo->groups_sem);
849 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
850 				found_raid56 = true;
851 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
852 				found_raid56 = true;
853 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
854 				found_raid1c34 = true;
855 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
856 				found_raid1c34 = true;
857 			up_read(&sinfo->groups_sem);
858 		}
859 		if (found_raid56)
860 			btrfs_clear_fs_incompat(fs_info, RAID56);
861 		if (found_raid1c34)
862 			btrfs_clear_fs_incompat(fs_info, RAID1C34);
863 	}
864 }
865 
866 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
867 			     u64 group_start, struct extent_map *em)
868 {
869 	struct btrfs_fs_info *fs_info = trans->fs_info;
870 	struct btrfs_root *root = fs_info->extent_root;
871 	struct btrfs_path *path;
872 	struct btrfs_block_group *block_group;
873 	struct btrfs_free_cluster *cluster;
874 	struct btrfs_root *tree_root = fs_info->tree_root;
875 	struct btrfs_key key;
876 	struct inode *inode;
877 	struct kobject *kobj = NULL;
878 	int ret;
879 	int index;
880 	int factor;
881 	struct btrfs_caching_control *caching_ctl = NULL;
882 	bool remove_em;
883 	bool remove_rsv = false;
884 
885 	block_group = btrfs_lookup_block_group(fs_info, group_start);
886 	BUG_ON(!block_group);
887 	BUG_ON(!block_group->ro);
888 
889 	trace_btrfs_remove_block_group(block_group);
890 	/*
891 	 * Free the reserved super bytes from this block group before
892 	 * removing it.
893 	 */
894 	btrfs_free_excluded_extents(block_group);
895 	btrfs_free_ref_tree_range(fs_info, block_group->start,
896 				  block_group->length);
897 
898 	index = btrfs_bg_flags_to_raid_index(block_group->flags);
899 	factor = btrfs_bg_type_to_factor(block_group->flags);
900 
901 	/* make sure this block group isn't part of an allocation cluster */
902 	cluster = &fs_info->data_alloc_cluster;
903 	spin_lock(&cluster->refill_lock);
904 	btrfs_return_cluster_to_free_space(block_group, cluster);
905 	spin_unlock(&cluster->refill_lock);
906 
907 	/*
908 	 * make sure this block group isn't part of a metadata
909 	 * allocation cluster
910 	 */
911 	cluster = &fs_info->meta_alloc_cluster;
912 	spin_lock(&cluster->refill_lock);
913 	btrfs_return_cluster_to_free_space(block_group, cluster);
914 	spin_unlock(&cluster->refill_lock);
915 
916 	path = btrfs_alloc_path();
917 	if (!path) {
918 		ret = -ENOMEM;
919 		goto out;
920 	}
921 
922 	/*
923 	 * get the inode first so any iput calls done for the io_list
924 	 * aren't the final iput (no unlinks allowed now)
925 	 */
926 	inode = lookup_free_space_inode(block_group, path);
927 
928 	mutex_lock(&trans->transaction->cache_write_mutex);
929 	/*
930 	 * Make sure our free space cache IO is done before removing the
931 	 * free space inode
932 	 */
933 	spin_lock(&trans->transaction->dirty_bgs_lock);
934 	if (!list_empty(&block_group->io_list)) {
935 		list_del_init(&block_group->io_list);
936 
937 		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
938 
939 		spin_unlock(&trans->transaction->dirty_bgs_lock);
940 		btrfs_wait_cache_io(trans, block_group, path);
941 		btrfs_put_block_group(block_group);
942 		spin_lock(&trans->transaction->dirty_bgs_lock);
943 	}
944 
945 	if (!list_empty(&block_group->dirty_list)) {
946 		list_del_init(&block_group->dirty_list);
947 		remove_rsv = true;
948 		btrfs_put_block_group(block_group);
949 	}
950 	spin_unlock(&trans->transaction->dirty_bgs_lock);
951 	mutex_unlock(&trans->transaction->cache_write_mutex);
952 
953 	if (!IS_ERR(inode)) {
954 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
955 		if (ret) {
956 			btrfs_add_delayed_iput(inode);
957 			goto out;
958 		}
959 		clear_nlink(inode);
960 		/* One for the block groups ref */
961 		spin_lock(&block_group->lock);
962 		if (block_group->iref) {
963 			block_group->iref = 0;
964 			block_group->inode = NULL;
965 			spin_unlock(&block_group->lock);
966 			iput(inode);
967 		} else {
968 			spin_unlock(&block_group->lock);
969 		}
970 		/* One for our lookup ref */
971 		btrfs_add_delayed_iput(inode);
972 	}
973 
974 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
975 	key.type = 0;
976 	key.offset = block_group->start;
977 
978 	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
979 	if (ret < 0)
980 		goto out;
981 	if (ret > 0)
982 		btrfs_release_path(path);
983 	if (ret == 0) {
984 		ret = btrfs_del_item(trans, tree_root, path);
985 		if (ret)
986 			goto out;
987 		btrfs_release_path(path);
988 	}
989 
990 	spin_lock(&fs_info->block_group_cache_lock);
991 	rb_erase(&block_group->cache_node,
992 		 &fs_info->block_group_cache_tree);
993 	RB_CLEAR_NODE(&block_group->cache_node);
994 
995 	if (fs_info->first_logical_byte == block_group->start)
996 		fs_info->first_logical_byte = (u64)-1;
997 	spin_unlock(&fs_info->block_group_cache_lock);
998 
999 	down_write(&block_group->space_info->groups_sem);
1000 	/*
1001 	 * we must use list_del_init so people can check to see if they
1002 	 * are still on the list after taking the semaphore
1003 	 */
1004 	list_del_init(&block_group->list);
1005 	if (list_empty(&block_group->space_info->block_groups[index])) {
1006 		kobj = block_group->space_info->block_group_kobjs[index];
1007 		block_group->space_info->block_group_kobjs[index] = NULL;
1008 		clear_avail_alloc_bits(fs_info, block_group->flags);
1009 	}
1010 	up_write(&block_group->space_info->groups_sem);
1011 	clear_incompat_bg_bits(fs_info, block_group->flags);
1012 	if (kobj) {
1013 		kobject_del(kobj);
1014 		kobject_put(kobj);
1015 	}
1016 
1017 	if (block_group->has_caching_ctl)
1018 		caching_ctl = btrfs_get_caching_control(block_group);
1019 	if (block_group->cached == BTRFS_CACHE_STARTED)
1020 		btrfs_wait_block_group_cache_done(block_group);
1021 	if (block_group->has_caching_ctl) {
1022 		down_write(&fs_info->commit_root_sem);
1023 		if (!caching_ctl) {
1024 			struct btrfs_caching_control *ctl;
1025 
1026 			list_for_each_entry(ctl,
1027 				    &fs_info->caching_block_groups, list)
1028 				if (ctl->block_group == block_group) {
1029 					caching_ctl = ctl;
1030 					refcount_inc(&caching_ctl->count);
1031 					break;
1032 				}
1033 		}
1034 		if (caching_ctl)
1035 			list_del_init(&caching_ctl->list);
1036 		up_write(&fs_info->commit_root_sem);
1037 		if (caching_ctl) {
1038 			/* Once for the caching bgs list and once for us. */
1039 			btrfs_put_caching_control(caching_ctl);
1040 			btrfs_put_caching_control(caching_ctl);
1041 		}
1042 	}
1043 
1044 	spin_lock(&trans->transaction->dirty_bgs_lock);
1045 	WARN_ON(!list_empty(&block_group->dirty_list));
1046 	WARN_ON(!list_empty(&block_group->io_list));
1047 	spin_unlock(&trans->transaction->dirty_bgs_lock);
1048 
1049 	btrfs_remove_free_space_cache(block_group);
1050 
1051 	spin_lock(&block_group->space_info->lock);
1052 	list_del_init(&block_group->ro_list);
1053 
1054 	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1055 		WARN_ON(block_group->space_info->total_bytes
1056 			< block_group->length);
1057 		WARN_ON(block_group->space_info->bytes_readonly
1058 			< block_group->length);
1059 		WARN_ON(block_group->space_info->disk_total
1060 			< block_group->length * factor);
1061 	}
1062 	block_group->space_info->total_bytes -= block_group->length;
1063 	block_group->space_info->bytes_readonly -= block_group->length;
1064 	block_group->space_info->disk_total -= block_group->length * factor;
1065 
1066 	spin_unlock(&block_group->space_info->lock);
1067 
1068 	key.objectid = block_group->start;
1069 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1070 	key.offset = block_group->length;
1071 
1072 	mutex_lock(&fs_info->chunk_mutex);
1073 	spin_lock(&block_group->lock);
1074 	block_group->removed = 1;
1075 	/*
1076 	 * At this point trimming can't start on this block group, because we
1077 	 * removed the block group from the tree fs_info->block_group_cache_tree
1078 	 * so no one can find it anymore, and even if someone already got this
1079 	 * block group before we removed it from the rbtree, they have already
1080 	 * incremented block_group->trimming - if they didn't, they won't find
1081 	 * any free space entries because we already removed them all when we
1082 	 * called btrfs_remove_free_space_cache().
1083 	 *
1084 	 * And we must not remove the extent map from the fs_info->mapping_tree
1085 	 * to prevent the same logical address range and physical device space
1086 	 * ranges from being reused for a new block group. This is because our
1087 	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1088 	 * completely transactionless, so while it is trimming a range the
1089 	 * currently running transaction might finish and a new one start,
1090 	 * allowing for new block groups to be created that can reuse the same
1091 	 * physical device locations unless we take this special care.
1092 	 *
1093 	 * There may also be an implicit trim operation if the file system
1094 	 * is mounted with -odiscard. The same protections must remain
1095 	 * in place until the extents have been discarded completely when
1096 	 * the transaction commit has completed.
1097 	 */
1098 	remove_em = (atomic_read(&block_group->trimming) == 0);
1099 	spin_unlock(&block_group->lock);
1100 
1101 	mutex_unlock(&fs_info->chunk_mutex);
1102 
1103 	ret = remove_block_group_free_space(trans, block_group);
1104 	if (ret)
1105 		goto out;
1106 
1107 	btrfs_put_block_group(block_group);
1108 	btrfs_put_block_group(block_group);
1109 
1110 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1111 	if (ret > 0)
1112 		ret = -EIO;
1113 	if (ret < 0)
1114 		goto out;
1115 
1116 	ret = btrfs_del_item(trans, root, path);
1117 	if (ret)
1118 		goto out;
1119 
1120 	if (remove_em) {
1121 		struct extent_map_tree *em_tree;
1122 
1123 		em_tree = &fs_info->mapping_tree;
1124 		write_lock(&em_tree->lock);
1125 		remove_extent_mapping(em_tree, em);
1126 		write_unlock(&em_tree->lock);
1127 		/* once for the tree */
1128 		free_extent_map(em);
1129 	}
1130 out:
1131 	if (remove_rsv)
1132 		btrfs_delayed_refs_rsv_release(fs_info, 1);
1133 	btrfs_free_path(path);
1134 	return ret;
1135 }
1136 
1137 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1138 		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1139 {
1140 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1141 	struct extent_map *em;
1142 	struct map_lookup *map;
1143 	unsigned int num_items;
1144 
1145 	read_lock(&em_tree->lock);
1146 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1147 	read_unlock(&em_tree->lock);
1148 	ASSERT(em && em->start == chunk_offset);
1149 
1150 	/*
1151 	 * We need to reserve 3 + N units from the metadata space info in order
1152 	 * to remove a block group (done at btrfs_remove_chunk() and at
1153 	 * btrfs_remove_block_group()), which are used for:
1154 	 *
1155 	 * 1 unit for adding the free space inode's orphan (located in the tree
1156 	 * of tree roots).
1157 	 * 1 unit for deleting the block group item (located in the extent
1158 	 * tree).
1159 	 * 1 unit for deleting the free space item (located in tree of tree
1160 	 * roots).
1161 	 * N units for deleting N device extent items corresponding to each
1162 	 * stripe (located in the device tree).
1163 	 *
1164 	 * In order to remove a block group we also need to reserve units in the
1165 	 * system space info in order to update the chunk tree (update one or
1166 	 * more device items and remove one chunk item), but this is done at
1167 	 * btrfs_remove_chunk() through a call to check_system_chunk().
1168 	 */
1169 	map = em->map_lookup;
1170 	num_items = 3 + map->num_stripes;
1171 	free_extent_map(em);
1172 
1173 	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
1174 							   num_items, 1);
1175 }
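
/*
 * Worked example (editorial note, not part of the original file): for a
 * RAID1 chunk on two devices, map->num_stripes is 2, so the transaction
 * is started with 3 + 2 = 5 metadata units reserved: the orphan item,
 * the block group item, the free space item and one device extent item
 * per stripe.
 */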
1176 
1177 /*
1178  * Mark block group @cache read-only, so later write won't happen to block
1179  * group @cache.
1180  *
1181  * If @force is not set, this function will only mark the block group readonly
1182  * if we have enough free space (1M) in other metadata/system block groups.
1183  * If @force is set, this function will mark the block group readonly
1184  * without checking free space.
1185  *
1186  * NOTE: This function doesn't care if other block groups can contain all the
1187  * data in this block group. That check should be done by relocation routine,
1188  * not this function.
1189  */
1190 static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
1191 {
1192 	struct btrfs_space_info *sinfo = cache->space_info;
1193 	u64 num_bytes;
1194 	u64 sinfo_used;
1195 	int ret = -ENOSPC;
1196 
1197 	spin_lock(&sinfo->lock);
1198 	spin_lock(&cache->lock);
1199 
1200 	if (cache->ro) {
1201 		cache->ro++;
1202 		ret = 0;
1203 		goto out;
1204 	}
1205 
1206 	num_bytes = cache->length - cache->reserved - cache->pinned -
1207 		    cache->bytes_super - cache->used;
1208 	sinfo_used = btrfs_space_info_used(sinfo, true);
1209 
1210 	/*
1211 	 * sinfo_used + num_bytes should always be <= sinfo->total_bytes.
1212 	 *
1213 	 * Here we make sure if we mark this bg RO, we still have enough
1214 	 * free space as buffer.
1215 	 */
1216 	if (sinfo_used + num_bytes <= sinfo->total_bytes) {
1217 		sinfo->bytes_readonly += num_bytes;
1218 		cache->ro++;
1219 		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1220 		ret = 0;
1221 	}
1222 out:
1223 	spin_unlock(&cache->lock);
1224 	spin_unlock(&sinfo->lock);
1225 	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1226 		btrfs_info(cache->fs_info,
1227 			"unable to make block group %llu ro", cache->start);
1228 		btrfs_info(cache->fs_info,
1229 			"sinfo_used=%llu bg_num_bytes=%llu",
1230 			sinfo_used, num_bytes);
1231 		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1232 	}
1233 	return ret;
1234 }
1235 
1236 /*
1237  * Process the unused_bgs list and remove any that don't have any allocated
1238  * space inside of them.
1239  */
1240 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1241 {
1242 	struct btrfs_block_group *block_group;
1243 	struct btrfs_space_info *space_info;
1244 	struct btrfs_trans_handle *trans;
1245 	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
1246 	int ret = 0;
1247 
1248 	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1249 		return;
1250 
1251 	spin_lock(&fs_info->unused_bgs_lock);
1252 	while (!list_empty(&fs_info->unused_bgs)) {
1253 		u64 start, end;
1254 		int trimming;
1255 
1256 		block_group = list_first_entry(&fs_info->unused_bgs,
1257 					       struct btrfs_block_group,
1258 					       bg_list);
1259 		list_del_init(&block_group->bg_list);
1260 
1261 		space_info = block_group->space_info;
1262 
1263 		if (ret || btrfs_mixed_space_info(space_info)) {
1264 			btrfs_put_block_group(block_group);
1265 			continue;
1266 		}
1267 		spin_unlock(&fs_info->unused_bgs_lock);
1268 
1269 		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
1270 
1271 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
1272 
1273 		/* Don't want to race with allocators so take the groups_sem */
1274 		down_write(&space_info->groups_sem);
1275 
1276 		/*
1277 		 * Async discard moves the final block group discard to be prior
1278 		 * to the unused_bgs code path.  Therefore, if it's not fully
1279 		 * trimmed, punt it back to the async discard lists.
1280 		 */
1281 		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
1282 		    !btrfs_is_free_space_trimmed(block_group)) {
1283 			trace_btrfs_skip_unused_block_group(block_group);
1284 			up_write(&space_info->groups_sem);
1285 			/* Requeue if we failed because of async discard */
1286 			btrfs_discard_queue_work(&fs_info->discard_ctl,
1287 						 block_group);
1288 			goto next;
1289 		}
1290 
1291 		spin_lock(&block_group->lock);
1292 		if (block_group->reserved || block_group->pinned ||
1293 		    block_group->used || block_group->ro ||
1294 		    list_is_singular(&block_group->list)) {
1295 			/*
1296 			 * We want to bail if we made new allocations or have
1297 			 * outstanding allocations in this block group.  We do
1298 			 * the ro check in case balance is currently acting on
1299 			 * this block group.
1300 			 */
1301 			trace_btrfs_skip_unused_block_group(block_group);
1302 			spin_unlock(&block_group->lock);
1303 			up_write(&space_info->groups_sem);
1304 			goto next;
1305 		}
1306 		spin_unlock(&block_group->lock);
1307 
1308 		/* We don't want to force the issue, only flip if it's ok. */
1309 		ret = inc_block_group_ro(block_group, 0);
1310 		up_write(&space_info->groups_sem);
1311 		if (ret < 0) {
1312 			ret = 0;
1313 			goto next;
1314 		}
1315 
1316 		/*
1317 		 * Want to do this before we do anything else so we can recover
1318 		 * properly if we fail to join the transaction.
1319 		 */
1320 		trans = btrfs_start_trans_remove_block_group(fs_info,
1321 						     block_group->start);
1322 		if (IS_ERR(trans)) {
1323 			btrfs_dec_block_group_ro(block_group);
1324 			ret = PTR_ERR(trans);
1325 			goto next;
1326 		}
1327 
1328 		/*
1329 		 * We could have pending pinned extents for this block group,
1330 		 * so just delete them; we don't care about them anymore.
1331 		 */
1332 		start = block_group->start;
1333 		end = start + block_group->length - 1;
1334 		/*
1335 		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
1336 		 * btrfs_finish_extent_commit(). If we are at transaction N,
1337 		 * another task might be running finish_extent_commit() for the
1338 		 * previous transaction N - 1, and have seen a range belonging
1339 		 * to the block group in freed_extents[] before we were able to
1340 		 * clear the whole block group range from freed_extents[]. This
1341 		 * means that task can lookup for the block group after we
1342 		 * unpinned it from freed_extents[] and removed it, leading to
1343 		 * a BUG_ON() at btrfs_unpin_extent_range().
1344 		 */
1345 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
1346 		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
1347 				  EXTENT_DIRTY);
1348 		if (ret) {
1349 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1350 			btrfs_dec_block_group_ro(block_group);
1351 			goto end_trans;
1352 		}
1353 		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
1354 				  EXTENT_DIRTY);
1355 		if (ret) {
1356 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1357 			btrfs_dec_block_group_ro(block_group);
1358 			goto end_trans;
1359 		}
1360 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1361 
1362 		/*
1363 		 * At this point, the block_group is read only and should fail
1364 		 * new allocations.  However, btrfs_finish_extent_commit() can
1365 		 * cause this block_group to be placed back on the discard
1366 		 * lists because now the block_group isn't fully discarded.
1367 		 * Bail here and try again later after discarding everything.
1368 		 */
1369 		spin_lock(&fs_info->discard_ctl.lock);
1370 		if (!list_empty(&block_group->discard_list)) {
1371 			spin_unlock(&fs_info->discard_ctl.lock);
1372 			btrfs_dec_block_group_ro(block_group);
1373 			btrfs_discard_queue_work(&fs_info->discard_ctl,
1374 						 block_group);
1375 			goto end_trans;
1376 		}
1377 		spin_unlock(&fs_info->discard_ctl.lock);
1378 
1379 		/* Reset pinned so btrfs_put_block_group doesn't complain */
1380 		spin_lock(&space_info->lock);
1381 		spin_lock(&block_group->lock);
1382 
1383 		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1384 						     -block_group->pinned);
1385 		space_info->bytes_readonly += block_group->pinned;
1386 		percpu_counter_add_batch(&space_info->total_bytes_pinned,
1387 				   -block_group->pinned,
1388 				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
1389 		block_group->pinned = 0;
1390 
1391 		spin_unlock(&block_group->lock);
1392 		spin_unlock(&space_info->lock);
1393 
1394 		/*
1395 		 * Normally an unused block group is passed here, and trimming
1396 		 * is then handled in the transaction commit path.
1397 		 * Async discard interposes before this to do the trimming
1398 		 * before coming down the unused block group path as trimming
1399 		 * will no longer be done later in the transaction commit path.
1400 		 */
1401 		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
1402 			goto flip_async;
1403 
1404 		/* DISCARD can flip during remount */
1405 		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC);
1406 
1407 		/* Implicit trim during transaction commit. */
1408 		if (trimming)
1409 			btrfs_get_block_group_trimming(block_group);
1410 
1411 		/*
1412 		 * btrfs_remove_chunk() will abort the transaction if things go
1413 		 * horribly wrong.
1414 		 */
1415 		ret = btrfs_remove_chunk(trans, block_group->start);
1416 
1417 		if (ret) {
1418 			if (trimming)
1419 				btrfs_put_block_group_trimming(block_group);
1420 			goto end_trans;
1421 		}
1422 
1423 		/*
1424 		 * If we're not mounted with -odiscard, we can just forget
1425 		 * about this block group. Otherwise we'll need to wait
1426 		 * until transaction commit to do the actual discard.
1427 		 */
1428 		if (trimming) {
1429 			spin_lock(&fs_info->unused_bgs_lock);
1430 			/*
1431 			 * A concurrent scrub might have added us to the list
1432 			 * fs_info->unused_bgs, so use a list_move operation
1433 			 * to add the block group to the deleted_bgs list.
1434 			 */
1435 			list_move(&block_group->bg_list,
1436 				  &trans->transaction->deleted_bgs);
1437 			spin_unlock(&fs_info->unused_bgs_lock);
1438 			btrfs_get_block_group(block_group);
1439 		}
1440 end_trans:
1441 		btrfs_end_transaction(trans);
1442 next:
1443 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
1444 		btrfs_put_block_group(block_group);
1445 		spin_lock(&fs_info->unused_bgs_lock);
1446 	}
1447 	spin_unlock(&fs_info->unused_bgs_lock);
1448 	return;
1449 
1450 flip_async:
1451 	btrfs_end_transaction(trans);
1452 	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
1453 	btrfs_put_block_group(block_group);
1454 	btrfs_discard_punt_unused_bgs_list(fs_info);
1455 }
1456 
1457 void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
1458 {
1459 	struct btrfs_fs_info *fs_info = bg->fs_info;
1460 
1461 	spin_lock(&fs_info->unused_bgs_lock);
1462 	if (list_empty(&bg->bg_list)) {
1463 		btrfs_get_block_group(bg);
1464 		trace_btrfs_add_unused_block_group(bg);
1465 		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1466 	}
1467 	spin_unlock(&fs_info->unused_bgs_lock);
1468 }
1469 
1470 static int find_first_block_group(struct btrfs_fs_info *fs_info,
1471 				  struct btrfs_path *path,
1472 				  struct btrfs_key *key)
1473 {
1474 	struct btrfs_root *root = fs_info->extent_root;
1475 	int ret = 0;
1476 	struct btrfs_key found_key;
1477 	struct extent_buffer *leaf;
1478 	struct btrfs_block_group_item bg;
1479 	u64 flags;
1480 	int slot;
1481 
1482 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1483 	if (ret < 0)
1484 		goto out;
1485 
1486 	while (1) {
1487 		slot = path->slots[0];
1488 		leaf = path->nodes[0];
1489 		if (slot >= btrfs_header_nritems(leaf)) {
1490 			ret = btrfs_next_leaf(root, path);
1491 			if (ret == 0)
1492 				continue;
1493 			if (ret < 0)
1494 				goto out;
1495 			break;
1496 		}
1497 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1498 
1499 		if (found_key.objectid >= key->objectid &&
1500 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1501 			struct extent_map_tree *em_tree;
1502 			struct extent_map *em;
1503 
1504 			em_tree = &root->fs_info->mapping_tree;
1505 			read_lock(&em_tree->lock);
1506 			em = lookup_extent_mapping(em_tree, found_key.objectid,
1507 						   found_key.offset);
1508 			read_unlock(&em_tree->lock);
1509 			if (!em) {
1510 				btrfs_err(fs_info,
1511 			"logical %llu len %llu found bg but no related chunk",
1512 					  found_key.objectid, found_key.offset);
1513 				ret = -ENOENT;
1514 			} else if (em->start != found_key.objectid ||
1515 				   em->len != found_key.offset) {
1516 				btrfs_err(fs_info,
1517 		"block group %llu len %llu mismatch with chunk %llu len %llu",
1518 					  found_key.objectid, found_key.offset,
1519 					  em->start, em->len);
1520 				ret = -EUCLEAN;
1521 			} else {
1522 				read_extent_buffer(leaf, &bg,
1523 					btrfs_item_ptr_offset(leaf, slot),
1524 					sizeof(bg));
1525 				flags = btrfs_stack_block_group_flags(&bg) &
1526 					BTRFS_BLOCK_GROUP_TYPE_MASK;
1527 
1528 				if (flags != (em->map_lookup->type &
1529 					      BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1530 					btrfs_err(fs_info,
1531 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1532 						found_key.objectid,
1533 						found_key.offset, flags,
1534 						(BTRFS_BLOCK_GROUP_TYPE_MASK &
1535 						 em->map_lookup->type));
1536 					ret = -EUCLEAN;
1537 				} else {
1538 					ret = 0;
1539 				}
1540 			}
1541 			free_extent_map(em);
1542 			goto out;
1543 		}
1544 		path->slots[0]++;
1545 	}
1546 out:
1547 	return ret;
1548 }
1549 
1550 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1551 {
1552 	u64 extra_flags = chunk_to_extended(flags) &
1553 				BTRFS_EXTENDED_PROFILE_MASK;
1554 
1555 	write_seqlock(&fs_info->profiles_lock);
1556 	if (flags & BTRFS_BLOCK_GROUP_DATA)
1557 		fs_info->avail_data_alloc_bits |= extra_flags;
1558 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
1559 		fs_info->avail_metadata_alloc_bits |= extra_flags;
1560 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1561 		fs_info->avail_system_alloc_bits |= extra_flags;
1562 	write_sequnlock(&fs_info->profiles_lock);
1563 }
1564 
1565 /**
1566  * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
 * @fs_info:       the filesystem
1567  * @chunk_start:   logical address of block group
1568  * @physical:	   physical address to map to logical addresses
1569  * @logical:	   return array of logical addresses which map to @physical
1570  * @naddrs:	   length of @logical
1571  * @stripe_len:    size of IO stripe for the given block group
1572  *
1573  * Maps a particular @physical disk address to a list of @logical addresses.
1574  * Used primarily to exclude those portions of a block group that contain super
1575  * block copies.
1576  */
1577 EXPORT_FOR_TESTS
1578 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
1579 		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
1580 {
1581 	struct extent_map *em;
1582 	struct map_lookup *map;
1583 	u64 *buf;
1584 	u64 bytenr;
1585 	u64 data_stripe_length;
1586 	u64 io_stripe_size;
1587 	int i, nr = 0;
1588 	int ret = 0;
1589 
1590 	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
1591 	if (IS_ERR(em))
1592 		return -EIO;
1593 
1594 	map = em->map_lookup;
1595 	data_stripe_length = em->len;
1596 	io_stripe_size = map->stripe_len;
1597 
1598 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1599 		data_stripe_length = div_u64(data_stripe_length,
1600 					     map->num_stripes / map->sub_stripes);
1601 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
1602 		data_stripe_length = div_u64(data_stripe_length, map->num_stripes);
1603 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1604 		data_stripe_length = div_u64(data_stripe_length,
1605 					     nr_data_stripes(map));
1606 		io_stripe_size = map->stripe_len * nr_data_stripes(map);
1607 	}
1608 
1609 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
1610 	if (!buf) {
1611 		ret = -ENOMEM;
1612 		goto out;
1613 	}
1614 
1615 	for (i = 0; i < map->num_stripes; i++) {
1616 		bool already_inserted = false;
1617 		u64 stripe_nr;
1618 		int j;
1619 
1620 		if (!in_range(physical, map->stripes[i].physical,
1621 			      data_stripe_length))
1622 			continue;
1623 
1624 		stripe_nr = physical - map->stripes[i].physical;
1625 		stripe_nr = div64_u64(stripe_nr, map->stripe_len);
1626 
1627 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1628 			stripe_nr = stripe_nr * map->num_stripes + i;
1629 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
1630 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1631 			stripe_nr = stripe_nr * map->num_stripes + i;
1632 		}
1633 		/*
1634 		 * The remaining case is RAID56; there the multiplication by
1635 		 * nr_data_stripes() is already folded into io_stripe_size
1636 		 * above, which is used below instead of map->stripe_len.
1637 		 */
1638 
1639 		bytenr = chunk_start + stripe_nr * io_stripe_size;
1640 
1641 		/* Ensure we don't add duplicate addresses */
1642 		for (j = 0; j < nr; j++) {
1643 			if (buf[j] == bytenr) {
1644 				already_inserted = true;
1645 				break;
1646 			}
1647 		}
1648 
1649 		if (!already_inserted)
1650 			buf[nr++] = bytenr;
1651 	}
1652 
1653 	*logical = buf;
1654 	*naddrs = nr;
1655 	*stripe_len = io_stripe_size;
1656 out:
1657 	free_extent_map(em);
1658 	return ret;
1659 }
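
/*
 * Worked example (editorial note, not part of the original file): for a
 * RAID0 chunk at logical 1M with two stripes and a 64K stripe_len, a
 * physical address 70K into stripes[1] falls in device stripe number 1,
 * which RAID0 maps to logical stripe 1 * 2 + 1 = 3, so the single
 * returned address is 1M + 3 * 64K.
 */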
1660 
1661 static int exclude_super_stripes(struct btrfs_block_group *cache)
1662 {
1663 	struct btrfs_fs_info *fs_info = cache->fs_info;
1664 	u64 bytenr;
1665 	u64 *logical;
1666 	int stripe_len;
1667 	int i, nr, ret;
1668 
1669 	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
1670 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
1671 		cache->bytes_super += stripe_len;
1672 		ret = btrfs_add_excluded_extent(fs_info, cache->start,
1673 						stripe_len);
1674 		if (ret)
1675 			return ret;
1676 	}
1677 
1678 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1679 		bytenr = btrfs_sb_offset(i);
1680 		ret = btrfs_rmap_block(fs_info, cache->start,
1681 				       bytenr, &logical, &nr, &stripe_len);
1682 		if (ret)
1683 			return ret;
1684 
1685 		while (nr--) {
1686 			u64 start, len;
1687 
1688 			if (logical[nr] > cache->start + cache->length)
1689 				continue;
1690 
1691 			if (logical[nr] + stripe_len <= cache->start)
1692 				continue;
1693 
1694 			start = logical[nr];
1695 			if (start < cache->start) {
1696 				start = cache->start;
1697 				len = (logical[nr] + stripe_len) - start;
1698 			} else {
1699 				len = min_t(u64, stripe_len,
1700 					    cache->start + cache->length - start);
1701 			}
1702 
1703 			cache->bytes_super += len;
1704 			ret = btrfs_add_excluded_extent(fs_info, start, len);
1705 			if (ret) {
1706 				kfree(logical);
1707 				return ret;
1708 			}
1709 		}
1710 
1711 		kfree(logical);
1712 	}
1713 	return 0;
1714 }
1715 
1716 static void link_block_group(struct btrfs_block_group *cache)
1717 {
1718 	struct btrfs_space_info *space_info = cache->space_info;
1719 	int index = btrfs_bg_flags_to_raid_index(cache->flags);
1720 	bool first = false;
1721 
1722 	down_write(&space_info->groups_sem);
1723 	if (list_empty(&space_info->block_groups[index]))
1724 		first = true;
1725 	list_add_tail(&cache->list, &space_info->block_groups[index]);
1726 	up_write(&space_info->groups_sem);
1727 
1728 	if (first)
1729 		btrfs_sysfs_add_block_group_type(cache);
1730 }
1731 
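/*
 * Allocate and initialize an in-memory block group (refcount of 1, list
 * heads, locks and the free space ctl).  This neither inserts it into the
 * block group rbtree nor touches disk, callers do that separately.
 */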
1732 static struct btrfs_block_group *btrfs_create_block_group_cache(
1733 		struct btrfs_fs_info *fs_info, u64 start, u64 size)
1734 {
1735 	struct btrfs_block_group *cache;
1736 
1737 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
1738 	if (!cache)
1739 		return NULL;
1740 
1741 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1742 					GFP_NOFS);
1743 	if (!cache->free_space_ctl) {
1744 		kfree(cache);
1745 		return NULL;
1746 	}
1747 
1748 	cache->start = start;
1749 	cache->length = size;
1750 
1751 	cache->fs_info = fs_info;
1752 	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1753 	set_free_space_tree_thresholds(cache);
1754 
1755 	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
1756 
1757 	atomic_set(&cache->count, 1);
1758 	spin_lock_init(&cache->lock);
1759 	init_rwsem(&cache->data_rwsem);
1760 	INIT_LIST_HEAD(&cache->list);
1761 	INIT_LIST_HEAD(&cache->cluster_list);
1762 	INIT_LIST_HEAD(&cache->bg_list);
1763 	INIT_LIST_HEAD(&cache->ro_list);
1764 	INIT_LIST_HEAD(&cache->discard_list);
1765 	INIT_LIST_HEAD(&cache->dirty_list);
1766 	INIT_LIST_HEAD(&cache->io_list);
1767 	btrfs_init_free_space_ctl(cache);
1768 	atomic_set(&cache->trimming, 0);
1769 	mutex_init(&cache->free_space_lock);
1770 	btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1771 
1772 	return cache;
1773 }
1774 
1775 /*
1776  * Iterate all chunks and verify that each of them has the corresponding block
1777  * group
1778  */
1779 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
1780 {
1781 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
1782 	struct extent_map *em;
1783 	struct btrfs_block_group *bg;
1784 	u64 start = 0;
1785 	int ret = 0;
1786 
1787 	while (1) {
1788 		read_lock(&map_tree->lock);
1789 		/*
1790 		 * lookup_extent_mapping will return the first extent map
1791 		 * intersecting the range, so setting @len to 1 is enough to
1792 		 * get the first chunk.
1793 		 */
1794 		em = lookup_extent_mapping(map_tree, start, 1);
1795 		read_unlock(&map_tree->lock);
1796 		if (!em)
1797 			break;
1798 
1799 		bg = btrfs_lookup_block_group(fs_info, em->start);
1800 		if (!bg) {
1801 			btrfs_err(fs_info,
1802 	"chunk start=%llu len=%llu doesn't have corresponding block group",
1803 				     em->start, em->len);
1804 			ret = -EUCLEAN;
1805 			free_extent_map(em);
1806 			break;
1807 		}
1808 		if (bg->start != em->start || bg->length != em->len ||
1809 		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
1810 		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1811 			btrfs_err(fs_info,
1812 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
1813 				em->start, em->len,
1814 				em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
1815 				bg->start, bg->length,
1816 				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
1817 			ret = -EUCLEAN;
1818 			free_extent_map(em);
1819 			btrfs_put_block_group(bg);
1820 			break;
1821 		}
1822 		start = em->start + em->len;
1823 		free_extent_map(em);
1824 		btrfs_put_block_group(bg);
1825 	}
1826 	return ret;
1827 }
1828 
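/*
 * Build the in-memory state for a single block group item found in the
 * extent tree: create the cache structure, exclude the super stripes, insert
 * it into the rbtree and account it in the matching space_info.  Completely
 * full or completely empty block groups are marked as cached right away
 * since there is nothing to scan for them; read-only chunks are marked RO
 * and empty groups are queued for discard or as unused.
 */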
1829 static int read_one_block_group(struct btrfs_fs_info *info,
1830 				struct btrfs_path *path,
1831 				const struct btrfs_key *key,
1832 				int need_clear)
1833 {
1834 	struct extent_buffer *leaf = path->nodes[0];
1835 	struct btrfs_block_group *cache;
1836 	struct btrfs_space_info *space_info;
1837 	struct btrfs_block_group_item bgi;
1838 	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
1839 	int slot = path->slots[0];
1840 	int ret;
1841 
1842 	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
1843 
1844 	cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
1845 	if (!cache)
1846 		return -ENOMEM;
1847 
1848 	if (need_clear) {
1849 		/*
1850 		 * When we mount with old space cache, we need to
1851 		 * When we mount with an old space cache, we need to
1852 		 *
1853 		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
1854 		 *    truncate the old free space cache inode and
1855 		 *    setup a new one.
1856 		 * b) Setting 'dirty flag' makes sure that we flush
1857 		 *    the new space cache info onto disk.
1858 		 */
1859 		if (btrfs_test_opt(info, SPACE_CACHE))
1860 			cache->disk_cache_state = BTRFS_DC_CLEAR;
1861 	}
1862 	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
1863 			   sizeof(bgi));
1864 	cache->used = btrfs_stack_block_group_used(&bgi);
1865 	cache->flags = btrfs_stack_block_group_flags(&bgi);
1866 	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1867 	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1868 			btrfs_err(info,
1869 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1870 				  cache->start);
1871 			ret = -EINVAL;
1872 			goto error;
1873 	}
1874 
1875 	/*
1876 	 * We need to exclude the super stripes now so that the space info has
1877 	 * super bytes accounted for, otherwise we'll think we have more space
1878 	 * than we actually do.
1879 	 */
1880 	ret = exclude_super_stripes(cache);
1881 	if (ret) {
1882 		/* We may have excluded something, so call this just in case. */
1883 		btrfs_free_excluded_extents(cache);
1884 		goto error;
1885 	}
1886 
1887 	/*
1888 	 * Check for two cases, either we are full, and therefore don't need
1889 	 * to bother with the caching work since we won't find any space, or we
1890 	 * are empty, and we can just add all the space in and be done with it.
1891 	 * This saves us _a_lot_ of time, particularly in the full case.
1892 	 */
1893 	if (key->offset == cache->used) {
1894 		cache->last_byte_to_unpin = (u64)-1;
1895 		cache->cached = BTRFS_CACHE_FINISHED;
1896 		btrfs_free_excluded_extents(cache);
1897 	} else if (cache->used == 0) {
1898 		cache->last_byte_to_unpin = (u64)-1;
1899 		cache->cached = BTRFS_CACHE_FINISHED;
1900 		add_new_free_space(cache, key->objectid,
1901 				   key->objectid + key->offset);
1902 		btrfs_free_excluded_extents(cache);
1903 	}
1904 
1905 	ret = btrfs_add_block_group_cache(info, cache);
1906 	if (ret) {
1907 		btrfs_remove_free_space_cache(cache);
1908 		goto error;
1909 	}
1910 	trace_btrfs_add_block_group(info, cache, 0);
1911 	btrfs_update_space_info(info, cache->flags, key->offset,
1912 				cache->used, cache->bytes_super, &space_info);
1913 
1914 	cache->space_info = space_info;
1915 
1916 	link_block_group(cache);
1917 
1918 	set_avail_alloc_bits(info, cache->flags);
1919 	if (btrfs_chunk_readonly(info, cache->start)) {
1920 		inc_block_group_ro(cache, 1);
1921 	} else if (cache->used == 0) {
1922 		ASSERT(list_empty(&cache->bg_list));
1923 		if (btrfs_test_opt(info, DISCARD_ASYNC))
1924 			btrfs_discard_queue_work(&info->discard_ctl, cache);
1925 		else
1926 			btrfs_mark_bg_unused(cache);
1927 	}
1928 	return 0;
1929 error:
1930 	btrfs_put_block_group(cache);
1931 	return ret;
1932 }
1933 
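/*
 * Mount time setup of all block groups: walk every block group item in the
 * extent tree and build the in-memory structures, then mark un-mirrored
 * (RAID0/SINGLE) groups read-only when mirrored profiles are in use, and
 * finally verify that every chunk has a matching block group.
 */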
1934 int btrfs_read_block_groups(struct btrfs_fs_info *info)
1935 {
1936 	struct btrfs_path *path;
1937 	int ret;
1938 	struct btrfs_block_group *cache;
1939 	struct btrfs_space_info *space_info;
1940 	struct btrfs_key key;
1941 	int need_clear = 0;
1942 	u64 cache_gen;
1943 
1944 	key.objectid = 0;
1945 	key.offset = 0;
1946 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1947 	path = btrfs_alloc_path();
1948 	if (!path)
1949 		return -ENOMEM;
1950 	path->reada = READA_FORWARD;
1951 
1952 	cache_gen = btrfs_super_cache_generation(info->super_copy);
1953 	if (btrfs_test_opt(info, SPACE_CACHE) &&
1954 	    btrfs_super_generation(info->super_copy) != cache_gen)
1955 		need_clear = 1;
1956 	if (btrfs_test_opt(info, CLEAR_CACHE))
1957 		need_clear = 1;
1958 
1959 	while (1) {
1960 		ret = find_first_block_group(info, path, &key);
1961 		if (ret > 0)
1962 			break;
1963 		if (ret != 0)
1964 			goto error;
1965 
1966 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1967 		ret = read_one_block_group(info, path, &key, need_clear);
1968 		if (ret < 0)
1969 			goto error;
1970 		key.objectid += key.offset;
1971 		key.offset = 0;
1972 		btrfs_release_path(path);
1973 	}
1974 
1975 	list_for_each_entry_rcu(space_info, &info->space_info, list) {
1976 		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
1977 		      (BTRFS_BLOCK_GROUP_RAID10 |
1978 		       BTRFS_BLOCK_GROUP_RAID1_MASK |
1979 		       BTRFS_BLOCK_GROUP_RAID56_MASK |
1980 		       BTRFS_BLOCK_GROUP_DUP)))
1981 			continue;
1982 		/*
1983 		 * Avoid allocating from un-mirrored block group if there are
1984 		 * mirrored block groups.
1985 		 */
1986 		list_for_each_entry(cache,
1987 				&space_info->block_groups[BTRFS_RAID_RAID0],
1988 				list)
1989 			inc_block_group_ro(cache, 1);
1990 		list_for_each_entry(cache,
1991 				&space_info->block_groups[BTRFS_RAID_SINGLE],
1992 				list)
1993 			inc_block_group_ro(cache, 1);
1994 	}
1995 
1996 	btrfs_init_global_block_rsv(info);
1997 	ret = check_chunk_block_group_mappings(info);
1998 error:
1999 	btrfs_free_path(path);
2000 	return ret;
2001 }
2002 
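/*
 * Insert the block group items for all block groups created earlier in this
 * transaction and queued on trans->new_bgs, finish the second phase of their
 * chunk allocation and add their free space tree entries.
 */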
2003 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
2004 {
2005 	struct btrfs_fs_info *fs_info = trans->fs_info;
2006 	struct btrfs_block_group *block_group;
2007 	struct btrfs_root *extent_root = fs_info->extent_root;
2008 	struct btrfs_block_group_item item;
2009 	struct btrfs_key key;
2010 	int ret = 0;
2011 
2012 	if (!trans->can_flush_pending_bgs)
2013 		return;
2014 
2015 	while (!list_empty(&trans->new_bgs)) {
2016 		block_group = list_first_entry(&trans->new_bgs,
2017 					       struct btrfs_block_group,
2018 					       bg_list);
2019 		if (ret)
2020 			goto next;
2021 
2022 		spin_lock(&block_group->lock);
2023 		btrfs_set_stack_block_group_used(&item, block_group->used);
2024 		btrfs_set_stack_block_group_chunk_objectid(&item,
2025 				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
2026 		btrfs_set_stack_block_group_flags(&item, block_group->flags);
2027 		key.objectid = block_group->start;
2028 		key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2029 		key.offset = block_group->length;
2030 		spin_unlock(&block_group->lock);
2031 
2032 		ret = btrfs_insert_item(trans, extent_root, &key, &item,
2033 					sizeof(item));
2034 		if (ret)
2035 			btrfs_abort_transaction(trans, ret);
2036 		ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
2037 		if (ret)
2038 			btrfs_abort_transaction(trans, ret);
2039 		add_block_group_free_space(trans, block_group);
2040 		/* Already aborted the transaction if it failed. */
2041 next:
2042 		btrfs_delayed_refs_rsv_release(fs_info, 1);
2043 		list_del_init(&block_group->bg_list);
2044 	}
2045 	btrfs_trans_release_chunk_metadata(trans);
2046 }
2047 
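/*
 * Create the in-memory block group for a freshly allocated chunk, account it
 * in its space_info and queue it on trans->new_bgs.  The on-disk block group
 * item is inserted later by btrfs_create_pending_block_groups().
 */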
2048 int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
2049 			   u64 type, u64 chunk_offset, u64 size)
2050 {
2051 	struct btrfs_fs_info *fs_info = trans->fs_info;
2052 	struct btrfs_block_group *cache;
2053 	int ret;
2054 
2055 	btrfs_set_log_full_commit(trans);
2056 
2057 	cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
2058 	if (!cache)
2059 		return -ENOMEM;
2060 
2061 	cache->used = bytes_used;
2062 	cache->flags = type;
2063 	cache->last_byte_to_unpin = (u64)-1;
2064 	cache->cached = BTRFS_CACHE_FINISHED;
2065 	cache->needs_free_space = 1;
2066 	ret = exclude_super_stripes(cache);
2067 	if (ret) {
2068 		/* We may have excluded something, so call this just in case */
2069 		btrfs_free_excluded_extents(cache);
2070 		btrfs_put_block_group(cache);
2071 		return ret;
2072 	}
2073 
2074 	add_new_free_space(cache, chunk_offset, chunk_offset + size);
2075 
2076 	btrfs_free_excluded_extents(cache);
2077 
2078 #ifdef CONFIG_BTRFS_DEBUG
2079 	if (btrfs_should_fragment_free_space(cache)) {
2080 		u64 new_bytes_used = size - bytes_used;
2081 
2082 		bytes_used += new_bytes_used >> 1;
2083 		fragment_free_space(cache);
2084 	}
2085 #endif
2086 	/*
2087 	 * Ensure the corresponding space_info object is created and
2088 	 * assigned to our block group. We want our bg to be added to the rbtree
2089 	 * with its ->space_info set.
2090 	 */
2091 	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2092 	ASSERT(cache->space_info);
2093 
2094 	ret = btrfs_add_block_group_cache(fs_info, cache);
2095 	if (ret) {
2096 		btrfs_remove_free_space_cache(cache);
2097 		btrfs_put_block_group(cache);
2098 		return ret;
2099 	}
2100 
2101 	/*
2102 	 * Now that our block group has its ->space_info set and is inserted in
2103 	 * the rbtree, update the space info's counters.
2104 	 */
2105 	trace_btrfs_add_block_group(fs_info, cache, 1);
2106 	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
2107 				cache->bytes_super, &cache->space_info);
2108 	btrfs_update_global_block_rsv(fs_info);
2109 
2110 	link_block_group(cache);
2111 
2112 	list_add_tail(&cache->bg_list, &trans->new_bgs);
2113 	trans->delayed_ref_updates++;
2114 	btrfs_update_delayed_refs_rsv(trans);
2115 
2116 	set_avail_alloc_bits(fs_info, type);
2117 	return 0;
2118 }
2119 
2120 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
2121 {
2122 	u64 num_devices;
2123 	u64 stripped;
2124 
2125 	/*
2126 	 * If restripe for this chunk_type is on, pick the target profile and
2127 	 * return; otherwise do the usual balance.
2128 	 */
2129 	stripped = get_restripe_target(fs_info, flags);
2130 	if (stripped)
2131 		return extended_to_chunk(stripped);
2132 
2133 	num_devices = fs_info->fs_devices->rw_devices;
2134 
2135 	stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
2136 		BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
2137 
2138 	if (num_devices == 1) {
2139 		stripped |= BTRFS_BLOCK_GROUP_DUP;
2140 		stripped = flags & ~stripped;
2141 
2142 		/* turn raid0 into single device chunks */
2143 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
2144 			return stripped;
2145 
2146 		/* turn mirroring into duplication */
2147 		if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
2148 			     BTRFS_BLOCK_GROUP_RAID10))
2149 			return stripped | BTRFS_BLOCK_GROUP_DUP;
2150 	} else {
2151 		/* they already had raid on here, just return */
2152 		if (flags & stripped)
2153 			return flags;
2154 
2155 		stripped |= BTRFS_BLOCK_GROUP_DUP;
2156 		stripped = flags & ~stripped;
2157 
2158 		/* switch duplicated blocks with raid1 */
2159 		if (flags & BTRFS_BLOCK_GROUP_DUP)
2160 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
2161 
2162 		/* this is drive concat, leave it alone */
2163 	}
2164 
2165 	return flags;
2166 }
2167 
2168 /*
2169  * Mark one block group RO, can be called several times for the same block
2170  * group.
2171  *
2172  * @cache:		the destination block group
2173  * @do_chunk_alloc:	whether we need to do chunk pre-allocation; this is to
2174  * 			ensure we still have some free space after marking this
2175  * 			block group RO.
2176  */
2177 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2178 			     bool do_chunk_alloc)
2179 {
2180 	struct btrfs_fs_info *fs_info = cache->fs_info;
2181 	struct btrfs_trans_handle *trans;
2182 	u64 alloc_flags;
2183 	int ret;
2184 
2185 again:
2186 	trans = btrfs_join_transaction(fs_info->extent_root);
2187 	if (IS_ERR(trans))
2188 		return PTR_ERR(trans);
2189 
2190 	/*
2191 	 * we're not allowed to set block groups readonly after the dirty
2192 	 * block groups cache has started writing.  If it already started,
2193 	 * back off and let this transaction commit
2194 	 */
2195 	mutex_lock(&fs_info->ro_block_group_mutex);
2196 	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2197 		u64 transid = trans->transid;
2198 
2199 		mutex_unlock(&fs_info->ro_block_group_mutex);
2200 		btrfs_end_transaction(trans);
2201 
2202 		ret = btrfs_wait_for_commit(fs_info, transid);
2203 		if (ret)
2204 			return ret;
2205 		goto again;
2206 	}
2207 
2208 	if (do_chunk_alloc) {
2209 		/*
2210 		 * If we are changing raid levels, try to allocate a
2211 		 * corresponding block group with the new raid level.
2212 		 */
2213 		alloc_flags = update_block_group_flags(fs_info, cache->flags);
2214 		if (alloc_flags != cache->flags) {
2215 			ret = btrfs_chunk_alloc(trans, alloc_flags,
2216 						CHUNK_ALLOC_FORCE);
2217 			/*
2218 			 * ENOSPC is allowed here, we may have enough space
2219 			 * already allocated at the new raid level to carry on
2220 			 */
2221 			if (ret == -ENOSPC)
2222 				ret = 0;
2223 			if (ret < 0)
2224 				goto out;
2225 		}
2226 	}
2227 
2228 	ret = inc_block_group_ro(cache, !do_chunk_alloc);
2229 	if (!do_chunk_alloc)
2230 		goto unlock_out;
2231 	if (!ret)
2232 		goto out;
2233 	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2234 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2235 	if (ret < 0)
2236 		goto out;
2237 	ret = inc_block_group_ro(cache, 0);
2238 out:
2239 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2240 		alloc_flags = update_block_group_flags(fs_info, cache->flags);
2241 		mutex_lock(&fs_info->chunk_mutex);
2242 		check_system_chunk(trans, alloc_flags);
2243 		mutex_unlock(&fs_info->chunk_mutex);
2244 	}
2245 unlock_out:
2246 	mutex_unlock(&fs_info->ro_block_group_mutex);
2247 
2248 	btrfs_end_transaction(trans);
2249 	return ret;
2250 }
2251 
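/*
 * Drop one read-only reference taken by inc_block_group_ro().  When the last
 * reference goes away, the unused bytes of the group stop being accounted as
 * read-only in its space_info and it is removed from the ro_list.
 */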
2252 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
2253 {
2254 	struct btrfs_space_info *sinfo = cache->space_info;
2255 	u64 num_bytes;
2256 
2257 	BUG_ON(!cache->ro);
2258 
2259 	spin_lock(&sinfo->lock);
2260 	spin_lock(&cache->lock);
2261 	if (!--cache->ro) {
2262 		num_bytes = cache->length - cache->reserved -
2263 			    cache->pinned - cache->bytes_super - cache->used;
2264 		sinfo->bytes_readonly -= num_bytes;
2265 		list_del_init(&cache->ro_list);
2266 	}
2267 	spin_unlock(&cache->lock);
2268 	spin_unlock(&sinfo->lock);
2269 }
2270 
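/*
 * Update the on-disk block group item to match the in-memory used bytes and
 * flags.  Returns -ENOENT if the item does not exist yet, which callers
 * handle since the group may still be pending on a transaction's new_bgs
 * list.
 */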
2271 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2272 				 struct btrfs_path *path,
2273 				 struct btrfs_block_group *cache)
2274 {
2275 	struct btrfs_fs_info *fs_info = trans->fs_info;
2276 	int ret;
2277 	struct btrfs_root *extent_root = fs_info->extent_root;
2278 	unsigned long bi;
2279 	struct extent_buffer *leaf;
2280 	struct btrfs_block_group_item bgi;
2281 	struct btrfs_key key;
2282 
2283 	key.objectid = cache->start;
2284 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2285 	key.offset = cache->length;
2286 
2287 	ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
2288 	if (ret) {
2289 		if (ret > 0)
2290 			ret = -ENOENT;
2291 		goto fail;
2292 	}
2293 
2294 	leaf = path->nodes[0];
2295 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2296 	btrfs_set_stack_block_group_used(&bgi, cache->used);
2297 	btrfs_set_stack_block_group_chunk_objectid(&bgi,
2298 			BTRFS_FIRST_CHUNK_TREE_OBJECTID);
2299 	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
2300 	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
2301 	btrfs_mark_buffer_dirty(leaf);
2302 fail:
2303 	btrfs_release_path(path);
2304 	return ret;
2305 
2306 }
2307 
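/*
 * Prepare the v1 free space cache inode of a block group for writeout in
 * this transaction: create the inode if needed, truncate stale contents and
 * preallocate space for the new cache, updating disk_cache_state to reflect
 * the result.  Block groups smaller than 100M, or filesystems not using the
 * v1 space cache, are simply marked written.
 */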
2308 static int cache_save_setup(struct btrfs_block_group *block_group,
2309 			    struct btrfs_trans_handle *trans,
2310 			    struct btrfs_path *path)
2311 {
2312 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2313 	struct btrfs_root *root = fs_info->tree_root;
2314 	struct inode *inode = NULL;
2315 	struct extent_changeset *data_reserved = NULL;
2316 	u64 alloc_hint = 0;
2317 	int dcs = BTRFS_DC_ERROR;
2318 	u64 num_pages = 0;
2319 	int retries = 0;
2320 	int ret = 0;
2321 
2322 	/*
2323 	 * If this block group is smaller than 100 megs, don't bother caching the
2324 	 * block group.
2325 	 */
2326 	if (block_group->length < (100 * SZ_1M)) {
2327 		spin_lock(&block_group->lock);
2328 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2329 		spin_unlock(&block_group->lock);
2330 		return 0;
2331 	}
2332 
2333 	if (trans->aborted)
2334 		return 0;
2335 again:
2336 	inode = lookup_free_space_inode(block_group, path);
2337 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2338 		ret = PTR_ERR(inode);
2339 		btrfs_release_path(path);
2340 		goto out;
2341 	}
2342 
2343 	if (IS_ERR(inode)) {
2344 		BUG_ON(retries);
2345 		retries++;
2346 
2347 		if (block_group->ro)
2348 			goto out_free;
2349 
2350 		ret = create_free_space_inode(trans, block_group, path);
2351 		if (ret)
2352 			goto out_free;
2353 		goto again;
2354 	}
2355 
2356 	/*
2357 	 * We want to set the generation to 0, that way if anything goes wrong
2358 	 * from here on out we know not to trust this cache when we load up next
2359 	 * time.
2360 	 */
2361 	BTRFS_I(inode)->generation = 0;
2362 	ret = btrfs_update_inode(trans, root, inode);
2363 	if (ret) {
2364 		/*
2365 		 * So theoretically we could recover from this, simply set the
2366 		 * super cache generation to 0 so we know to invalidate the
2367 		 * cache, but then we'd have to keep track of the block groups
2368 		 * that fail this way so we know we _have_ to reset this cache
2369 		 * before the next commit or risk reading stale cache.  So to
2370 		 * limit our exposure to horrible edge cases, let's just abort the
2371 		 * transaction, this only happens in really bad situations
2372 		 * anyway.
2373 		 */
2374 		btrfs_abort_transaction(trans, ret);
2375 		goto out_put;
2376 	}
2377 	WARN_ON(ret);
2378 
2379 	/* We've already setup this transaction, go ahead and exit */
2380 	if (block_group->cache_generation == trans->transid &&
2381 	    i_size_read(inode)) {
2382 		dcs = BTRFS_DC_SETUP;
2383 		goto out_put;
2384 	}
2385 
2386 	if (i_size_read(inode) > 0) {
2387 		ret = btrfs_check_trunc_cache_free_space(fs_info,
2388 					&fs_info->global_block_rsv);
2389 		if (ret)
2390 			goto out_put;
2391 
2392 		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2393 		if (ret)
2394 			goto out_put;
2395 	}
2396 
2397 	spin_lock(&block_group->lock);
2398 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
2399 	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2400 		/*
2401 		 * don't bother trying to write stuff out _if_
2402 		 * a) we're not cached,
2403 		 * b) we're with nospace_cache mount option,
2404 		 * c) we're with v2 space_cache (FREE_SPACE_TREE).
2405 		 */
2406 		dcs = BTRFS_DC_WRITTEN;
2407 		spin_unlock(&block_group->lock);
2408 		goto out_put;
2409 	}
2410 	spin_unlock(&block_group->lock);
2411 
2412 	/*
2413 	 * We hit an ENOSPC when setting up the cache in this transaction, just
2414 	 * skip doing the setup, we've already cleared the cache so we're safe.
2415 	 */
2416 	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2417 		ret = -ENOSPC;
2418 		goto out_put;
2419 	}
2420 
2421 	/*
2422 	 * Try to preallocate enough space based on how big the block group is.
2423 	 * Keep in mind this has to include any pinned space which could end up
2424 	 * taking up quite a bit since it's not folded into the other space
2425 	 * cache.
2426 	 */
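	/*
	 * This works out to 16 pages for every 256M of block group length,
	 * e.g. a 1G block group preallocates 64 pages (256K with 4K pages).
	 */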
2427 	num_pages = div_u64(block_group->length, SZ_256M);
2428 	if (!num_pages)
2429 		num_pages = 1;
2430 
2431 	num_pages *= 16;
2432 	num_pages *= PAGE_SIZE;
2433 
2434 	ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
2435 	if (ret)
2436 		goto out_put;
2437 
2438 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2439 					      num_pages, num_pages,
2440 					      &alloc_hint);
2441 	/*
2442 	 * Our cache requires contiguous chunks so that we don't modify a bunch
2443 	 * of metadata or split extents when writing the cache out, which means
2444 	 * we can enospc if we are heavily fragmented in addition to just normal
2445 	 * out of space conditions.  So if we hit this just skip setting up any
2446 	 * other block groups for this transaction, maybe we'll unpin enough
2447 	 * space the next time around.
2448 	 */
2449 	if (!ret)
2450 		dcs = BTRFS_DC_SETUP;
2451 	else if (ret == -ENOSPC)
2452 		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2453 
2454 out_put:
2455 	iput(inode);
2456 out_free:
2457 	btrfs_release_path(path);
2458 out:
2459 	spin_lock(&block_group->lock);
2460 	if (!ret && dcs == BTRFS_DC_SETUP)
2461 		block_group->cache_generation = trans->transid;
2462 	block_group->disk_cache_state = dcs;
2463 	spin_unlock(&block_group->lock);
2464 
2465 	extent_changeset_free(data_reserved);
2466 	return ret;
2467 }
2468 
2469 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2470 {
2471 	struct btrfs_fs_info *fs_info = trans->fs_info;
2472 	struct btrfs_block_group *cache, *tmp;
2473 	struct btrfs_transaction *cur_trans = trans->transaction;
2474 	struct btrfs_path *path;
2475 
2476 	if (list_empty(&cur_trans->dirty_bgs) ||
2477 	    !btrfs_test_opt(fs_info, SPACE_CACHE))
2478 		return 0;
2479 
2480 	path = btrfs_alloc_path();
2481 	if (!path)
2482 		return -ENOMEM;
2483 
2484 	/* Could add new block groups, use _safe just in case */
2485 	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2486 				 dirty_list) {
2487 		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2488 			cache_save_setup(cache, trans, path);
2489 	}
2490 
2491 	btrfs_free_path(path);
2492 	return 0;
2493 }
2494 
2495 /*
2496  * Transaction commit does final block group cache writeback during a critical
2497  * section where nothing is allowed to change the FS.  This is required in
2498  * order for the cache to actually match the block group, but can introduce a
2499  * lot of latency into the commit.
2500  *
2501  * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2502  * There's a chance we'll have to redo some of it if the block group changes
2503  * again during the commit, but it greatly reduces the commit latency by
2504  * getting rid of the easy block groups while we're still allowing others to
2505  * join the commit.
2506  */
2507 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
2508 {
2509 	struct btrfs_fs_info *fs_info = trans->fs_info;
2510 	struct btrfs_block_group *cache;
2511 	struct btrfs_transaction *cur_trans = trans->transaction;
2512 	int ret = 0;
2513 	int should_put;
2514 	struct btrfs_path *path = NULL;
2515 	LIST_HEAD(dirty);
2516 	struct list_head *io = &cur_trans->io_bgs;
2517 	int num_started = 0;
2518 	int loops = 0;
2519 
2520 	spin_lock(&cur_trans->dirty_bgs_lock);
2521 	if (list_empty(&cur_trans->dirty_bgs)) {
2522 		spin_unlock(&cur_trans->dirty_bgs_lock);
2523 		return 0;
2524 	}
2525 	list_splice_init(&cur_trans->dirty_bgs, &dirty);
2526 	spin_unlock(&cur_trans->dirty_bgs_lock);
2527 
2528 again:
2529 	/* Make sure all the block groups on our dirty list actually exist */
2530 	btrfs_create_pending_block_groups(trans);
2531 
2532 	if (!path) {
2533 		path = btrfs_alloc_path();
2534 		if (!path)
2535 			return -ENOMEM;
2536 	}
2537 
2538 	/*
2539 	 * cache_write_mutex is here only to save us from balance or automatic
2540 	 * removal of empty block groups deleting this block group while we are
2541 	 * writing out the cache
2542 	 */
2543 	mutex_lock(&trans->transaction->cache_write_mutex);
2544 	while (!list_empty(&dirty)) {
2545 		bool drop_reserve = true;
2546 
2547 		cache = list_first_entry(&dirty, struct btrfs_block_group,
2548 					 dirty_list);
2549 		/*
2550 		 * This can happen if something re-dirties a block group that
2551 		 * is already under IO.  Just wait for it to finish and then do
2552 		 * it all again
2553 		 */
2554 		if (!list_empty(&cache->io_list)) {
2555 			list_del_init(&cache->io_list);
2556 			btrfs_wait_cache_io(trans, cache, path);
2557 			btrfs_put_block_group(cache);
2558 		}
2559 
2560 
2561 		/*
2562 		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2563 		 * it should update the cache_state.  Don't delete until after
2564 		 * we wait.
2565 		 *
2566 		 * Since we're not running in the commit critical section
2567 		 * we need the dirty_bgs_lock to protect from update_block_group
2568 		 */
2569 		spin_lock(&cur_trans->dirty_bgs_lock);
2570 		list_del_init(&cache->dirty_list);
2571 		spin_unlock(&cur_trans->dirty_bgs_lock);
2572 
2573 		should_put = 1;
2574 
2575 		cache_save_setup(cache, trans, path);
2576 
2577 		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2578 			cache->io_ctl.inode = NULL;
2579 			ret = btrfs_write_out_cache(trans, cache, path);
2580 			if (ret == 0 && cache->io_ctl.inode) {
2581 				num_started++;
2582 				should_put = 0;
2583 
2584 				/*
2585 				 * The cache_write_mutex is protecting the
2586 				 * io_list, also refer to the definition of
2587 				 * btrfs_transaction::io_bgs for more details
2588 				 */
2589 				list_add_tail(&cache->io_list, io);
2590 			} else {
2591 				/*
2592 				 * If we failed to write the cache, the
2593 				 * generation will be bad and life goes on
2594 				 */
2595 				ret = 0;
2596 			}
2597 		}
2598 		if (!ret) {
2599 			ret = write_one_cache_group(trans, path, cache);
2600 			/*
2601 			 * Our block group might still be attached to the list
2602 			 * of new block groups in the transaction handle of some
2603 			 * other task (struct btrfs_trans_handle->new_bgs). This
2604 			 * means its block group item isn't yet in the extent
2605 			 * tree. If this happens ignore the error, as we will
2606 			 * try again later in the critical section of the
2607 			 * transaction commit.
2608 			 */
2609 			if (ret == -ENOENT) {
2610 				ret = 0;
2611 				spin_lock(&cur_trans->dirty_bgs_lock);
2612 				if (list_empty(&cache->dirty_list)) {
2613 					list_add_tail(&cache->dirty_list,
2614 						      &cur_trans->dirty_bgs);
2615 					btrfs_get_block_group(cache);
2616 					drop_reserve = false;
2617 				}
2618 				spin_unlock(&cur_trans->dirty_bgs_lock);
2619 			} else if (ret) {
2620 				btrfs_abort_transaction(trans, ret);
2621 			}
2622 		}
2623 
2624 		/* If it's not on the io list, we need to put the block group */
2625 		if (should_put)
2626 			btrfs_put_block_group(cache);
2627 		if (drop_reserve)
2628 			btrfs_delayed_refs_rsv_release(fs_info, 1);
2629 
2630 		if (ret)
2631 			break;
2632 
2633 		/*
2634 		 * Avoid blocking other tasks for too long. It might even save
2635 		 * us from writing caches for block groups that are going to be
2636 		 * removed.
2637 		 */
2638 		mutex_unlock(&trans->transaction->cache_write_mutex);
2639 		mutex_lock(&trans->transaction->cache_write_mutex);
2640 	}
2641 	mutex_unlock(&trans->transaction->cache_write_mutex);
2642 
2643 	/*
2644 	 * Go through delayed refs for all the stuff we've just kicked off
2645 	 * and then loop back (just once)
2646 	 */
2647 	ret = btrfs_run_delayed_refs(trans, 0);
2648 	if (!ret && loops == 0) {
2649 		loops++;
2650 		spin_lock(&cur_trans->dirty_bgs_lock);
2651 		list_splice_init(&cur_trans->dirty_bgs, &dirty);
2652 		/*
2653 		 * dirty_bgs_lock protects us from concurrent block group
2654 		 * deletes too (not just cache_write_mutex).
2655 		 */
2656 		if (!list_empty(&dirty)) {
2657 			spin_unlock(&cur_trans->dirty_bgs_lock);
2658 			goto again;
2659 		}
2660 		spin_unlock(&cur_trans->dirty_bgs_lock);
2661 	} else if (ret < 0) {
2662 		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
2663 	}
2664 
2665 	btrfs_free_path(path);
2666 	return ret;
2667 }
2668 
2669 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
2670 {
2671 	struct btrfs_fs_info *fs_info = trans->fs_info;
2672 	struct btrfs_block_group *cache;
2673 	struct btrfs_transaction *cur_trans = trans->transaction;
2674 	int ret = 0;
2675 	int should_put;
2676 	struct btrfs_path *path;
2677 	struct list_head *io = &cur_trans->io_bgs;
2678 	int num_started = 0;
2679 
2680 	path = btrfs_alloc_path();
2681 	if (!path)
2682 		return -ENOMEM;
2683 
2684 	/*
2685 	 * Even though we are in the critical section of the transaction commit,
2686 	 * we can still have concurrent tasks adding elements to this
2687 	 * transaction's list of dirty block groups. These tasks correspond to
2688 	 * endio free space workers started when writeback finishes for a
2689 	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2690 	 * allocate new block groups as a result of COWing nodes of the root
2691 	 * tree when updating the free space inode. The writeback for the space
2692 	 * caches is triggered by an earlier call to
2693 	 * btrfs_start_dirty_block_groups() and iterations of the following
2694 	 * loop.
2695 	 * Also we want to do the cache_save_setup first and then run the
2696 	 * delayed refs to make sure we have the best chance at doing this all
2697 	 * in one shot.
2698 	 */
2699 	spin_lock(&cur_trans->dirty_bgs_lock);
2700 	while (!list_empty(&cur_trans->dirty_bgs)) {
2701 		cache = list_first_entry(&cur_trans->dirty_bgs,
2702 					 struct btrfs_block_group,
2703 					 dirty_list);
2704 
2705 		/*
2706 		 * This can happen if cache_save_setup re-dirties a block group
2707 		 * that is already under IO.  Just wait for it to finish and
2708 		 * then do it all again
2709 		 */
2710 		if (!list_empty(&cache->io_list)) {
2711 			spin_unlock(&cur_trans->dirty_bgs_lock);
2712 			list_del_init(&cache->io_list);
2713 			btrfs_wait_cache_io(trans, cache, path);
2714 			btrfs_put_block_group(cache);
2715 			spin_lock(&cur_trans->dirty_bgs_lock);
2716 		}
2717 
2718 		/*
2719 		 * Don't remove from the dirty list until after we've waited on
2720 		 * any pending IO
2721 		 */
2722 		list_del_init(&cache->dirty_list);
2723 		spin_unlock(&cur_trans->dirty_bgs_lock);
2724 		should_put = 1;
2725 
2726 		cache_save_setup(cache, trans, path);
2727 
2728 		if (!ret)
2729 			ret = btrfs_run_delayed_refs(trans,
2730 						     (unsigned long) -1);
2731 
2732 		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2733 			cache->io_ctl.inode = NULL;
2734 			ret = btrfs_write_out_cache(trans, cache, path);
2735 			if (ret == 0 && cache->io_ctl.inode) {
2736 				num_started++;
2737 				should_put = 0;
2738 				list_add_tail(&cache->io_list, io);
2739 			} else {
2740 				/*
2741 				 * If we failed to write the cache, the
2742 				 * generation will be bad and life goes on
2743 				 */
2744 				ret = 0;
2745 			}
2746 		}
2747 		if (!ret) {
2748 			ret = write_one_cache_group(trans, path, cache);
2749 			/*
2750 			 * One of the free space endio workers might have
2751 			 * created a new block group while updating a free space
2752 			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
2753 			 * and hasn't released its transaction handle yet, in
2754 			 * which case the new block group is still attached to
2755 			 * its transaction handle and its creation has not
2756 			 * finished yet (no block group item in the extent tree
2757 			 * yet, etc). If this is the case, wait for all free
2758 			 * space endio workers to finish and retry. This is a
2759 			 * very rare case so no need for a more efficient and
2760 			 * complex approach.
2761 			 */
2762 			if (ret == -ENOENT) {
2763 				wait_event(cur_trans->writer_wait,
2764 				   atomic_read(&cur_trans->num_writers) == 1);
2765 				ret = write_one_cache_group(trans, path, cache);
2766 			}
2767 			if (ret)
2768 				btrfs_abort_transaction(trans, ret);
2769 		}
2770 
2771 		/* If it's not on the io list, we need to put the block group */
2772 		if (should_put)
2773 			btrfs_put_block_group(cache);
2774 		btrfs_delayed_refs_rsv_release(fs_info, 1);
2775 		spin_lock(&cur_trans->dirty_bgs_lock);
2776 	}
2777 	spin_unlock(&cur_trans->dirty_bgs_lock);
2778 
2779 	/*
2780 	 * Refer to the definition of the io_bgs member for details on why it's
2781 	 * safe to use it without any locking.
2782 	 */
2783 	while (!list_empty(io)) {
2784 		cache = list_first_entry(io, struct btrfs_block_group,
2785 					 io_list);
2786 		list_del_init(&cache->io_list);
2787 		btrfs_wait_cache_io(trans, cache, path);
2788 		btrfs_put_block_group(cache);
2789 	}
2790 
2791 	btrfs_free_path(path);
2792 	return ret;
2793 }
2794 
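/*
 * Account an allocation (@alloc != 0) or a free of @num_bytes starting at
 * @bytenr against the block group(s) covering that range, as well as the
 * superblock's bytes_used counter.  Freed bytes are pinned here and only
 * return to the free space accounting once the transaction commit unpins
 * them.  Touched block groups are added to the transaction's dirty list so
 * their items are written back, and groups that drop to zero used bytes are
 * queued as unused.
 */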
2795 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
2796 			     u64 bytenr, u64 num_bytes, int alloc)
2797 {
2798 	struct btrfs_fs_info *info = trans->fs_info;
2799 	struct btrfs_block_group *cache = NULL;
2800 	u64 total = num_bytes;
2801 	u64 old_val;
2802 	u64 byte_in_group;
2803 	int factor;
2804 	int ret = 0;
2805 
2806 	/* Block accounting for super block */
2807 	spin_lock(&info->delalloc_root_lock);
2808 	old_val = btrfs_super_bytes_used(info->super_copy);
2809 	if (alloc)
2810 		old_val += num_bytes;
2811 	else
2812 		old_val -= num_bytes;
2813 	btrfs_set_super_bytes_used(info->super_copy, old_val);
2814 	spin_unlock(&info->delalloc_root_lock);
2815 
2816 	while (total) {
2817 		cache = btrfs_lookup_block_group(info, bytenr);
2818 		if (!cache) {
2819 			ret = -ENOENT;
2820 			break;
2821 		}
2822 		factor = btrfs_bg_type_to_factor(cache->flags);
2823 
2824 		/*
2825 		 * If this block group has free space cache written out, we
2826 		 * need to make sure to load it if we are removing space.  This
2827 		 * is because we need the unpinning stage to actually add the
2828 		 * space back to the block group, otherwise we will leak space.
2829 		 */
2830 		if (!alloc && !btrfs_block_group_done(cache))
2831 			btrfs_cache_block_group(cache, 1);
2832 
2833 		byte_in_group = bytenr - cache->start;
2834 		WARN_ON(byte_in_group > cache->length);
2835 
2836 		spin_lock(&cache->space_info->lock);
2837 		spin_lock(&cache->lock);
2838 
2839 		if (btrfs_test_opt(info, SPACE_CACHE) &&
2840 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
2841 			cache->disk_cache_state = BTRFS_DC_CLEAR;
2842 
2843 		old_val = cache->used;
2844 		num_bytes = min(total, cache->length - byte_in_group);
2845 		if (alloc) {
2846 			old_val += num_bytes;
2847 			cache->used = old_val;
2848 			cache->reserved -= num_bytes;
2849 			cache->space_info->bytes_reserved -= num_bytes;
2850 			cache->space_info->bytes_used += num_bytes;
2851 			cache->space_info->disk_used += num_bytes * factor;
2852 			spin_unlock(&cache->lock);
2853 			spin_unlock(&cache->space_info->lock);
2854 		} else {
2855 			old_val -= num_bytes;
2856 			cache->used = old_val;
2857 			cache->pinned += num_bytes;
2858 			btrfs_space_info_update_bytes_pinned(info,
2859 					cache->space_info, num_bytes);
2860 			cache->space_info->bytes_used -= num_bytes;
2861 			cache->space_info->disk_used -= num_bytes * factor;
2862 			spin_unlock(&cache->lock);
2863 			spin_unlock(&cache->space_info->lock);
2864 
2865 			percpu_counter_add_batch(
2866 					&cache->space_info->total_bytes_pinned,
2867 					num_bytes,
2868 					BTRFS_TOTAL_BYTES_PINNED_BATCH);
2869 			set_extent_dirty(info->pinned_extents,
2870 					 bytenr, bytenr + num_bytes - 1,
2871 					 GFP_NOFS | __GFP_NOFAIL);
2872 		}
2873 
2874 		spin_lock(&trans->transaction->dirty_bgs_lock);
2875 		if (list_empty(&cache->dirty_list)) {
2876 			list_add_tail(&cache->dirty_list,
2877 				      &trans->transaction->dirty_bgs);
2878 			trans->delayed_ref_updates++;
2879 			btrfs_get_block_group(cache);
2880 		}
2881 		spin_unlock(&trans->transaction->dirty_bgs_lock);
2882 
2883 		/*
2884 		 * No longer have used bytes in this block group, queue it for
2885 		 * deletion. We do this after adding the block group to the
2886 		 * dirty list to avoid races between cleaner kthread and space
2887 		 * cache writeout.
2888 		 */
2889 		if (!alloc && old_val == 0) {
2890 			if (!btrfs_test_opt(info, DISCARD_ASYNC))
2891 				btrfs_mark_bg_unused(cache);
2892 		}
2893 
2894 		btrfs_put_block_group(cache);
2895 		total -= num_bytes;
2896 		bytenr += num_bytes;
2897 	}
2898 
2899 	/* Modified block groups are accounted for in the delayed_refs_rsv. */
2900 	btrfs_update_delayed_refs_rsv(trans);
2901 	return ret;
2902 }
2903 
2904 /**
2905  * btrfs_add_reserved_bytes - update the block_group and space info counters
2906  * @cache:	The cache we are manipulating
2907  * @ram_bytes:  The number of bytes of file content, and will be the same
2908  *              as @num_bytes except for the compress path.
2909  * @num_bytes:	The number of bytes in question
2910  * @delalloc:   The blocks are allocated for the delalloc write
2911  *
2912  * This is called by the allocator when it reserves space. If this is a
2913  * reservation and the block group has become read only we cannot make the
2914  * reservation and return -EAGAIN, otherwise this function always succeeds.
2915  */
2916 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
2917 			     u64 ram_bytes, u64 num_bytes, int delalloc)
2918 {
2919 	struct btrfs_space_info *space_info = cache->space_info;
2920 	int ret = 0;
2921 
2922 	spin_lock(&space_info->lock);
2923 	spin_lock(&cache->lock);
2924 	if (cache->ro) {
2925 		ret = -EAGAIN;
2926 	} else {
2927 		cache->reserved += num_bytes;
2928 		space_info->bytes_reserved += num_bytes;
2929 		trace_btrfs_space_reservation(cache->fs_info, "space_info",
2930 					      space_info->flags, num_bytes, 1);
2931 		btrfs_space_info_update_bytes_may_use(cache->fs_info,
2932 						      space_info, -ram_bytes);
2933 		if (delalloc)
2934 			cache->delalloc_bytes += num_bytes;
2935 	}
2936 	spin_unlock(&cache->lock);
2937 	spin_unlock(&space_info->lock);
2938 	return ret;
2939 }
2940 
2941 /**
2942  * btrfs_free_reserved_bytes - update the block_group and space info counters
2943  * @cache:      The cache we are manipulating
2944  * @num_bytes:  The number of bytes in question
2945  * @delalloc:   The blocks are allocated for the delalloc write
2946  *
2947  * This is called by somebody who is freeing space that was never actually used
2948  * on disk.  For example, if you reserve some space for a new leaf in
2949  * transaction A and free that leaf before transaction A commits, you call
2950  * this to clear the reservation.
2951  */
2952 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
2953 			       u64 num_bytes, int delalloc)
2954 {
2955 	struct btrfs_space_info *space_info = cache->space_info;
2956 
2957 	spin_lock(&space_info->lock);
2958 	spin_lock(&cache->lock);
2959 	if (cache->ro)
2960 		space_info->bytes_readonly += num_bytes;
2961 	cache->reserved -= num_bytes;
2962 	space_info->bytes_reserved -= num_bytes;
2963 	space_info->max_extent_size = 0;
2964 
2965 	if (delalloc)
2966 		cache->delalloc_bytes -= num_bytes;
2967 	spin_unlock(&cache->lock);
2968 	spin_unlock(&space_info->lock);
2969 }
2970 
2971 static void force_metadata_allocation(struct btrfs_fs_info *info)
2972 {
2973 	struct list_head *head = &info->space_info;
2974 	struct btrfs_space_info *found;
2975 
2976 	rcu_read_lock();
2977 	list_for_each_entry_rcu(found, head, list) {
2978 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2979 			found->force_alloc = CHUNK_ALLOC_FORCE;
2980 	}
2981 	rcu_read_unlock();
2982 }
2983 
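/*
 * Decide whether a new chunk should be allocated for @sinfo.
 * CHUNK_ALLOC_FORCE always allocates, CHUNK_ALLOC_LIMITED keeps a small
 * reserve free (the larger of 64M and about 1% of the filesystem size), and
 * otherwise we only allocate once the space info is roughly 80% used.
 */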
2984 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
2985 			      struct btrfs_space_info *sinfo, int force)
2986 {
2987 	u64 bytes_used = btrfs_space_info_used(sinfo, false);
2988 	u64 thresh;
2989 
2990 	if (force == CHUNK_ALLOC_FORCE)
2991 		return 1;
2992 
2993 	/*
2994 	 * in limited mode, we want to have some free space up to
2995 	 * about 1% of the FS size.
2996 	 */
2997 	if (force == CHUNK_ALLOC_LIMITED) {
2998 		thresh = btrfs_super_total_bytes(fs_info->super_copy);
2999 		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
3000 
3001 		if (sinfo->total_bytes - bytes_used < thresh)
3002 			return 1;
3003 	}
3004 
3005 	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
3006 		return 0;
3007 	return 1;
3008 }
3009 
3010 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
3011 {
3012 	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
3013 
3014 	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
3015 }
3016 
3017 /*
3018  * If force is CHUNK_ALLOC_FORCE:
3019  *    - return 1 if it successfully allocates a chunk,
3020  *    - return errors including -ENOSPC otherwise.
3021  * If force is NOT CHUNK_ALLOC_FORCE:
3022  *    - return 0 if it doesn't need to allocate a new chunk,
3023  *    - return 1 if it successfully allocates a chunk,
3024  *    - return errors including -ENOSPC otherwise.
3025  */
3026 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
3027 		      enum btrfs_chunk_alloc_enum force)
3028 {
3029 	struct btrfs_fs_info *fs_info = trans->fs_info;
3030 	struct btrfs_space_info *space_info;
3031 	bool wait_for_alloc = false;
3032 	bool should_alloc = false;
3033 	int ret = 0;
3034 
3035 	/* Don't re-enter if we're already allocating a chunk */
3036 	if (trans->allocating_chunk)
3037 		return -ENOSPC;
3038 
3039 	space_info = btrfs_find_space_info(fs_info, flags);
3040 	ASSERT(space_info);
3041 
3042 	do {
3043 		spin_lock(&space_info->lock);
3044 		if (force < space_info->force_alloc)
3045 			force = space_info->force_alloc;
3046 		should_alloc = should_alloc_chunk(fs_info, space_info, force);
3047 		if (space_info->full) {
3048 			/* No more free physical space */
3049 			if (should_alloc)
3050 				ret = -ENOSPC;
3051 			else
3052 				ret = 0;
3053 			spin_unlock(&space_info->lock);
3054 			return ret;
3055 		} else if (!should_alloc) {
3056 			spin_unlock(&space_info->lock);
3057 			return 0;
3058 		} else if (space_info->chunk_alloc) {
3059 			/*
3060 			 * Someone is already allocating, so we need to block
3061 			 * until this someone is finished and then loop to
3062 			 * recheck if we should continue with our allocation
3063 			 * attempt.
3064 			 */
3065 			wait_for_alloc = true;
3066 			spin_unlock(&space_info->lock);
3067 			mutex_lock(&fs_info->chunk_mutex);
3068 			mutex_unlock(&fs_info->chunk_mutex);
3069 		} else {
3070 			/* Proceed with allocation */
3071 			space_info->chunk_alloc = 1;
3072 			wait_for_alloc = false;
3073 			spin_unlock(&space_info->lock);
3074 		}
3075 
3076 		cond_resched();
3077 	} while (wait_for_alloc);
3078 
3079 	mutex_lock(&fs_info->chunk_mutex);
3080 	trans->allocating_chunk = true;
3081 
3082 	/*
3083 	 * If we have mixed data/metadata chunks we want to make sure we keep
3084 	 * allocating mixed chunks instead of individual chunks.
3085 	 */
3086 	if (btrfs_mixed_space_info(space_info))
3087 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3088 
3089 	/*
3090 	 * if we're doing a data chunk, go ahead and make sure that
3091 	 * we keep a reasonable number of metadata chunks allocated in the
3092 	 * FS as well.
3093 	 */
3094 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3095 		fs_info->data_chunk_allocations++;
3096 		if (!(fs_info->data_chunk_allocations %
3097 		      fs_info->metadata_ratio))
3098 			force_metadata_allocation(fs_info);
3099 	}
3100 
3101 	/*
3102 	 * Check if we have enough space in SYSTEM chunk because we may need
3103 	 * to update devices.
3104 	 */
3105 	check_system_chunk(trans, flags);
3106 
3107 	ret = btrfs_alloc_chunk(trans, flags);
3108 	trans->allocating_chunk = false;
3109 
3110 	spin_lock(&space_info->lock);
3111 	if (ret < 0) {
3112 		if (ret == -ENOSPC)
3113 			space_info->full = 1;
3114 		else
3115 			goto out;
3116 	} else {
3117 		ret = 1;
3118 		space_info->max_extent_size = 0;
3119 	}
3120 
3121 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3122 out:
3123 	space_info->chunk_alloc = 0;
3124 	spin_unlock(&space_info->lock);
3125 	mutex_unlock(&fs_info->chunk_mutex);
3126 	/*
3127 	 * When we allocate a new chunk we reserve space in the chunk block
3128 	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
3129 	 * add new nodes/leafs to it if we end up needing to do it when
3130 	 * inserting the chunk item and updating device items as part of the
3131 	 * second phase of chunk allocation, performed by
3132 	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
3133 	 * large number of new block groups to create in our transaction
3134 	 * handle's new_bgs list to avoid exhausting the chunk block reserve
3135 	 * in extreme cases - like having a single transaction create many new
3136 	 * block groups when starting to write out the free space caches of all
3137 	 * the block groups that were made dirty during the lifetime of the
3138 	 * transaction.
3139 	 */
3140 	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
3141 		btrfs_create_pending_block_groups(trans);
3142 
3143 	return ret;
3144 }
3145 
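/*
 * Number of devices a chunk of the given type will span: devs_max from the
 * raid table, or every writable device when the profile has no fixed
 * maximum.
 */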
3146 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
3147 {
3148 	u64 num_dev;
3149 
3150 	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
3151 	if (!num_dev)
3152 		num_dev = fs_info->fs_devices->rw_devices;
3153 
3154 	return num_dev;
3155 }
3156 
3157 /*
3158  * Reserve space in the system space for allocating or removing a chunk
3159  */
3160 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
3161 {
3162 	struct btrfs_fs_info *fs_info = trans->fs_info;
3163 	struct btrfs_space_info *info;
3164 	u64 left;
3165 	u64 thresh;
3166 	int ret = 0;
3167 	u64 num_devs;
3168 
3169 	/*
3170 	 * Needed because we can end up allocating a system chunk and need an
3171 	 * atomic, race-free space reservation in the chunk block reserve.
3172 	 */
3173 	lockdep_assert_held(&fs_info->chunk_mutex);
3174 
3175 	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3176 	spin_lock(&info->lock);
3177 	left = info->total_bytes - btrfs_space_info_used(info, true);
3178 	spin_unlock(&info->lock);
3179 
3180 	num_devs = get_profile_num_devs(fs_info, type);
3181 
3182 	/* num_devs device items to update and 1 chunk item to add or remove */
3183 	thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
3184 		btrfs_calc_insert_metadata_size(fs_info, 1);
3185 
3186 	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3187 		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3188 			   left, thresh, type);
3189 		btrfs_dump_space_info(fs_info, info, 0, 0);
3190 	}
3191 
3192 	if (left < thresh) {
3193 		u64 flags = btrfs_system_alloc_profile(fs_info);
3194 
3195 		/*
3196 		 * Ignore failure to create system chunk. We might end up not
3197 		 * needing it, as we might not need to COW all nodes/leafs from
3198 		 * the paths we visit in the chunk tree (they were already COWed
3199 		 * or created in the current transaction for example).
3200 		 */
3201 		ret = btrfs_alloc_chunk(trans, flags);
3202 	}
3203 
3204 	if (!ret) {
3205 		ret = btrfs_block_rsv_add(fs_info->chunk_root,
3206 					  &fs_info->chunk_block_rsv,
3207 					  thresh, BTRFS_RESERVE_NO_FLUSH);
3208 		if (!ret)
3209 			trans->chunk_bytes_reserved += thresh;
3210 	}
3211 }
3212 
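/*
 * Drop the inode references that block groups hold on their free space cache
 * inodes, waiting for any in-progress caching to finish first.  Used during
 * unmount.
 */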
3213 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
3214 {
3215 	struct btrfs_block_group *block_group;
3216 	u64 last = 0;
3217 
3218 	while (1) {
3219 		struct inode *inode;
3220 
3221 		block_group = btrfs_lookup_first_block_group(info, last);
3222 		while (block_group) {
3223 			btrfs_wait_block_group_cache_done(block_group);
3224 			spin_lock(&block_group->lock);
3225 			if (block_group->iref)
3226 				break;
3227 			spin_unlock(&block_group->lock);
3228 			block_group = btrfs_next_block_group(block_group);
3229 		}
3230 		if (!block_group) {
3231 			if (last == 0)
3232 				break;
3233 			last = 0;
3234 			continue;
3235 		}
3236 
3237 		inode = block_group->inode;
3238 		block_group->iref = 0;
3239 		block_group->inode = NULL;
3240 		spin_unlock(&block_group->lock);
3241 		ASSERT(block_group->io_ctl.inode == NULL);
3242 		iput(inode);
3243 		last = block_group->start + block_group->length;
3244 		btrfs_put_block_group(block_group);
3245 	}
3246 }
3247 
3248 /*
3249  * Must be called only after stopping all workers, since we could have block
3250  * group caching kthreads running, and therefore they could race with us if we
3251  * freed the block groups before stopping them.
3252  */
3253 int btrfs_free_block_groups(struct btrfs_fs_info *info)
3254 {
3255 	struct btrfs_block_group *block_group;
3256 	struct btrfs_space_info *space_info;
3257 	struct btrfs_caching_control *caching_ctl;
3258 	struct rb_node *n;
3259 
3260 	down_write(&info->commit_root_sem);
3261 	while (!list_empty(&info->caching_block_groups)) {
3262 		caching_ctl = list_entry(info->caching_block_groups.next,
3263 					 struct btrfs_caching_control, list);
3264 		list_del(&caching_ctl->list);
3265 		btrfs_put_caching_control(caching_ctl);
3266 	}
3267 	up_write(&info->commit_root_sem);
3268 
3269 	spin_lock(&info->unused_bgs_lock);
3270 	while (!list_empty(&info->unused_bgs)) {
3271 		block_group = list_first_entry(&info->unused_bgs,
3272 					       struct btrfs_block_group,
3273 					       bg_list);
3274 		list_del_init(&block_group->bg_list);
3275 		btrfs_put_block_group(block_group);
3276 	}
3277 	spin_unlock(&info->unused_bgs_lock);
3278 
3279 	spin_lock(&info->block_group_cache_lock);
3280 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
3281 		block_group = rb_entry(n, struct btrfs_block_group,
3282 				       cache_node);
3283 		rb_erase(&block_group->cache_node,
3284 			 &info->block_group_cache_tree);
3285 		RB_CLEAR_NODE(&block_group->cache_node);
3286 		spin_unlock(&info->block_group_cache_lock);
3287 
3288 		down_write(&block_group->space_info->groups_sem);
3289 		list_del(&block_group->list);
3290 		up_write(&block_group->space_info->groups_sem);
3291 
3292 		/*
3293 		 * We haven't cached this block group, which means we could
3294 		 * possibly have excluded extents on this block group.
3295 		 */
3296 		if (block_group->cached == BTRFS_CACHE_NO ||
3297 		    block_group->cached == BTRFS_CACHE_ERROR)
3298 			btrfs_free_excluded_extents(block_group);
3299 
3300 		btrfs_remove_free_space_cache(block_group);
3301 		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3302 		ASSERT(list_empty(&block_group->dirty_list));
3303 		ASSERT(list_empty(&block_group->io_list));
3304 		ASSERT(list_empty(&block_group->bg_list));
3305 		ASSERT(atomic_read(&block_group->count) == 1);
3306 		btrfs_put_block_group(block_group);
3307 
3308 		spin_lock(&info->block_group_cache_lock);
3309 	}
3310 	spin_unlock(&info->block_group_cache_lock);
3311 
3312 	/*
3313 	 * Now that all the block groups are freed, go through and free all the
3314 	 * space_info structs.  This is only called during the final stages of
3315 	 * unmount, and so we know nobody is using them.  We call
3316 	 * synchronize_rcu() once before we start, just to be on the safe side.
3317 	 */
3318 	synchronize_rcu();
3319 
3320 	btrfs_release_global_block_rsv(info);
3321 
3322 	while (!list_empty(&info->space_info)) {
3323 		space_info = list_entry(info->space_info.next,
3324 					struct btrfs_space_info,
3325 					list);
3326 
3327 		/*
3328 		 * Do not hide this behind enospc_debug, this is actually
3329 		 * important and indicates a real bug if this happens.
3330 		 */
3331 		if (WARN_ON(space_info->bytes_pinned > 0 ||
3332 			    space_info->bytes_reserved > 0 ||
3333 			    space_info->bytes_may_use > 0))
3334 			btrfs_dump_space_info(info, space_info, 0, 0);
3335 		list_del(&space_info->list);
3336 		btrfs_sysfs_remove_space_info(space_info);
3337 	}
3338 	return 0;
3339 }
3340