xref: /openbmc/linux/fs/btrfs/block-group.h (revision e97ebc2a)
1aac0023cSJosef Bacik /* SPDX-License-Identifier: GPL-2.0 */
2aac0023cSJosef Bacik 
3aac0023cSJosef Bacik #ifndef BTRFS_BLOCK_GROUP_H
4aac0023cSJosef Bacik #define BTRFS_BLOCK_GROUP_H
5aac0023cSJosef Bacik 
667b61aefSDavid Sterba #include "free-space-cache.h"
767b61aefSDavid Sterba 
/*
 * State of a block group's on-disk cache (see the ->disk_cache_state and
 * ->cache_generation members of struct btrfs_block_group below).
 */
enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};
14aac0023cSJosef Bacik 
/*
 * Coarse classification of a block group by allocation size, stored in the
 * ->size_class member of struct btrfs_block_group (see
 * btrfs_calc_block_group_size_class() / btrfs_use_block_group_size_class()).
 */
enum btrfs_block_group_size_class {
	/* Unset */
	BTRFS_BG_SZ_NONE,
	/* 0 < size <= 128K */
	BTRFS_BG_SZ_SMALL,
	/* 128K < size <= 8M */
	BTRFS_BG_SZ_MEDIUM,
	/* 8M < size < BG_LENGTH */
	BTRFS_BG_SZ_LARGE,
};
2552bb7a21SBoris Burkov 
/*
 * This describes the state of the block_group for async discard.  This is due
 * to the two pass nature of it where extent discarding is prioritized over
 * bitmap discarding.  BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (eg. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};
382bee7eb8SDennis Zhou 
/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
 * find_free_extent() that also activates the zone
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
5907730d87SJosef Bacik 
/*
 * Block group flags set at runtime.
 *
 * These are bit numbers for the ->runtime_flags member of
 * struct btrfs_block_group, not on-disk flags (those live in ->flags).
 */
enum btrfs_block_group_flags {
	BLOCK_GROUP_FLAG_IREF,
	BLOCK_GROUP_FLAG_REMOVED,
	BLOCK_GROUP_FLAG_TO_COPY,
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
	/* Does the block group need to be added to the free space tree? */
	BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
	/* Indicate that the block group is placed on a sequential zone */
	BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
	/*
	 * Indicate that block group is in the list of new block groups of a
	 * transaction.
	 */
	BLOCK_GROUP_FLAG_NEW,
};
793349b57fSJosef Bacik 
/*
 * Progress states of caching a block group's free space, stored in the
 * ->cached member of struct btrfs_block_group (see btrfs_block_group_done()).
 */
enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};
8616708a88SJosef Bacik 
/*
 * Per-block-group control structure for the free space caching work
 * (queued via the embedded btrfs_work; waiters sleep on ->wait).
 */
struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	/* Track progress of caching during allocation. */
	atomic_t progress;
	/* Reference count, see btrfs_get/put_caching_control(). */
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M
100aac0023cSJosef Bacik 
/*
 * In-memory representation of a single block group (a contiguous chunk of
 * logical address space of one type: data, metadata or system).
 */
struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	/* Protects the accounting members below (pinned, reserved, used, ...). */
	spinlock_t lock;
	/* Logical start offset of the block group. */
	u64 start;
	/* Length in bytes; the block group covers [start, start + length). */
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	/* On-disk type/profile flags (e.g. BTRFS_BLOCK_GROUP_DATA). */
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * The last committed used bytes of this block group, if the above @used
	 * is still the same as @commit_used, we don't need to update block
	 * group item of this block group.
	 */
	u64 commit_used;
	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * It is just used for the delayed data space allocation because
	 * only the data space allocation and the relative metadata update
	 * can be done cross the transaction.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	/* Bit flags from enum btrfs_block_group_flags above. */
	unsigned long runtime_flags;

	/* Non-zero when the block group is read-only. */
	unsigned int ro;

	/* One of enum btrfs_disk_cache_state. */
	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;		/* One of enum btrfs_caching_type. */
	struct btrfs_caching_control *caching_ctl;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	/* See btrfs_get/put_block_group(). */
	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change
	 */
	struct list_head cluster_list;

	/*
	 * Used for several lists:
	 *
	 * 1) struct btrfs_fs_info::unused_bgs
	 * 2) struct btrfs_fs_info::reclaim_bgs
	 * 3) struct btrfs_transaction::deleted_bgs
	 * 4) struct btrfs_trans_handle::new_bgs
	 */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents can not be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
	enum btrfs_block_group_size_class size_class;
};
252aac0023cSJosef Bacik 
btrfs_block_group_end(struct btrfs_block_group * block_group)253b0643e59SDennis Zhou static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
254b0643e59SDennis Zhou {
255b0643e59SDennis Zhou 	return (block_group->start + block_group->length);
256b0643e59SDennis Zhou }
257b0643e59SDennis Zhou 
btrfs_is_block_group_used(const struct btrfs_block_group * bg)258*e97ebc2aSFilipe Manana static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
259*e97ebc2aSFilipe Manana {
260*e97ebc2aSFilipe Manana 	lockdep_assert_held(&bg->lock);
261*e97ebc2aSFilipe Manana 
262*e97ebc2aSFilipe Manana 	return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
263*e97ebc2aSFilipe Manana }
264*e97ebc2aSFilipe Manana 
btrfs_is_block_group_data_only(struct btrfs_block_group * block_group)2655cb0724eSDennis Zhou static inline bool btrfs_is_block_group_data_only(
2665cb0724eSDennis Zhou 					struct btrfs_block_group *block_group)
2675cb0724eSDennis Zhou {
2685cb0724eSDennis Zhou 	/*
2695cb0724eSDennis Zhou 	 * In mixed mode the fragmentation is expected to be high, lowering the
2705cb0724eSDennis Zhou 	 * efficiency, so only proper data block groups are considered.
2715cb0724eSDennis Zhou 	 */
2725cb0724eSDennis Zhou 	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
2735cb0724eSDennis Zhou 	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
2745cb0724eSDennis Zhou }
2755cb0724eSDennis Zhou 
#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group);
#endif

/* Lookup and reference counting. */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);

/* Reservation and nocow-writer counters (see the struct members above). */
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);

/* Free space caching. */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
				           u64 num_bytes);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
int btrfs_add_new_free_space(struct btrfs_block_group *block_group,
			     u64 start, u64 end, u64 *total_added_ret);

/* Creation, removal and reclaim of block groups. */
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);

/* Read-only toggling and dirty block group writeback. */
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);

/* Space accounting and chunk allocation. */
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc,
			     bool force_wrong_size_class);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
342878d7b67SJosef Bacik 
btrfs_data_alloc_profile(struct btrfs_fs_info * fs_info)343878d7b67SJosef Bacik static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
344878d7b67SJosef Bacik {
345878d7b67SJosef Bacik 	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
346878d7b67SJosef Bacik }
347878d7b67SJosef Bacik 
btrfs_metadata_alloc_profile(struct btrfs_fs_info * fs_info)348878d7b67SJosef Bacik static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
349878d7b67SJosef Bacik {
350878d7b67SJosef Bacik 	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
351878d7b67SJosef Bacik }
352878d7b67SJosef Bacik 
btrfs_system_alloc_profile(struct btrfs_fs_info * fs_info)353878d7b67SJosef Bacik static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
354878d7b67SJosef Bacik {
355878d7b67SJosef Bacik 	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
356878d7b67SJosef Bacik }
357676f1f75SJosef Bacik 
btrfs_block_group_done(struct btrfs_block_group * cache)35832da5386SDavid Sterba static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
359676f1f75SJosef Bacik {
360676f1f75SJosef Bacik 	smp_mb();
361676f1f75SJosef Bacik 	return cache->cached == BTRFS_CACHE_FINISHED ||
362676f1f75SJosef Bacik 		cache->cached == BTRFS_CACHE_ERROR;
363676f1f75SJosef Bacik }
3642e405ad8SJosef Bacik 
/* Freeze/unfreeze reuse of a deleted block group (see ->frozen above). */
void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

/* Swap file extent accounting (see ->swap_extents above). */
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

/* Size class helpers (see enum btrfs_block_group_size_class above). */
enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class);
bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg);

#endif /* BTRFS_BLOCK_GROUP_H */
378