Lines matching "cache-block-size"

/* SPDX-License-Identifier: GPL-2.0 */
#include "free-space-cache.h"
/* 0 < size <= 128K */
/* 128K < size <= 8M */
/* 8M < size < BG_LENGTH */
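/*
 * Illustrative sketch (not verbatim kernel code) of how the three size
 * thresholds above could map an allocation size onto a size class. The
 * BTRFS_BG_SZ_* constant names are assumptions for illustration; only
 * btrfs_calc_block_group_size_class(), declared at the end of this
 * excerpt, is attested here. SZ_128K and SZ_8M come from <linux/sizes.h>.
 */
static enum btrfs_block_group_size_class calc_size_class_sketch(u64 size)
{
	if (size <= SZ_128K)
		return BTRFS_BG_SZ_SMALL;	/* 0 < size <= 128K */
	if (size <= SZ_8M)
		return BTRFS_BG_SZ_MEDIUM;	/* 128K < size <= 8M */
	return BTRFS_BG_SZ_LARGE;		/* 8M < size < BG_LENGTH */
}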
/* Block group flags set at runtime */
/* Does the block group need to be added to the free space tree? */
/* Indicate that the block group is placed on a sequential zone */
/* Indicate that the block group is in the list of new block groups of a transaction */
/*
 * The last committed used bytes of this block group. If the above @used
 * is still the same as @commit_used, we don't need to update the block
 * group item of this block group.
 */
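/*
 * Illustrative sketch, assuming @used and @commit_used are fields of
 * struct btrfs_block_group as the comment above describes: a transaction
 * commit only needs to rewrite the on-disk block group item when the two
 * values differ.
 */
static bool block_group_item_dirty_sketch(const struct btrfs_block_group *bg)
{
	return bg->used != bg->commit_used;
}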
/*
 * If the free space extent count exceeds this number, convert the block
 * group to bitmaps.
 */
/*
 * If the free space extent count drops below this number, convert the
 * block group back to extents.
 */
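/*
 * Hedged sketch of the hysteresis the two thresholds above describe; the
 * struct and field names below are hypothetical, only the exceed/drop-below
 * behaviour comes from the comments. Two distinct thresholds keep the free
 * space cache from flip-flopping between extents and bitmaps.
 */
struct free_space_format_sketch {
	u32 extent_count;	/* current number of free space extents */
	u32 high_thresh;	/* exceeded: convert to bitmaps */
	u32 low_thresh;		/* dropped below: convert back to extents */
	bool use_bitmaps;
};

static void maybe_convert_format_sketch(struct free_space_format_sketch *ctl)
{
	if (!ctl->use_bitmaps && ctl->extent_count > ctl->high_thresh)
		ctl->use_bitmaps = true;
	else if (ctl->use_bitmaps && ctl->extent_count < ctl->low_thresh)
		ctl->use_bitmaps = false;
}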
/* Cache tracking stuff */
/* Free space cache stuff */
/* Block group cache stuff */
/* For block groups in the same raid type */
/* List of struct btrfs_free_cluster entries for this block group. */
/* For read-only block groups */
/*
 * When non-zero it means the block group's logical address and its
 * device extents cannot be reused for future block group allocations
 * until the counter goes down to 0. This is to prevent them from being
 * reused while some task is still using the block group after it was
 * deleted; we want to make sure they can only be reused for new block
 * groups after that task is done with the deleted block group.
 */
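/*
 * Illustrative use of the counter described above, through the
 * btrfs_freeze_block_group()/btrfs_unfreeze_block_group() helpers declared
 * later in this excerpt; work_on_range_sketch() is a hypothetical worker.
 */
static void frozen_user_sketch(struct btrfs_block_group *bg)
{
	btrfs_freeze_block_group(bg);
	/*
	 * While frozen, the logical address and device extents of @bg are
	 * not reused for new block groups, even if @bg is deleted meanwhile.
	 */
	work_on_range_sketch(bg);	/* hypothetical */
	btrfs_unfreeze_block_group(bg);
}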
/* For dirty block groups */
/* ... block group's range is created (after it's added to its inode's ... */
/* This is to prevent races between block group relocation and nocow writes */
/* Number of extents in this block group used for swap files. */
/* Allocation offset for the block group to implement sequential allocation */
/* in btrfs_block_group_end() */
return (block_group->start + block_group->length);
/* in btrfs_is_block_group_used() */
lockdep_assert_held(&bg->lock);
return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
/* in btrfs_is_block_group_data_only() */
/* ... efficiency, so only proper data block groups are considered. */
return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	!(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
struct btrfs_block_group *cache);
u64 chunk_offset, u64 size);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}
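/*
 * Sketch combining btrfs_cache_block_group() (declared above) with
 * btrfs_block_group_done(); error handling is simplified, and note that
 * "done" also covers the BTRFS_CACHE_ERROR state.
 */
static int ensure_space_cached_sketch(struct btrfs_block_group *cache)
{
	int ret = btrfs_cache_block_group(cache, true /* wait */);

	if (ret)
		return ret;
	return btrfs_block_group_done(cache) ? 0 : -EAGAIN;
}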
void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);