/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard.  It is due
 * to the two-pass nature of async discard: extent discarding is prioritized
 * over bitmap discarding.  BTRFS_DISCARD_RESET_CURSOR is set when we are
 * resetting between lists to prevent contention for the discard state
 * variables (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

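/*
 * A sketch of the expected progression for one block group, derived from the
 * description above (illustrative only, not lifted from the discard code):
 *
 *	BTRFS_DISCARD_EXTENTS -> BTRFS_DISCARD_BITMAPS ->
 *	BTRFS_DISCARD_RESET_CURSOR -> (eligible to start over with extents)
 */
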
/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called from
 * find_free_extent(), which also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
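
/*
 * Illustrative only, not taken from a real caller: a task that wants a data
 * chunk allocated only when space is genuinely needed could combine the
 * helpers declared later in this header like this (assumed pattern):
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_data_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;
 */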

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	u64 progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * This is only used for delayed data space allocation, because only
	 * the data space allocation and the related metadata updates can be
	 * done across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;
	unsigned int to_copy:1;
	unsigned int relocating_repair:1;
	unsigned int chunk_item_inserted:1;
	unsigned int zone_is_active:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents can not be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Flag indicating this block group is placed on a sequential zone */
	bool seq_zone;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
		struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
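
/*
 * Illustrative only (assumed pattern, not a verbatim caller): before
 * relocating or deleting a block group, a task can wait for the
 * "reservations" counter documented in struct btrfs_block_group to drain:
 *
 *	btrfs_wait_block_group_reservations(bg);
 *
 * while the allocation side drops its reservation with
 * btrfs_dec_block_group_reservations() once the matching ordered extent
 * exists.
 */
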
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
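
/*
 * Illustrative only (assumed shape of a nocow write path): the counter is
 * taken before doing a nocow write and dropped once the ordered extent
 * exists, while relocation waits for it to drain:
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... do the nocow write and create its ordered extent ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	}
 *
 * and on the relocation side: btrfs_wait_nocow_writers(bg);
 */
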
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
			    int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
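
/*
 * Illustrative only (assumed pattern): a caller that wants to watch caching
 * progress takes a reference on the caching control and drops it when done:
 *
 *	struct btrfs_caching_control *ctl = btrfs_get_caching_control(cache);
 *
 *	if (ctl) {
 *		... e.g. wait on ctl->wait as free space is found ...
 *		btrfs_put_caching_control(ctl);
 *	}
 */
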
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 bytes_used, u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     struct block_device *bdev, u64 physical, u64 **logical,
		     int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}
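
/*
 * Illustrative only (assumed pattern): callers that must not search a block
 * group before its free space is fully loaded typically combine the check
 * above with the wait helpers declared earlier in this header:
 *
 *	if (!btrfs_block_group_done(cache)) {
 *		btrfs_cache_block_group(cache, 0);
 *		btrfs_wait_block_group_cache_done(cache);
 *	}
 */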

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
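
/*
 * Illustrative only (assumed pattern): a long-running task such as scrub can
 * pin the block group's logical address and device extents against reuse,
 * even across deletion, via the "frozen" counter documented in the struct:
 *
 *	btrfs_freeze_block_group(cache);
 *	... work that relies on the group's range not being reallocated ...
 *	btrfs_unfreeze_block_group(cache);
 */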

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
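
/*
 * Illustrative only (assumed pattern for swapfile activation): take one
 * reference per extent backing the swap file, failing activation when the
 * helper refuses, and drop them all on deactivation.  nr_extents below is a
 * placeholder count, not a field from this header:
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		return -EINVAL;
 *	...
 *	btrfs_dec_block_group_swap_extents(bg, nr_extents);
 */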

#endif /* BTRFS_BLOCK_GROUP_H */