/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_ZONED_H
#define BTRFS_ZONED_H

#include <linux/types.h>
#include <linux/blkdev.h>
#include "messages.h"
#include "volumes.h"
#include "disk-io.h"
#include "block-group.h"
#include "btrfs_inode.h"

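/*
 * Default value (in percent) of the zoned block group reclaim threshold,
 * i.e. the default for the bg_reclaim_threshold sysfs knob that controls
 * when zoned block groups get picked for reclaim/relocation.
 */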
#define BTRFS_DEFAULT_RECLAIM_THRESH			(75)

struct btrfs_zoned_device_info {
	/*
	 * Number of zones, zone size and types of zones if bdev is a
	 * zoned block device.
	 */
	u64 zone_size;			/* Zone size in bytes */
	u8  zone_size_shift;		/* ilog2(zone_size) for cheap zone number calculation */
	u32 nr_zones;			/* Number of zones on the device */
	unsigned int max_active_zones;	/* Device limit on simultaneously active zones, 0 if unlimited */
	atomic_t active_zones_left;	/* How many more zones can still be activated */
	unsigned long *seq_zones;	/* Bitmap of sequential write required zones */
	unsigned long *empty_zones;	/* Bitmap of empty zones */
	unsigned long *active_zones;	/* Bitmap of currently active zones */
	struct blk_zone *zone_cache;	/* Cached zone report, avoids repeated reports during mount */
	struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];	/* Zones holding the super block copies */
};

#ifdef CONFIG_BLK_DEV_ZONED
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone);
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache);
void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret);
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret);
int btrfs_advance_sb_log(struct btrfs_device *device, int mirror);
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror);
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes);
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes);
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new);
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb);
void btrfs_free_redirty_list(struct btrfs_transaction *trans);
bool btrfs_use_zone_append(struct btrfs_bio *bbio);
void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret);
void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb);
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos);
bool btrfs_zone_activate(struct btrfs_block_group *block_group);
int btrfs_zone_finish(struct btrfs_block_group *block_group);
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags);
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
			     u64 length);
void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
				   struct extent_buffer *eb);
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info);
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
				       u64 length);
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info, bool do_finish);
#else /* CONFIG_BLK_DEV_ZONED */
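/*
 * Stub variants used when zoned block device support is not compiled in.
 * Most of them return success or a benign default so that callers do not
 * need #ifdefs of their own; operations that cannot be emulated return
 * -EOPNOTSUPP.
 */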
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
				     struct blk_zone *zone)
{
	return 0;
}

static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	return 0;
}

static inline int btrfs_get_dev_zone_info(struct btrfs_device *device,
					  bool populate_cache)
{
	return 0;
}

static inline void btrfs_destroy_dev_zone_info(struct btrfs_device *device) { }

/*
 * In case the kernel is compiled without CONFIG_BLK_DEV_ZONED, we'll never
 * call into btrfs_clone_dev_zone_info(), so it's safe to return NULL here.
 */
static inline struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(
						 struct btrfs_device *orig_dev)
{
	return NULL;
}

static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_err(fs_info, "zoned block device support is not enabled");
	return -EOPNOTSUPP;
}

static inline int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	return 0;
}

static inline int btrfs_sb_log_location_bdev(struct block_device *bdev,
					     int mirror, int rw, u64 *bytenr_ret)
{
	*bytenr_ret = btrfs_sb_offset(mirror);
	return 0;
}

static inline int btrfs_sb_log_location(struct btrfs_device *device, int mirror,
					int rw, u64 *bytenr_ret)
{
	*bytenr_ret = btrfs_sb_offset(mirror);
	return 0;
}

static inline int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	return 0;
}

static inline int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	return 0;
}

static inline u64 btrfs_find_allocatable_zones(struct btrfs_device *device,
					       u64 hole_start, u64 hole_end,
					       u64 num_bytes)
{
	return hole_start;
}

static inline int btrfs_reset_device_zone(struct btrfs_device *device,
					  u64 physical, u64 length, u64 *bytes)
{
	*bytes = 0;
	return 0;
}

static inline int btrfs_ensure_empty_zones(struct btrfs_device *device,
					   u64 start, u64 size)
{
	return 0;
}

static inline int btrfs_load_block_group_zone_info(
		struct btrfs_block_group *cache, bool new)
{
	return 0;
}

static inline void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) { }

static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
					  struct extent_buffer *eb) { }
static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }

static inline bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
	return false;
}

static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
}

static inline void btrfs_rewrite_logical_zoned(
				struct btrfs_ordered_extent *ordered) { }

static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *eb,
			       struct btrfs_block_group **cache_ret)
{
	return true;
}

static inline void btrfs_revert_meta_write_pointer(
						struct btrfs_block_group *cache,
						struct extent_buffer *eb)
{
}

static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
					    u64 physical, u64 length)
{
	return -EOPNOTSUPP;
}

static inline int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev,
						u64 logical, u64 physical_start,
						u64 physical_pos)
{
	return -EOPNOTSUPP;
}

static inline bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	return true;
}

static inline int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
	return 0;
}

static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
					   u64 flags)
{
	return true;
}

static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
					   u64 logical, u64 length) { }

static inline void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
						 struct extent_buffer *eb) { }

static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }

static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }

static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
{
	return false;
}

static inline void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info,
						     u64 logical, u64 length) { }

static inline int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
	return 1;
}

static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
					      struct btrfs_space_info *space_info,
					      bool do_finish)
{
	/* Consider all the block groups to be active. */
	return 0;
}

#endif /* CONFIG_BLK_DEV_ZONED */

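/*
 * Check whether the zone containing byte offset @pos on @device is a
 * sequential write required zone. A device without zone info (i.e. not used
 * in zoned mode) has no sequential zones.
 */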
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return false;

	return test_bit(pos >> zone_info->zone_size_shift, zone_info->seq_zones);
}

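/*
 * Check whether the zone containing byte offset @pos on @device is known to
 * be empty. Without zone info every location is treated as empty.
 */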
static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return true;

	return test_bit(pos >> zone_info->zone_size_shift, zone_info->empty_zones);
}

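/*
 * Set or clear the empty-zone bit of the zone containing byte offset @pos;
 * a no-op on devices without zone info. The two wrappers below provide the
 * set and clear variants.
 */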
static inline void btrfs_dev_set_empty_zone_bit(struct btrfs_device *device,
						u64 pos, bool set)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno;

	if (!zone_info)
		return;

	zno = pos >> zone_info->zone_size_shift;
	if (set)
		set_bit(zno, zone_info->empty_zones);
	else
		clear_bit(zno, zone_info->empty_zones);
}

static inline void btrfs_dev_set_zone_empty(struct btrfs_device *device, u64 pos)
{
	btrfs_dev_set_empty_zone_bit(device, pos, true);
}

static inline void btrfs_dev_clear_zone_empty(struct btrfs_device *device, u64 pos)
{
	btrfs_dev_set_empty_zone_bit(device, pos, false);
}

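/*
 * Check whether @bdev is compatible with @fs_info: on a zoned filesystem its
 * zone size must match the filesystem's (regular devices are allowed and get
 * emulated), while a regular filesystem rejects host-managed zoned devices.
 */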
static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_info,
						struct block_device *bdev)
{
	if (btrfs_is_zoned(fs_info)) {
		/*
		 * We can allow a regular device on a zoned filesystem, because
		 * we will emulate the zoned capabilities.
		 */
		if (!bdev_is_zoned(bdev))
			return true;

		return fs_info->zone_size ==
			(bdev_zone_sectors(bdev) << SECTOR_SHIFT);
	}

	/* Do not allow host-managed zoned devices on a regular filesystem. */
	return bdev_zoned_model(bdev) != BLK_ZONED_HM;
}

static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 pos)
{
	/*
	 * On a non-zoned device, any address is OK. On a zoned device, only
	 * zones that are not SEQUENTIAL WRITE REQUIRED can hold a super block.
	 */
	return device->zone_info == NULL || !btrfs_dev_is_sequential(device, pos);
}

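/*
 * Check whether the range [@physical, @physical + @length) may be reset with
 * a zone reset command: the start must be in a sequential zone and both
 * @physical and @length must be aligned to the zone size.
 */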
static inline bool btrfs_can_zone_reset(struct btrfs_device *device,
					u64 physical, u64 length)
{
	u64 zone_size;

	if (!btrfs_dev_is_sequential(device, physical))
		return false;

	zone_size = device->zone_info->zone_size;
	if (!IS_ALIGNED(physical, zone_size) || !IS_ALIGNED(length, zone_size))
		return false;

	return true;
}

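/*
 * Serialize metadata I/O on zoned filesystems via fs_info->zoned_meta_io_lock;
 * both helpers below are no-ops on regular filesystems.
 */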
static inline void btrfs_zoned_meta_io_lock(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return;
	mutex_lock(&fs_info->zoned_meta_io_lock);
}

static inline void btrfs_zoned_meta_io_unlock(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return;
	mutex_unlock(&fs_info->zoned_meta_io_lock);
}

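/*
 * Forget the block group currently dedicated to tree-log writes if it is @bg,
 * so that a new one gets picked for the next log write. No-op on regular
 * filesystems.
 */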
static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if (!btrfs_is_zoned(fs_info))
		return;

	spin_lock(&fs_info->treelog_bg_lock);
	if (fs_info->treelog_bg == bg->start)
		fs_info->treelog_bg = 0;
	spin_unlock(&fs_info->treelog_bg_lock);
}

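/*
 * Serialize I/O on the data relocation inode on zoned filesystems; the lock
 * is only taken when @inode belongs to the data relocation root.
 */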
static inline void btrfs_zoned_data_reloc_lock(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;

	if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
		mutex_lock(&root->fs_info->zoned_data_reloc_io_lock);
}

static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;

	if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
		mutex_unlock(&root->fs_info->zoned_data_reloc_io_lock);
}

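/*
 * Check whether a zoned block group is full, i.e. its allocation offset has
 * reached the zone capacity and nothing more can be allocated from it.
 */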
static inline bool btrfs_zoned_bg_is_full(const struct btrfs_block_group *bg)
{
	ASSERT(btrfs_is_zoned(bg->fs_info));
	return (bg->alloc_offset == bg->zone_capacity);
}

#endif /* BTRFS_ZONED_H */