/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_ZONED_H
#define BTRFS_ZONED_H

#include <linux/types.h>
#include <linux/blkdev.h>
#include "volumes.h"
#include "disk-io.h"
#include "block-group.h"

/*
 * Block groups with more than this percentage of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_RECLAIM_THRESH 75

struct btrfs_zoned_device_info {
	/*
	 * Number of zones, zone size and types of zones if bdev is a
	 * zoned block device.
	 */
	u64 zone_size;
	u8 zone_size_shift;
	u64 max_zone_append_size;
	u32 nr_zones;
	unsigned long *seq_zones;
	unsigned long *empty_zones;
	struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
};
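
/*
 * seq_zones and empty_zones are bitmaps indexed by zone number, i.e. the
 * physical position shifted right by zone_size_shift (see
 * btrfs_dev_is_sequential() and btrfs_dev_is_empty_zone() below).
 * sb_zones[] holds two zones per superblock mirror, hence the
 * 2 * BTRFS_SUPER_MIRROR_MAX entries; they back the superblock log
 * handled by btrfs_sb_log_location() and btrfs_advance_sb_log().
 */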

#ifdef CONFIG_BLK_DEV_ZONED
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone);
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
int btrfs_get_dev_zone_info(struct btrfs_device *device);
void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret);
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret);
void btrfs_advance_sb_log(struct btrfs_device *device, int mirror);
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror);
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes);
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes);
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new);
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb);
void btrfs_free_redirty_list(struct btrfs_transaction *trans);
bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start);
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
				 struct bio *bio);
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret);
void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb);
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
				     struct blk_zone *zone)
{
	return 0;
}

static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	return 0;
}

static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
{
	return 0;
}

static inline void btrfs_destroy_dev_zone_info(struct btrfs_device *device) { }

static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_err(fs_info, "zoned block devices support is not enabled");
	return -EOPNOTSUPP;
}

static inline int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	return 0;
}

static inline int btrfs_sb_log_location_bdev(struct block_device *bdev,
					     int mirror, int rw, u64 *bytenr_ret)
{
	*bytenr_ret = btrfs_sb_offset(mirror);
	return 0;
}

static inline int btrfs_sb_log_location(struct btrfs_device *device, int mirror,
					int rw, u64 *bytenr_ret)
{
	*bytenr_ret = btrfs_sb_offset(mirror);
	return 0;
}

static inline void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{ }

static inline int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	return 0;
}

static inline u64 btrfs_find_allocatable_zones(struct btrfs_device *device,
					       u64 hole_start, u64 hole_end,
					       u64 num_bytes)
{
	return hole_start;
}

static inline int btrfs_reset_device_zone(struct btrfs_device *device,
					  u64 physical, u64 length, u64 *bytes)
{
	*bytes = 0;
	return 0;
}

static inline int btrfs_ensure_empty_zones(struct btrfs_device *device,
					   u64 start, u64 size)
{
	return 0;
}

static inline int btrfs_load_block_group_zone_info(
		struct btrfs_block_group *cache, bool new)
{
	return 0;
}

static inline void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) { }

static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
					  struct extent_buffer *eb) { }
static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }

static inline bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
	return false;
}

static inline void btrfs_record_physical_zoned(struct inode *inode,
					       u64 file_offset, struct bio *bio)
{
}

static inline void btrfs_rewrite_logical_zoned(
				struct btrfs_ordered_extent *ordered) { }

static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
						  struct extent_buffer *eb,
						  struct btrfs_block_group **cache_ret)
{
	return true;
}

static inline void btrfs_revert_meta_write_pointer(
						struct btrfs_block_group *cache,
						struct extent_buffer *eb)
{
}

static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
					    u64 physical, u64 length)
{
	return -EOPNOTSUPP;
}

static inline int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev,
						u64 logical, u64 physical_start,
						u64 physical_pos)
{
	return -EOPNOTSUPP;
}

#endif

static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return false;

	return test_bit(pos >> zone_info->zone_size_shift, zone_info->seq_zones);
}

static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return true;

	return test_bit(pos >> zone_info->zone_size_shift, zone_info->empty_zones);
}

static inline void btrfs_dev_set_empty_zone_bit(struct btrfs_device *device,
						u64 pos, bool set)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno;

	if (!zone_info)
		return;

	zno = pos >> zone_info->zone_size_shift;
	if (set)
		set_bit(zno, zone_info->empty_zones);
	else
		clear_bit(zno, zone_info->empty_zones);
}

static inline void btrfs_dev_set_zone_empty(struct btrfs_device *device, u64 pos)
{
	btrfs_dev_set_empty_zone_bit(device, pos, true);
}

static inline void btrfs_dev_clear_zone_empty(struct btrfs_device *device, u64 pos)
{
	btrfs_dev_set_empty_zone_bit(device, pos, false);
}
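
/*
 * Illustrative sketch of how the zone bitmap helpers above are meant to be
 * combined (not a verbatim call site): a caller that wants to write a zone
 * from its start typically checks that the zone is sequential and still
 * empty, then clears the empty bit once it starts writing, e.g.:
 *
 *	if (btrfs_dev_is_sequential(device, physical) &&
 *	    btrfs_dev_is_empty_zone(device, physical)) {
 *		... write from the start of the zone ...
 *		btrfs_dev_clear_zone_empty(device, physical);
 *	}
 */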

static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_info,
						struct block_device *bdev)
{
	if (btrfs_is_zoned(fs_info)) {
		/*
		 * We can allow a regular device on a zoned filesystem, because
		 * we will emulate the zoned capabilities.
		 */
		if (!bdev_is_zoned(bdev))
			return true;

		return fs_info->zone_size ==
			(bdev_zone_sectors(bdev) << SECTOR_SHIFT);
	}

	/* Do not allow Host Managed zoned devices */
	return bdev_zoned_model(bdev) != BLK_ZONED_HM;
}

static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 pos)
{
	/*
	 * On a non-zoned device, any address is OK. On a zoned device, only
	 * zones that are not SEQUENTIAL WRITE REQUIRED are usable.
	 */
	return device->zone_info == NULL || !btrfs_dev_is_sequential(device, pos);
}

static inline bool btrfs_can_zone_reset(struct btrfs_device *device,
					u64 physical, u64 length)
{
	u64 zone_size;

	if (!btrfs_dev_is_sequential(device, physical))
		return false;

	zone_size = device->zone_info->zone_size;
	if (!IS_ALIGNED(physical, zone_size) || !IS_ALIGNED(length, zone_size))
		return false;

	return true;
}

static inline void btrfs_zoned_meta_io_lock(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return;
	mutex_lock(&fs_info->zoned_meta_io_lock);
}

static inline void btrfs_zoned_meta_io_unlock(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_is_zoned(fs_info))
		return;
	mutex_unlock(&fs_info->zoned_meta_io_lock);
}

static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	if (!btrfs_is_zoned(fs_info))
		return;

	spin_lock(&fs_info->treelog_bg_lock);
	if (fs_info->treelog_bg == bg->start)
		fs_info->treelog_bg = 0;
	spin_unlock(&fs_info->treelog_bg_lock);
}

#endif