/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/refcount.h>

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF     1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF    2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT  3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * How many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
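
/*
 * Editor's illustrative sketch (hypothetical helper, not part of btrfs):
 * the net change a single ref node makes to the extent's reference count,
 * following the ref_mod comment above.  Individual ref nodes end up
 * carrying only ADD/DROP actions; BTRFS_ADD_DELAYED_EXTENT and
 * BTRFS_UPDATE_DELAYED_HEAD are handled at the head-ref level.
 */
static inline int btrfs_example_ref_node_delta(
				const struct btrfs_delayed_ref_node *node)
{
	/* drops subtract their (positive) ref_mod, adds contribute it */
	if (node->action == BTRFS_DROP_DELAYED_REF)
		return -node->ref_mod;
	return node->ref_mod;
}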

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	refcount_t refs;
	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/* BTRFS_ADD_DELAYED_REF nodes are also accumulated on this list. */
	struct list_head ref_add_list;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref.  It is not adjusted as delayed refs are run and
	 * is meant to track whether we need to do the csum accounting or not.
	 */
	int total_ref_mod;

	/*
	 * This is the current outstanding mod references for this bytenr.  It
	 * is used with lookup_extent_info to get an accurate reference count
	 * for a bytenr, so it is adjusted as delayed refs are run so that
	 * on-disk reference count + ref_mod is accurate.
	 */
	int ref_mod;

	/*
	 * When a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-RAM accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int is_system:1;
	unsigned int processing:1;
};
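
/*
 * Editor's illustrative sketch (hypothetical helper, not a btrfs API): per
 * the ref_mod comment above, the effective reference count of an extent is
 * whatever is on disk plus the pending modifications tracked in the head.
 * @on_disk_refs stands in for the count read from the extent tree, as
 * lookup_extent_info would report it.
 */
static inline s64 btrfs_example_effective_refs(
				const struct btrfs_delayed_ref_head *head,
				u64 on_disk_refs)
{
	/* ref_mod may be negative, so do the math in a signed type */
	return (s64)on_disk_refs + head->ref_mod;
}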

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

enum btrfs_delayed_ref_flags {
	/* Indicate that we are flushing delayed refs for the commit */
	BTRFS_DELAYED_REFS_FLUSHING,
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	unsigned long flags;

	u64 run_delayed_start;

	/*
	 * To make qgroup skip the given root.
	 *
	 * This is for snapshots, as btrfs_qgroup_inherit() will manually
	 * modify counters for the snapshot and its source, so we should skip
	 * the snapshot in new_root/old_roots or it will get counted twice.
	 */
	u64 qgroup_to_skip;
};

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
};

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Original root this data extent belongs to */
	u64 owning_root;

	/* Inode which refers to this data extent */
	u64 ino;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};

struct btrfs_tree_ref {
	/*
	 * Level of this tree block.
	 *
	 * Shared for skinny (TREE_BLOCK_REF) and normal tree refs.
	 */
	int level;

	/*
	 * Root which owns this tree block.
	 *
	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed).
	 */
	u64 owning_root;

	/* For non-skinny metadata, no special member is needed. */
};

struct btrfs_ref {
	enum btrfs_ref_type type;
	int action;

	/*
	 * Whether this extent should go through qgroup record.
	 *
	 * Normally false, but for certain cases like delayed subtree scan,
	 * setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* The root through which this modification is made. */
	u64 real_root;
#endif
	u64 bytenr;
	u64 len;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
					       int num_delayed_refs)
{
	u64 num_bytes;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	return num_bytes;
}

static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
					  int action, u64 bytenr, u64 len,
					  u64 parent)
{
	generic_ref->action = action;
	generic_ref->bytenr = bytenr;
	generic_ref->len = len;
	generic_ref->parent = parent;
}

static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level,
				       u64 root, u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @mod_root is not set, use @root as the fallback */
	generic_ref->real_root = mod_root ?: root;
#endif
	generic_ref->tree_ref.level = level;
	generic_ref->tree_ref.owning_root = root;
	generic_ref->type = BTRFS_REF_METADATA;
	if (skip_qgroup || !(is_fstree(root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}

static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
				       u64 ref_root, u64 ino, u64 offset,
				       u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @mod_root is not set, use @ref_root as the fallback */
	generic_ref->real_root = mod_root ?: ref_root;
#endif
	generic_ref->data_ref.owning_root = ref_root;
	generic_ref->data_ref.ino = ino;
	generic_ref->data_ref.offset = offset;
	generic_ref->type = BTRFS_REF_DATA;
	if (skip_qgroup || !(is_fstree(ref_root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}
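
/*
 * Editor's illustrative sketch (hypothetical helper, with placeholder
 * argument names): the usual two-step initialization of a struct btrfs_ref
 * for a data extent, as callers do before queueing the change with
 * btrfs_add_delayed_data_ref() (declared further below).
 */
static inline void btrfs_example_build_data_ref(struct btrfs_ref *ref,
						u64 bytenr, u64 num_bytes,
						u64 root_objectid, u64 ino,
						u64 offset)
{
	/* generic part: action, extent range, parent == 0 means not shared */
	btrfs_init_generic_ref(ref, BTRFS_ADD_DELAYED_REF, bytenr, num_bytes, 0);
	/* data-specific part: owning root, inode and adjusted file offset */
	btrfs_init_data_ref(ref, root_objectid, ino, offset, 0, false);
}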

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(refcount_read(&ref->refs) == 0);
	if (refcount_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

static inline u64 btrfs_ref_head_to_space_flags(
				struct btrfs_delayed_ref_head *head_ref)
{
	if (head_ref->is_data)
		return BTRFS_BLOCK_GROUP_DATA;
	else if (head_ref->is_system)
		return BTRFS_BLOCK_GROUP_SYSTEM;
	return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

#endif