/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/refcount.h>

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * How many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node a head ref? */
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	u64 flags_to_set;
};
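
/*
 * Illustrative sketch (not part of the original header): filling in and
 * queueing a delayed extent op, in the style of callers such as
 * btrfs_set_disk_extent_flags().  btrfs_alloc_delayed_extent_op(),
 * btrfs_free_delayed_extent_op() and btrfs_add_delayed_extent_op() are
 * declared further down in this file; 'trans', 'flags', 'level', 'bytenr'
 * and 'num_bytes' are placeholders.
 *
 *	struct btrfs_delayed_extent_op *extent_op;
 *	int ret;
 *
 *	extent_op = btrfs_alloc_delayed_extent_op();
 *	if (!extent_op)
 *		return -ENOMEM;
 *
 *	extent_op->flags_to_set = flags;
 *	extent_op->update_flags = true;
 *	extent_op->update_key = false;
 *	extent_op->level = level;
 *
 *	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
 *	if (ret)
 *		btrfs_free_delayed_extent_op(extent_op);
 */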

/*
 * The head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	/*
	 * For insertion into struct btrfs_delayed_ref_root::href_root.
	 * Keep it in the same cache line as 'bytenr' for more efficient
	 * searches in the rbtree.
	 */
	struct rb_node href_node;
	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	refcount_t refs;

	/* Protects 'ref_tree' and 'ref_add_list'. */
	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/*
	 * Accumulates BTRFS_ADD_DELAYED_REF nodes, so additions can be
	 * found without walking the whole ref_tree.
	 */
	struct list_head ref_add_list;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs
	 * associated with this head ref; it is not adjusted as delayed refs
	 * are run.  It is meant to track whether we need to do the csum
	 * accounting or not.
	 */
	int total_ref_mod;

	/*
	 * This is the current outstanding mod references for this bytenr.
	 * It is used with lookup_extent_info to get an accurate reference
	 * count for a bytenr, so it is adjusted as delayed refs are run so
	 * that any on disk reference count + ref_mod is accurate.
	 */
	int ref_mod;

	/*
	 * When a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int is_system:1;
	unsigned int processing:1;
};

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

enum btrfs_delayed_ref_flags {
	/* Indicate that we are flushing delayed refs for the commit. */
	BTRFS_DELAYED_REFS_FLUSHING,
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * How many delayed ref updates we've queued, used by the
	 * throttling code.
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	unsigned long flags;

	u64 run_delayed_start;

	/*
	 * To make qgroup skip a given root.
	 *
	 * This is for snapshot creation, as btrfs_qgroup_inherit() will
	 * manually modify counters for the snapshot and its source, so we
	 * should skip the snapshot in new_root/old_roots or it will get
	 * calculated twice.
	 */
	u64 qgroup_to_skip;
};
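
/*
 * Illustrative sketch (not from the kernel source): the typical pattern for
 * taking a head ref off the tree for processing, using
 * btrfs_select_ref_head(), btrfs_delayed_ref_lock(),
 * btrfs_delayed_ref_unlock() and btrfs_put_delayed_ref_head() declared
 * below.  The actual running of the refs is elided; 'delayed_refs' is a
 * placeholder for &trans->transaction->delayed_refs.
 *
 *	struct btrfs_delayed_ref_head *head;
 *	int ret;
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_select_ref_head(delayed_refs);
 *	if (!head) {
 *		spin_unlock(&delayed_refs->lock);
 *		return 0;
 *	}
 *
 *	// Takes head->mutex; may drop and retake delayed_refs->lock, so a
 *	// return of -EAGAIN means the head went away and must be retried.
 *	ret = btrfs_delayed_ref_lock(delayed_refs, head);
 *	spin_unlock(&delayed_refs->lock);
 *
 *	... run the ref nodes hanging off 'head' under head->lock ...
 *
 *	btrfs_delayed_ref_unlock(head);
 *	btrfs_put_delayed_ref_head(head);
 */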

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
};

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Original root this data extent belongs to */
	u64 owning_root;

	/* Inode which refers to this data extent */
	u64 ino;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};

struct btrfs_tree_ref {
	/*
	 * Level of this tree block.
	 *
	 * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
	 */
	int level;

	/*
	 * Root which owns this tree block.
	 *
	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed).
	 */
	u64 owning_root;

	/* For non-skinny metadata, no special member is needed. */
};

struct btrfs_ref {
	enum btrfs_ref_type type;
	int action;

	/*
	 * Whether this extent should go through qgroup record.
	 *
	 * Normally false, but for certain cases like delayed subtree scan,
	 * setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* Through which root this modification is made. */
	u64 real_root;
#endif
	u64 bytenr;
	u64 len;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
					       int num_delayed_refs)
{
	u64 num_bytes;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	return num_bytes;
}

static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
					  int action, u64 bytenr, u64 len,
					  u64 parent)
{
	generic_ref->action = action;
	generic_ref->bytenr = bytenr;
	generic_ref->len = len;
	generic_ref->parent = parent;
}

static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level,
				       u64 root, u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @real_root not set, use @root as fallback */
	generic_ref->real_root = mod_root ?: root;
#endif
	generic_ref->tree_ref.level = level;
	generic_ref->tree_ref.owning_root = root;
	generic_ref->type = BTRFS_REF_METADATA;
	if (skip_qgroup || !(is_fstree(root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}

static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
				u64 ref_root, u64 ino, u64 offset, u64 mod_root,
				bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @real_root not set, use @ref_root as fallback */
	generic_ref->real_root = mod_root ?: ref_root;
#endif
	generic_ref->data_ref.owning_root = ref_root;
	generic_ref->data_ref.ino = ino;
	generic_ref->data_ref.offset = offset;
	generic_ref->type = BTRFS_REF_DATA;
	if (skip_qgroup || !(is_fstree(ref_root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}
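
/*
 * Illustrative sketch (not from the kernel source): queueing a delayed ref
 * for an existing data extent, combining the init helpers above with
 * btrfs_add_delayed_data_ref() (declared below), in the style of callers
 * in extent-tree.c.  All variables are placeholders, and the final
 * argument is the reserved byte count used when recording a freshly
 * allocated extent; 0 here, assuming an already existing extent.
 *
 *	struct btrfs_ref ref = { 0 };
 *	int ret;
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
 *			       num_bytes, parent);
 *	btrfs_init_data_ref(&ref, root_objectid, ino, file_offset,
 *			    mod_root, false);
 *	ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
 */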

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(refcount_read(&ref->refs) == 0);
	if (refcount_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

static inline u64 btrfs_ref_head_to_space_flags(
				struct btrfs_delayed_ref_head *head_ref)
{
	if (head_ref->is_data)
		return BTRFS_BLOCK_GROUP_DATA;
	else if (head_ref->is_system)
		return BTRFS_BLOCK_GROUP_SYSTEM;
	return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * Helper functions to cast a node into its container.
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

#endif
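
/*
 * Illustrative sketch (not from the kernel source): dispatching on
 * node->type before using the cast helpers above, mirroring the switch in
 * btrfs_put_delayed_ref().  'node' is a placeholder for a delayed ref node
 * being processed.
 *
 *	switch (node->type) {
 *	case BTRFS_TREE_BLOCK_REF_KEY:
 *	case BTRFS_SHARED_BLOCK_REF_KEY: {
 *		struct btrfs_delayed_tree_ref *ref;
 *
 *		ref = btrfs_delayed_node_to_tree_ref(node);
 *		... use ref->root, ref->parent, ref->level ...
 *		break;
 *	}
 *	case BTRFS_EXTENT_DATA_REF_KEY:
 *	case BTRFS_SHARED_DATA_REF_KEY: {
 *		struct btrfs_delayed_data_ref *ref;
 *
 *		ref = btrfs_delayed_node_to_data_ref(node);
 *		... use ref->objectid, ref->offset ...
 *		break;
 *	}
 *	}
 */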