/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF     1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF    2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT  3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

/*
 * XXX: Qu: I really hate the design that ref_head and tree/data refs share
 * the same ref_node structure.
 * Ref_head is at a higher logical level than tree/data refs, and the
 * duplicated bytenr/num_bytes in ref_node is really a waste of memory; they
 * should be referenced from ref_head instead.
 * This gets more disgusting now that we use a list to store tree/data refs
 * in ref_head.  Must clean this mess up later.
 */
struct btrfs_delayed_ref_node {
	/* data/tree refs use this list, stored in ref_head->ref_list */
	struct list_head list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node a head ref? */
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
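
/*
 * Illustrative sketch, not part of this header: how ref_mod aggregates on
 * a head ref.  Queueing two adds and one drop for the same extent (via the
 * helpers declared later in this file) leaves a net modification of +1 on
 * the head, which is what eventually gets applied to the extent tree:
 *
 *	btrfs_add_delayed_tree_ref(..., BTRFS_ADD_DELAYED_REF, ...);
 *	btrfs_add_delayed_tree_ref(..., BTRFS_ADD_DELAYED_REF, ...);
 *	btrfs_add_delayed_tree_ref(..., BTRFS_DROP_DELAYED_REF, ...);
 *
 * Each individual ref keeps a positive ref_mod; only the head's running
 * tally can go negative, when drops outnumber adds.
 */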

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	bool is_data;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct list_head ref_list;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs
	 * associated with this head ref.  It is not adjusted as delayed
	 * refs are run; it is only meant to track whether we need to do
	 * the csum accounting or not.
	 */
	int total_ref_mod;

	/*
	 * For qgroup reserved space freeing.
	 *
	 * ref_root and reserved are recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called, and are used to free the
	 * reserved qgroup space at run_delayed_refs() time.
	 */
	u64 qgroup_ref_root;
	u64 qgroup_reserved;

	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-ram accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int processing:1;
};
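
/*
 * Illustrative sketch, not part of this header: the locking discipline
 * described above.  A caller that wants to run the refs for one extent,
 * or read a stable sum of the queued modifications, takes head->mutex
 * first through the helpers declared below (this assumes the conventional
 * 0-on-success return from btrfs_delayed_ref_lock()):
 *
 *	if (!btrfs_delayed_ref_lock(trans, head)) {
 *		... run or inspect the refs on head->ref_list ...
 *		btrfs_delayed_ref_unlock(head);
 *	}
 */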

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtrees and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;

	/*
	 * To make qgroup skip a given root.
	 * This is for snapshots, as btrfs_qgroup_inherit() will manually
	 * modify the counters for a snapshot and its source, so we should
	 * skip the snapshot in new_root/old_roots or it will be counted
	 * twice.
	 */
	u64 qgroup_to_skip;
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}
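
/*
 * Illustrative sketch, not part of this header: nodes are reference
 * counted through node->refs, so a caller that needs a node to outlive
 * the locks it was found under pins it first and drops the pin with
 * btrfs_put_delayed_ref():
 *
 *	atomic_inc(&ref->refs);
 *	... drop locks, use ref ...
 *	btrfs_put_delayed_ref(ref);
 *
 * The final put frees the node back to the kmem_cache matching its type,
 * as implemented above.
 */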

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * a node might live in a head or a regular ref, this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
#endif