/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

#include <linux/refcount.h>

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

/*
 * XXX: Qu: I really hate the design that ref_head and tree/data ref share the
 * same ref_node structure.
 * Ref_head is at a higher logic level than tree/data ref, and the duplicated
 * bytenr/num_bytes in ref_node are really a waste of memory; they should be
 * referenced from ref_head.
 * This gets more disgusting after we use a list to store tree/data refs in
 * ref_head. Must clean this mess up later.
 */
struct btrfs_delayed_ref_node {
	/* data/tree refs use this list, stored in ref_head->ref_list. */
	struct list_head list;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * how many refs this entry is adding or deleting. For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node still in the rbtree? */
	unsigned int is_head:1;
	unsigned int in_tree:1;
};

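/*
 * Illustrative sketch, not part of the original header: the add_list linkage
 * above means a consumer that only cares about pending additions can walk
 * ref_head->ref_add_list directly instead of filtering the whole ref_list,
 * along the lines of:
 *
 *	struct btrfs_delayed_ref_node *ref;
 *
 *	list_for_each_entry(ref, &head->ref_add_list, add_list)
 *		process_add_ref(ref);
 *
 * Here "head" is the struct btrfs_delayed_ref_head the refs hang off, and
 * process_add_ref() is a hypothetical per-node handler, not a btrfs function.
 */
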
struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	bool is_data;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent. They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct list_head ref_list;
	/* BTRFS_ADD_DELAYED_REF nodes also accumulate on this ref_add_list. */
	struct list_head ref_add_list;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref; it is not adjusted as delayed refs are run and
	 * is meant to track whether we need to do the csum accounting or not.
	 */
	int total_ref_mod;

	/*
	 * For qgroup reserved space freeing.
	 *
	 * ref_root and reserved will be recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called and will be used to free the
	 * reserved qgroup space at run_delayed_refs() time.
	 */
	u64 qgroup_ref_root;
	u64 qgroup_reserved;

	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed. must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree. In this case
	 * we need to update the in-ram accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int processing:1;
};

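/*
 * Illustrative sketch, not part of the original header: the per-head mutex is
 * what serializes processing of the refs for a single extent, so the
 * delayed-ref runner follows roughly this pattern (locking of
 * delayed_refs->lock and error handling elided, and assuming
 * btrfs_delayed_ref_lock() returns 0 once head->mutex is held):
 *
 *	head = btrfs_select_ref_head(trans);
 *	if (head && !btrfs_delayed_ref_lock(trans, head)) {
 *		... run and dequeue the refs on head->ref_list ...
 *		btrfs_delayed_ref_unlock(head);
 *	}
 *
 * btrfs_select_ref_head(), btrfs_delayed_ref_lock() and
 * btrfs_delayed_ref_unlock() are declared later in this header.
 */
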
struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;

	/*
	 * To make qgroup skip the given root.
	 * This is for snapshots, as btrfs_qgroup_inherit() will manually
	 * modify the counters for the snapshot and its source; we should skip
	 * the snapshot in new_root/old_roots or it will get accounted twice.
	 */
	u64 qgroup_to_skip;
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(refcount_read(&ref->refs) == 0);
	if (refcount_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

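/*
 * Illustrative sketch, not part of the original header: a caller that gives a
 * tree block a new backref (for example during a COW) does not touch the
 * extent tree directly; it queues the modification roughly like this, and the
 * extent tree is only updated when the delayed refs are run:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *					 parent, root_objectid, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL);
 *
 * The argument names are placeholders for the caller's values; passing NULL
 * means no delayed extent_op is attached.
 */
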
/*
 * a node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
#endif
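
/*
 * Illustrative sketch, not part of the original header: a consumer handed a
 * struct btrfs_delayed_ref_node typically checks for a head before choosing
 * the container cast, mirroring the switch in btrfs_put_delayed_ref() above:
 *
 *	if (btrfs_delayed_ref_is_head(node)) {
 *		struct btrfs_delayed_ref_head *head =
 *			btrfs_delayed_node_to_head(node);
 *		...
 *	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
 *		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
 *		struct btrfs_delayed_data_ref *dref =
 *			btrfs_delayed_node_to_data_ref(node);
 *		...
 *	} else {
 *		struct btrfs_delayed_tree_ref *tref =
 *			btrfs_delayed_node_to_tree_ref(node);
 *		...
 *	}
 */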