/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF     1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF    2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT  3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node a head ref rather than a tree/data ref? */
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u64 flags_to_set;
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	struct list_head cluster;

	struct btrfs_delayed_extent_op *extent_op;
	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
};
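/*
 * Illustrative usage sketch (not part of the kernel sources): the
 * locking protocol described above, written out with the helpers
 * declared later in this header.  Error handling is elided, and
 * treating a nonzero return from btrfs_delayed_ref_lock() as "try
 * again later" is an assumption based on its int return type, not
 * something this header guarantees:
 *
 *	struct btrfs_delayed_ref_head *head;
 *
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	if (head && !btrfs_delayed_ref_lock(trans, head)) {
 *		... run the queued refs for this extent ...
 *		btrfs_delayed_ref_unlock(head);
 *	}
 */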
/*
 * a delayed ref to a tree block.  parent is the bytenr of the parent
 * block for shared refs and zero when the ref is keyed by root.
 */
struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

/*
 * a delayed ref to a data extent.  objectid and offset identify the
 * owning inode and file offset; parent works as it does for tree refs.
 */
struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	struct rb_root root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	unsigned long num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	/*
	 * bumped when someone is making progress on the delayed
	 * refs, so that other procs know they are just adding to
	 * contention instead of helping
	 */
	atomic_t procs_running_refs;
	atomic_t ref_seq;
	wait_queue_head_t wait;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);
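/*
 * Illustrative usage sketch (not part of the kernel sources): queueing
 * a delayed backref for a tree block from a hypothetical caller that
 * already has a transaction handle.  Passing 0 for parent assumes the
 * ref is keyed by root rather than shared from a parent block, and
 * passing 0 for for_cow assumes a normal fs-tree modification that
 * should get a sequence number (see need_ref_seq() below):
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *					 0, root_objectid, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL, 0);
 */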
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}

int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 search_start);
void btrfs_release_ref_cluster(struct list_head *cluster);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * delayed refs with a ref_seq > 0 must be held back during backref
 * walking.  this only applies to items in one of the fs-trees; for_cow
 * items never need to be held back, so they won't get a ref_seq number.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
	if (for_cow)
		return 0;

	if (rootid == BTRFS_FS_TREE_OBJECTID)
		return 1;

	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	return 0;
}

/*
 * a node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
#endif