xref: /openbmc/linux/fs/btrfs/delayed-ref.h (revision 95db3b25)
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

/*
 * XXX: Qu: I really hate the design that ref_head and tree/data ref share the
 * same ref_node structure.
 * Ref_head is at a higher logical level than a tree/data ref, and the
 * duplicated bytenr/num_bytes in ref_node are really a waste of memory; they
 * should be referred to from ref_head.
 * This gets even more disgusting now that we use a list to store tree/data
 * refs in ref_head. Must clean this mess up later.
 */
struct btrfs_delayed_ref_node {
	/*
	 * ref_heads use an rbtree, stored in ref_root->href_root and
	 * indexed by bytenr
	 */
	struct rb_node rb_node;

	/* data/tree refs use a list, stored in ref_head->ref_list */
	struct list_head list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it keeps
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node a head ref (rather than a tree/data ref)? */
	unsigned int is_head:1;
	/* is this node still in the rbtree/ref list? */
	unsigned int in_tree:1;
};

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	bool is_data;
	u64 flags_to_set;
};
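
/*
 * Example (illustrative sketch only, not part of the original header):
 * a typical caller allocates an extent_op with
 * btrfs_alloc_delayed_extent_op(), fills in the fields it wants applied when
 * the delayed ref is run, and hands it to btrfs_add_delayed_extent_op()
 * (both declared below).  The function name, the choice of fields and the
 * error handling here are assumptions for illustration.
 */
#if 0
static int example_queue_flag_update(struct btrfs_fs_info *fs_info,
				     struct btrfs_trans_handle *trans,
				     u64 bytenr, u64 num_bytes,
				     int level, u64 flags)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	/* only the flags are updated here, the key is left alone */
	extent_op->flags_to_set = flags;
	extent_op->update_flags = true;
	extent_op->update_key = false;
	extent_op->is_data = false;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr, num_bytes,
					  extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}
#endif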

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct list_head ref_list;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref.  It is not adjusted as delayed refs are run;
	 * it is meant to track whether we need to do the csum accounting or
	 * not.
	 */
	int total_ref_mod;

	/*
	 * For qgroup reserved space freeing.
	 *
	 * ref_root and reserved are recorded when BTRFS_ADD_DELAYED_EXTENT
	 * is queued, and are used to free the reserved qgroup space when the
	 * delayed ref is run at run_delayed_refs() time.
	 */
	u64 qgroup_ref_root;
	u64 qgroup_reserved;

	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-ram accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int processing:1;
};
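
/*
 * Example (illustrative sketch only): the comment above says head->mutex is
 * held when checking the sum of reference modifications.  A reader that wants
 * a stable view of the pending count could do something like the following;
 * the helper name and the surrounding context are assumptions for
 * illustration.
 */
#if 0
static int example_head_has_pending_mods(struct btrfs_delayed_ref_head *head)
{
	int pending;

	mutex_lock(&head->mutex);
	/* node.ref_mod on a head ref is the running sum of queued mods */
	pending = head->node.ref_mod;
	mutex_unlock(&head->mutex);

	return pending != 0;
}
#endif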

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};
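
/*
 * Example (illustrative sketch only): which fields of a data ref are
 * meaningful depends on node.type.  The interpretation below (shared refs
 * are identified by the parent block, keyed refs by root/objectid/offset)
 * is an assumption based on how btrfs backref types are commonly described,
 * not something this header spells out.
 */
#if 0
static bool example_data_ref_is_shared(struct btrfs_delayed_data_ref *ref)
{
	/*
	 * A shared data ref records only the bytenr of the referencing block
	 * in ref->parent; a keyed data ref records root/objectid/offset
	 * instead.
	 */
	return ref->node.type == BTRFS_SHARED_DATA_REF_KEY;
}
#endif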

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spinlock protects the rbtree and the entries inside it */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;

	/*
	 * Used to make qgroup skip the given root.
	 * This is for snapshots, as btrfs_qgroup_inherit() will manually
	 * modify the counters for the snapshot and its source, so we should
	 * skip the snapshot in new_root/old_roots or it will get accounted
	 * twice.
	 */
	u64 qgroup_to_skip;
};
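
/*
 * Example (illustrative sketch only): the head refs hang off href_root via
 * ref_head->href_node and, per the comment above, the rbtree is protected by
 * delayed_refs->lock.  A walk over all queued heads might look like the
 * following; the function name is an assumption, and a real caller would do
 * something useful with each head.
 */
#if 0
static void example_walk_heads(struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;
	struct rb_node *node;

	spin_lock(&delayed_refs->lock);
	for (node = rb_first(&delayed_refs->href_root); node;
	     node = rb_next(node)) {
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		/* inspect head->node.bytenr, head->node.ref_mod, ... */
	}
	spin_unlock(&delayed_refs->lock);
}
#endif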

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}
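
/*
 * Example (illustrative sketch only): ref->refs is a plain atomic reference
 * count, so a caller that wants to keep a node alive after dropping whatever
 * lock currently protects it takes an extra reference first and releases it
 * with btrfs_put_delayed_ref() when done.  The function name and the idea of
 * "doing work" after the unlock are assumptions for illustration.
 */
#if 0
static void example_hold_ref(struct btrfs_delayed_ref_root *delayed_refs,
			     struct btrfs_delayed_ref_node *ref)
{
	spin_lock(&delayed_refs->lock);
	/* pin the node so it survives once the lock is dropped */
	atomic_inc(&ref->refs);
	spin_unlock(&delayed_refs->lock);

	/* ... work with ref ... */

	btrfs_put_delayed_ref(ref);
}
#endif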

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
				     struct btrfs_trans_handle *trans,
				     u64 ref_root, u64 bytenr, u64 num_bytes);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
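
/*
 * Example (illustrative sketch only): a caller that wants exclusive access to
 * the pending refs for one extent typically looks the head up by bytenr,
 * locks it, does its work, and unlocks it again.  Whether
 * btrfs_find_delayed_ref_head() must be called under delayed_refs->lock, and
 * what a non-zero return from btrfs_delayed_ref_lock() means, are assumptions
 * here; the implementation lives in delayed-ref.c.
 */
#if 0
static int example_lock_head(struct btrfs_trans_handle *trans,
			     struct btrfs_delayed_ref_root *delayed_refs,
			     u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	int ret;

	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return 0;	/* nothing queued for this extent */
	}

	ret = btrfs_delayed_ref_lock(trans, head);
	spin_unlock(&delayed_refs->lock);
	if (ret)
		return ret;	/* e.g. the head was already processed */

	/* ... examine or run the refs hanging off head->ref_list ... */

	btrfs_delayed_ref_unlock(head);
	return 0;
}
#endif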

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * a node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
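
/*
 * Example (illustrative sketch only): the embedded ref_node is the common
 * handle, so code that walks the queued refs first checks is_head and then
 * dispatches on node->type before casting back to the container with the
 * helpers above.  The function name and the per-type handling are assumptions
 * for illustration.
 */
#if 0
static void example_dispatch(struct btrfs_delayed_ref_node *node)
{
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;

		head = btrfs_delayed_node_to_head(node);
		/* ... handle the head ref ... */
		return;
	}

	switch (node->type) {
	case BTRFS_TREE_BLOCK_REF_KEY:
	case BTRFS_SHARED_BLOCK_REF_KEY: {
		struct btrfs_delayed_tree_ref *tree_ref;

		tree_ref = btrfs_delayed_node_to_tree_ref(node);
		/* ... use tree_ref->root / parent / level ... */
		break;
	}
	case BTRFS_EXTENT_DATA_REF_KEY:
	case BTRFS_SHARED_DATA_REF_KEY: {
		struct btrfs_delayed_data_ref *data_ref;

		data_ref = btrfs_delayed_node_to_data_ref(node);
		/* ... use data_ref->objectid / offset ... */
		break;
	}
	}
}
#endif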
#endif /* __DELAYED_REF__ */