/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

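/*
 * Illustrative example (not from the btrfs sources): which "add" action
 * gets queued depends on whether the extent item already exists on disk.
 *
 *	int action = newly_allocated ? BTRFS_ADD_DELAYED_EXTENT
 *				     : BTRFS_ADD_DELAYED_REF;
 */
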
struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node a head ref rather than an individual ref? */
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
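
/*
 * Worked example (illustrative): queueing two adds (ref_mod = +1 each)
 * and one drop from a parent that held two refs (ref_mod = 2 with
 * action = BTRFS_DROP_DELAYED_REF) leaves the head ref tracking the net
 * change:
 *
 *	head ref_mod = (+1) + (+1) + (-2) = 0
 *
 * so running these refs would leave the on-disk reference count as is.
 */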

/*
 * an extent_op records a deferred modification (key and/or flags update)
 * to apply to the extent item when the delayed ref is run.
 */
struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u64 flags_to_set;
	int level;
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	struct list_head cluster;

	struct btrfs_delayed_extent_op *extent_op;
	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
};
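
/*
 * Example (illustrative): an extent that is allocated and then freed
 * before its delayed refs run never reaches the extent allocation tree.
 * Its head is queued with must_insert_reserved = 1, and once the drop
 * brings the net ref_mod to zero, processing the head only releases the
 * in-memory reservation; no extent item is ever inserted or deleted.
 */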

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	/* objectid of the tree root that owns this ref */
	u64 root;
	/* bytenr of the referencing block for shared backrefs, zero otherwise */
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	/* inode objectid and file offset the extent is referenced from */
	u64 objectid;
	u64 offset;
};
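
/*
 * Example (illustrative, hypothetical values): a keyed data ref held by
 * inode 257 at file offset 0 in the default fs tree would carry:
 *
 *	ref->root     = BTRFS_FS_TREE_OBJECTID;
 *	ref->parent   = 0;		(zero means keyed, not shared)
 *	ref->objectid = 257;
 *	ref->offset   = 0;
 */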

struct btrfs_delayed_ref_root {
	struct rb_root root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	unsigned long num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	/*
	 * bumped when someone is making progress on the delayed
	 * refs, so that other procs know they are just adding to
	 * contention instead of helping
	 */
	atomic_t procs_running_refs;
	atomic_t ref_seq;
	wait_queue_head_t wait;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;
};
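
/*
 * Sketch (illustrative, simplified from the throttling done in
 * extent-tree.c): writers can consult flushing and num_entries to decide
 * whether to help run refs before queueing more; both the threshold and
 * the helper below are hypothetical.
 *
 *	if (delayed_refs->flushing ||
 *	    delayed_refs->num_entries > ref_backlog_threshold)
 *		help_run_delayed_refs(trans);
 */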

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
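
/*
 * Usage sketch (illustrative): callers allocate an op, fill it in, and
 * hand it to the delayed ref code when queueing a ref; it is freed with
 * btrfs_free_delayed_extent_op() after the ref is run.  "level" below is
 * assumed caller state.
 *
 *	struct btrfs_delayed_extent_op *op;
 *
 *	op = btrfs_alloc_delayed_extent_op();
 *	if (!op)
 *		return -ENOMEM;
 *	op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 *	op->update_flags = 1;
 *	op->update_key = 0;
 *	op->is_data = 0;
 *	op->level = level;
 */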

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		/* free to the cache that matches the node's container */
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			/* head refs are the only nodes with type 0 */
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);
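
/*
 * Usage sketch (illustrative; "buf", "parent", "root_objectid" and
 * "level" are assumed caller state): queueing a drop of a tree block
 * ref while COWing a tree:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
 *					 buf->len, parent, root_objectid,
 *					 level, BTRFS_DROP_DELAYED_REF,
 *					 NULL, 1);
 */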

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}

int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 search_start);
void btrfs_release_ref_cluster(struct list_head *cluster);
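
/*
 * Processing sketch (illustrative, simplified from the run loop in
 * extent-tree.c): gather a cluster of heads, run them, then release the
 * cluster.
 *
 *	LIST_HEAD(cluster);
 *
 *	if (!btrfs_find_ref_cluster(trans, &cluster,
 *				    delayed_refs->run_delayed_start)) {
 *		... for each head: btrfs_delayed_ref_lock(), run the
 *		    queued refs, then btrfs_delayed_ref_unlock() ...
 *	}
 *	btrfs_release_ref_cluster(&cluster);
 */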

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * delayed refs with a ref_seq > 0 must be held back during backref walking.
 * This only applies to items in one of the fs-trees; for_cow items never
 * need to be held back, so they won't get a ref_seq number.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
	if (for_cow)
		return 0;

	if (rootid == BTRFS_FS_TREE_OBJECTID)
		return 1;

	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	return 0;
}
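
/*
 * Examples (illustrative): only non-COW refs against the fs tree or a
 * subvolume tree (objectid >= BTRFS_FIRST_FREE_OBJECTID) get a seq
 * number.
 *
 *	need_ref_seq(0, BTRFS_FS_TREE_OBJECTID)     == 1
 *	need_ref_seq(0, BTRFS_EXTENT_TREE_OBJECTID) == 0
 *	need_ref_seq(1, BTRFS_FS_TREE_OBJECTID)     == 0
 */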

/*
 * a node might live in a head or a regular ref, this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
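
/*
 * Dispatch sketch (illustrative): code walking the ref tree can test for
 * a head and then cast to the matching container type.
 *
 *	if (btrfs_delayed_ref_is_head(node)) {
 *		struct btrfs_delayed_ref_head *head;
 *
 *		head = btrfs_delayed_node_to_head(node);
 *		mutex_lock(&head->mutex);
 *		...
 *	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY) {
 *		struct btrfs_delayed_data_ref *dref;
 *
 *		dref = btrfs_delayed_node_to_data_ref(node);
 *		...
 *	}
 */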
#endif