/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

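/*
 * Usage sketch (illustrative only, not compiled here): callers in the
 * extent tree code queue ref updates through the entry points below
 * instead of touching the extent allocation tree directly, e.g.,
 * assuming trans, fs_info and the extent fields are in scope:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
 *					 num_bytes, parent, ref_root,
 *					 level, BTRFS_ADD_DELAYED_REF,
 *					 NULL, 0);
 *
 * The update is recorded in the per-transaction rbtrees and only
 * applied to the extent allocation tree when the delayed refs are run.
 */
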
/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * the type of the delayed backrefs and the content of the delayed
 * backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

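/*
 * Note on the order produced by comp_entry() above: with the argument
 * order used by tree_insert() below it yields an ascending sort by
 * bytenr, with a head ref ordered after any regular ref for the same
 * bytenr, then by type, by seq (when compare_seq is set) and finally
 * by the ref payload compared in the helpers above.
 */
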
/*
 * insert a new ref into the rbtree.  This returns the existing ref if
 * an entry that compares equal (same bytenr, type, seq and ref
 * content) is already present, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/* insert a new ref head into the rbtree of head refs */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

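/*
 * The two insert helpers above reflect the two-level structure used
 * here: delayed_refs->href_root holds one btrfs_delayed_ref_head per
 * extent, keyed only by bytenr, while each head's ref_root holds the
 * individual delayed refs for that extent, fully ordered by
 * comp_entry().
 */
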
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no
 * exact match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

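/*
 * Worked example for find_ref_head()'s return_bigger: with heads at
 * bytenrs 4096 and 8192, a lookup for 5000 finds no exact match and
 * returns the 8192 head (either directly, or via rb_next() when the
 * search ended on the smaller entry).  A lookup past the last head
 * wraps around to rb_first(); btrfs_select_ref_head() relies on this
 * to cycle through all heads.
 */
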
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

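/*
 * Note: to sleep on the mutex, btrfs_delayed_ref_lock() above must
 * drop delayed_refs->lock, so the head it is locking can be run and
 * removed from the tree in the meantime; the extra reference keeps
 * the structure alive and the in_tree recheck detects that case.
 * A typical caller simply retries on -EAGAIN (sketch):
 *
 *	if (btrfs_delayed_ref_lock(trans, head) == -EAGAIN)
 *		goto again;
 */
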
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		rb_erase(&ref->rb_node, &head->ref_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}

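/*
 * Worked example for merge_ref() above: an ADD ref with ref_mod 1
 * followed by a mergeable DROP ref with ref_mod 1 gives 1 + (-1) = 0,
 * so both entries are dropped and the extent tree is never touched.
 * Two ADDs with ref_mod 1 each instead merge into one entry with
 * ref_mod 2 (data refs only; tree blocks can carry at most one ref of
 * a given type, hence the WARN_ON).
 */
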
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);
	/*
	 * We don't have too many refs to merge in the case of delayed data
	 * refs.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_first(&head->ref_root);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			node = rb_first(&head->ref_root);
		else
			node = rb_next(&ref->rb_node);
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

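/*
 * The seq check in btrfs_check_delayed_seq() above ties delayed ref
 * processing to the tree mod log: while a backref walker has an
 * element on tree_mod_seq_list, refs with a seq at or above the
 * oldest registered seq must not be processed (or merged, see
 * btrfs_merge_delayed_refs()), otherwise the walker would see an
 * inconsistent view of the backrefs.
 */
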
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

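/*
 * btrfs_select_ref_head() above is effectively round-robin:
 * run_delayed_start remembers where the last chosen head ended, the
 * next call picks the first unprocessed head at or after that bytenr,
 * and the scan wraps back to the start of the tree exactly once
 * (tracked by 'loop') before giving up and returning NULL.
 */
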
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

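/*
 * Example of the cancellation path in update_existing_ref() above: an
 * existing ADD with ref_mod 1 updated by a DROP decrements ref_mod to
 * 0 and the entry is deleted outright, so the extent allocation tree
 * never sees either operation.
 */
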
/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock here because the head could currently be
	 * getting processed; for refs we just added we know we're a-ok.
	 */
	existing->ref_mod += update->ref_mod;
	spin_unlock(&existing_ref->lock);
}

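/*
 * Note on the extent_op merge in update_existing_head_ref() above:
 * key and flag updates are independent, so an update that only sets
 * flags is OR-ed into an existing op that only updates the key,
 * leaving both update_key and update_flags set on the surviving op.
 */
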
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_root = RB_ROOT;
	head_ref->processing = 0;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(&existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

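/*
 * Usage sketch for btrfs_add_delayed_tree_ref() above (illustrative,
 * with hypothetical locals): freeing a shared tree block at 'bytenr'
 * whose parent block is known might be queued as
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
 *					 blocksize, parent_bytenr,
 *					 root_objectid, level,
 *					 BTRFS_DROP_DELAYED_REF, NULL, 0);
 *
 * With a non-zero parent, add_delayed_tree_ref() records a
 * BTRFS_SHARED_BLOCK_REF_KEY ref; with parent == 0 it uses the
 * ref_root based BTRFS_TREE_BLOCK_REF_KEY form instead.
 */
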
/*
 * add a delayed data ref.  This is similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}