xref: /openbmc/linux/fs/btrfs/delayed-ref.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

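/*
 * Rough lifecycle, for orientation (an illustrative sketch, not code that
 * runs here): a caller such as the extent allocator queues a modification,
 * the lists below merge and deduplicate it, and the ref runner applies it
 * to the extent tree later in the transaction:
 *
 *	btrfs_add_delayed_tree_ref(...);	queue the change
 *	...
 *	btrfs_run_delayed_refs(trans, ...);	apply it at commit time
 *
 * btrfs_run_delayed_refs() lives in extent-tree.c; only the queueing and
 * merging side is implemented in this file.
 */
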
/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

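/*
 * Illustrative use of htree_insert() (a sketch of the pattern used by
 * add_delayed_ref_head() further down): a non-NULL return is not an
 * error, it means a head for this bytenr already exists and the caller
 * must merge into it instead:
 *
 *	existing = htree_insert(&delayed_refs->href_root, &head->href_node);
 *	if (existing)
 *		update_existing_head_ref(delayed_refs, &existing->node, ref);
 */
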
/* insert a new head ref into the href rbtree, keyed by bytenr */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			/* ran past the last entry: wrap around to the start */
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

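/*
 * Take head->mutex while delayed_refs->lock is held.  If the trylock
 * fails we pin the head, drop the spinlock and sleep on the mutex; the
 * head may be run and removed from the tree in the meantime, in which
 * case -EAGAIN tells the caller to start over.  A sketch of the typical
 * caller pattern (the real callers are in extent-tree.c):
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	if (head && btrfs_delayed_ref_lock(trans, head) == -EAGAIN)
 *		goto again;	(the head was processed concurrently)
 */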
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

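/*
 * Worked example of the merge below (illustrative numbers): an ADD ref
 * with ref_mod +1 followed by a DROP of the same (root, parent) with
 * ref_mod 1 have opposite actions, so mod becomes -1, the DROP is
 * dropped and the ADD's ref_mod falls to 0, at which point the ADD is
 * dropped too and the pair cancels without ever touching the extent
 * allocation tree.
 */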
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

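/*
 * Example of the hold-back rule below (illustrative numbers): if the
 * oldest outstanding tree mod log user pinned seq 100, a delayed ref
 * tagged with seq 150 must not be run yet (we return 1), because that
 * user may still need to walk backrefs as they looked at seq 100.
 */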
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

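/*
 * Pick the next head to process.  delayed_refs->run_delayed_start is a
 * round-robin cursor: we search for the first head at or after it, wrap
 * to the start of the tree once if nothing is found, and skip heads
 * already marked as processing.  The cursor is then advanced past the
 * chosen extent so concurrent runners spread out across the tree.
 */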
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node at the tail of the list, or to merge it
 * with the current tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove the existing tail if its ref_mod drops to zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = true;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * we only need the lock for this case because the head could be
	 * getting processed currently; for refs we just added we know
	 * we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative one or
	 * vice versa we need to make sure to adjust pending_csums
	 * accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

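/*
 * Note on the csum accounting below: a head with a negative total mod
 * for a data extent means csum items will eventually be deleted, so
 * pending_csums is bumped by num_bytes when such a head is created (and
 * adjusted again in update_existing_head_ref() if the sign flips).
 */
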
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but the existing practice here differs.  Follow it for now;
	 * this needs a cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}


/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

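/*
 * Illustrative call (a sketch; the bytenr, size and root are made-up
 * values, and the real callers live in extent-tree.c): queueing a +1
 * ref for a freshly cow'd tree block at level 0 of subvolume 257, with
 * no shared parent:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, 12582912, 16384,
 *					 0, 257, 0, BTRFS_ADD_DELAYED_REF,
 *					 NULL);
 */
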
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref.  This is similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

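/*
 * Queue a head-only update (no backref change), used to set flags or a
 * key on the extent item later.  A sketch of a hypothetical call,
 * loosely mirroring btrfs_set_disk_extent_flags() in extent-tree.c,
 * that marks a metadata extent FULL_BACKREF via an extent_op:
 *
 *	extent_op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 *	extent_op->update_flags = true;
 *	extent_op->update_key = false;
 *	extent_op->is_data = 0;
 *	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
 *					  num_bytes, extent_op);
 */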
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

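/*
 * Create the slab caches used above.  On any allocation failure we fall
 * through to btrfs_delayed_ref_exit(), which is safe on a partially
 * initialized set because kmem_cache_destroy() accepts a NULL cache.
 */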
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}