// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * Delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
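
/*
 * Worked example for the thresholds above (illustrative numbers, not
 * measurements): with an average delayed ref runtime of 5000ns, 200000
 * queued entries give val = 10^9ns >= NSEC_PER_SEC, so we return 1 and
 * the caller should throttle hard; 100000 entries give 5 * 10^8ns,
 * which only crosses NSEC_PER_SEC / 2, so we return 2.
 */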

/**
 * btrfs_delayed_refs_rsv_release - release a ref head's reservation
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * This drops the delayed ref head's count from the delayed refs rsv and frees
 * any excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
	u64 released = 0;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}

/*
 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
 * @trans: the trans that may have generated delayed refs
 *
 * This is to be called any time we may have adjusted
 * trans->delayed_ref_updates; it will calculate the additional size and add it
 * to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
						    trans->delayed_ref_updates);
	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = 0;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}
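
/*
 * Hypothetical usage sketch (not a call site in this file): any code that
 * bumps trans->delayed_ref_updates is expected to flush the counter into
 * the rsv soon afterwards, e.g.:
 *
 *	trans->delayed_ref_updates += nr_new_items;
 *	btrfs_update_delayed_refs_rsv(trans);
 *
 * (nr_new_items is a made-up name.)  The counter is zeroed on return, so
 * a second call in a row is a no-op.
 */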

/**
 * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&src->lock);
	src->reserved -= num_bytes;
	src->size -= num_bytes;
	spin_unlock(&src->lock);

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			delayed_refs_rsv->reserved;
		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = 1;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}
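
/*
 * Worked example with made-up numbers: if the delayed refs rsv has
 * size=10M and reserved=6M, the shortfall (delta) is 4M.  Migrating
 * num_bytes=6M from @src moves 4M into the rsv (filling it, so ->full
 * is set) and hands the remaining 2M back to the space info via
 * btrfs_space_info_free_bytes_may_use().
 */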

/**
 * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv with up to one item's worth of space
 * and will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
					   num_bytes, flush);
	if (ret)
		return ret;
	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
				      0, num_bytes, 1);
	return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
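
/*
 * Illustrative ordering (hypothetical values): refs on the same head sort
 * first by type, then by the type-specific fields, then by seq when
 * check_seq is set.  So {TREE_BLOCK_REF, root=5, seq=10} sorts before
 * {TREE_BLOCK_REF, root=5, seq=12}, which sorts before
 * {TREE_BLOCK_REF, root=7, seq=1}: the root comparison wins before seq
 * is ever looked at.
 */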

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
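
/*
 * Sketch of the expected caller pattern (hypothetical caller, the real
 * ones live in extent-tree.c): -EAGAIN means the head was run and freed
 * while we slept on the mutex, so callers typically retry selection
 * instead of treating it as a failure:
 *
 *	spin_lock(&delayed_refs->lock);
 * again:
 *	head = btrfs_select_ref_head(delayed_refs);
 *	if (head && btrfs_delayed_ref_lock(delayed_refs, head) == -EAGAIN)
 *		goto again;
 */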

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
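
/*
 * Worked example (made-up refs): an ADD with ref_mod=1 followed by a
 * DROP with ref_mod=1 on the same (root, parent) net out to ref_mod=0,
 * so both nodes are dropped and nothing is ever written to the extent
 * tree for the pair.  An ADD(2) followed by a DROP(1) leaves a single
 * ADD with ref_mod=1.
 */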

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
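
/*
 * Illustrative walk (hypothetical bytenrs): with heads at 4096 and 8192
 * and run_delayed_start == 5000, find_ref_head() hands back the next
 * bigger head at 8192; once that head is returned, run_delayed_start
 * advances past it and a later call wraps back to 0 and starts over from
 * the first head.  The effect is a simple round-robin over the rbtree so
 * no single range of bytenrs can starve the rest.
 */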

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}
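
/*
 * Sketch of how the return value is consumed (see the call sites below):
 * on a merge (ret > 0) the new node was never linked into the rbtree, so
 * the caller must free it, e.g. for a tree ref:
 *
 *	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
 *	if (ret > 0)
 *		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 */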

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * If the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag set.
		 * Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * Update the reference mod on the head to reflect this new operation.
	 * We only need the lock for this case because the head could be
	 * processed concurrently; for refs we just added we know we're fine.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}
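
/*
 * For reference, how @action maps to the initial head counters set above
 * (derived directly from init_delayed_ref_head()):
 *
 *	BTRFS_ADD_DELAYED_REF		ref_mod = +1
 *	BTRFS_ADD_DELAYED_EXTENT	ref_mod = +1, must_insert_reserved
 *	BTRFS_DROP_DELAYED_REF		ref_mod = -1
 *	BTRFS_UPDATE_DELAYED_HEAD	ref_mod =  0
 */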

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:	The fs_info structure of the mounted filesystem.
 * @ref:	The structure which is going to be initialized.
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 * @num_bytes:	Size of the extent whose modification is being recorded.
 * @ref_root:	The id of the root where this modification originated; this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}
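
/*
 * Hypothetical call sketch (the real helpers and call sites live in
 * delayed-ref.h and extent-tree.c; treat names outside this file as
 * approximate):
 *
 *	struct btrfs_ref ref = { 0 };
 *
 *	ref.type = BTRFS_REF_METADATA;
 *	ref.action = BTRFS_ADD_DELAYED_REF;
 *	ref.bytenr = eb->start;
 *	ref.len = eb->len;
 *	ref.tree_ref.level = btrfs_header_level(eb);
 *	ref.tree_ref.owning_root = root->root_key.objectid;
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */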

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int action = generic_ref->action;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}