1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 Fujitsu.  All rights reserved.
4  * Written by Miao Xie <miaox@cn.fujitsu.com>
5  */
6 
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
9 #include <linux/sched/mm.h>
10 #include "misc.h"
11 #include "delayed-inode.h"
12 #include "disk-io.h"
13 #include "transaction.h"
14 #include "ctree.h"
15 #include "qgroup.h"
16 #include "locking.h"
17 
18 #define BTRFS_DELAYED_WRITEBACK		512
19 #define BTRFS_DELAYED_BACKGROUND	128
20 #define BTRFS_DELAYED_BATCH		16
21 
22 static struct kmem_cache *delayed_node_cache;
23 
24 int __init btrfs_delayed_inode_init(void)
25 {
26 	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
27 					sizeof(struct btrfs_delayed_node),
28 					0,
29 					SLAB_MEM_SPREAD,
30 					NULL);
31 	if (!delayed_node_cache)
32 		return -ENOMEM;
33 	return 0;
34 }
35 
36 void __cold btrfs_delayed_inode_exit(void)
37 {
38 	kmem_cache_destroy(delayed_node_cache);
39 }
40 
41 static inline void btrfs_init_delayed_node(
42 				struct btrfs_delayed_node *delayed_node,
43 				struct btrfs_root *root, u64 inode_id)
44 {
45 	delayed_node->root = root;
46 	delayed_node->inode_id = inode_id;
47 	refcount_set(&delayed_node->refs, 0);
48 	delayed_node->ins_root = RB_ROOT_CACHED;
49 	delayed_node->del_root = RB_ROOT_CACHED;
50 	mutex_init(&delayed_node->mutex);
51 	INIT_LIST_HEAD(&delayed_node->n_list);
52 	INIT_LIST_HEAD(&delayed_node->p_list);
53 }
54 
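/*
 * Two delayed items are "continuous" when they are adjacent dir index keys:
 * same objectid, both BTRFS_DIR_INDEX_KEY, and offsets differing by exactly
 * one, e.g. (ino, DIR_INDEX, 5) followed by (ino, DIR_INDEX, 6).
 */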
55 static inline int btrfs_is_continuous_delayed_item(
56 					struct btrfs_delayed_item *item1,
57 					struct btrfs_delayed_item *item2)
58 {
59 	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
60 	    item1->key.objectid == item2->key.objectid &&
61 	    item1->key.type == item2->key.type &&
62 	    item1->key.offset + 1 == item2->key.offset)
63 		return 1;
64 	return 0;
65 }
66 
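/*
 * Look up the delayed node of an inode and return it with an extra reference
 * held, caching it in the btrfs inode on a radix tree hit.  Returns NULL if
 * the inode has no delayed node or the node is in the middle of being freed.
 */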
67 static struct btrfs_delayed_node *btrfs_get_delayed_node(
68 		struct btrfs_inode *btrfs_inode)
69 {
70 	struct btrfs_root *root = btrfs_inode->root;
71 	u64 ino = btrfs_ino(btrfs_inode);
72 	struct btrfs_delayed_node *node;
73 
74 	node = READ_ONCE(btrfs_inode->delayed_node);
75 	if (node) {
76 		refcount_inc(&node->refs);
77 		return node;
78 	}
79 
80 	spin_lock(&root->inode_lock);
81 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
82 
83 	if (node) {
84 		if (btrfs_inode->delayed_node) {
85 			refcount_inc(&node->refs);	/* can be accessed */
86 			BUG_ON(btrfs_inode->delayed_node != node);
87 			spin_unlock(&root->inode_lock);
88 			return node;
89 		}
90 
91 		/*
92 		 * It's possible that we're racing into the middle of removing
93 		 * this node from the radix tree.  In this case, the refcount
94 		 * was zero and it should never go back to one.  Just return
95 		 * NULL like it was never in the radix at all; our release
96 		 * function is in the process of removing it.
97 		 *
98 		 * Some implementations of refcount_inc refuse to bump the
99 		 * refcount once it has hit zero.  If we don't do this dance
100 		 * here, refcount_inc() may decide to just WARN_ONCE() instead
101 		 * of actually bumping the refcount.
102 		 *
103 		 * If this node is properly in the radix, we want to bump the
104 		 * refcount twice, once for the inode and once for this get
105 		 * operation.
106 		 */
107 		if (refcount_inc_not_zero(&node->refs)) {
108 			refcount_inc(&node->refs);
109 			btrfs_inode->delayed_node = node;
110 		} else {
111 			node = NULL;
112 		}
113 
114 		spin_unlock(&root->inode_lock);
115 		return node;
116 	}
117 	spin_unlock(&root->inode_lock);
118 
119 	return NULL;
120 }
121 
122 /* Will return either the node or PTR_ERR(-ENOMEM) */
123 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
124 		struct btrfs_inode *btrfs_inode)
125 {
126 	struct btrfs_delayed_node *node;
127 	struct btrfs_root *root = btrfs_inode->root;
128 	u64 ino = btrfs_ino(btrfs_inode);
129 	int ret;
130 
131 again:
132 	node = btrfs_get_delayed_node(btrfs_inode);
133 	if (node)
134 		return node;
135 
136 	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
137 	if (!node)
138 		return ERR_PTR(-ENOMEM);
139 	btrfs_init_delayed_node(node, root, ino);
140 
141 	/* cached in the btrfs inode and can be accessed */
142 	refcount_set(&node->refs, 2);
143 
144 	ret = radix_tree_preload(GFP_NOFS);
145 	if (ret) {
146 		kmem_cache_free(delayed_node_cache, node);
147 		return ERR_PTR(ret);
148 	}
149 
150 	spin_lock(&root->inode_lock);
151 	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
152 	if (ret == -EEXIST) {
153 		spin_unlock(&root->inode_lock);
154 		kmem_cache_free(delayed_node_cache, node);
155 		radix_tree_preload_end();
156 		goto again;
157 	}
158 	btrfs_inode->delayed_node = node;
159 	spin_unlock(&root->inode_lock);
160 	radix_tree_preload_end();
161 
162 	return node;
163 }
164 
165 /*
166  * Must be called while holding delayed_node->mutex.
167  *
168  * If mod = 1, also add this node into the prepared list.
169  */
170 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
171 				     struct btrfs_delayed_node *node,
172 				     int mod)
173 {
174 	spin_lock(&root->lock);
175 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
176 		if (!list_empty(&node->p_list))
177 			list_move_tail(&node->p_list, &root->prepare_list);
178 		else if (mod)
179 			list_add_tail(&node->p_list, &root->prepare_list);
180 	} else {
181 		list_add_tail(&node->n_list, &root->node_list);
182 		list_add_tail(&node->p_list, &root->prepare_list);
183 		refcount_inc(&node->refs);	/* inserted into list */
184 		root->nodes++;
185 		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
186 	}
187 	spin_unlock(&root->lock);
188 }
189 
190 /* Must be called while holding delayed_node->mutex. */
191 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
192 				       struct btrfs_delayed_node *node)
193 {
194 	spin_lock(&root->lock);
195 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
196 		root->nodes--;
197 		refcount_dec(&node->refs);	/* not in the list */
198 		list_del_init(&node->n_list);
199 		if (!list_empty(&node->p_list))
200 			list_del_init(&node->p_list);
201 		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
202 	}
203 	spin_unlock(&root->lock);
204 }
205 
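/*
 * Grab the first delayed node on the root's node list, holding a new
 * reference on it.  Returns NULL if the list is empty.
 */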
206 static struct btrfs_delayed_node *btrfs_first_delayed_node(
207 			struct btrfs_delayed_root *delayed_root)
208 {
209 	struct list_head *p;
210 	struct btrfs_delayed_node *node = NULL;
211 
212 	spin_lock(&delayed_root->lock);
213 	if (list_empty(&delayed_root->node_list))
214 		goto out;
215 
216 	p = delayed_root->node_list.next;
217 	node = list_entry(p, struct btrfs_delayed_node, n_list);
218 	refcount_inc(&node->refs);
219 out:
220 	spin_unlock(&delayed_root->lock);
221 
222 	return node;
223 }
224 
225 static struct btrfs_delayed_node *btrfs_next_delayed_node(
226 						struct btrfs_delayed_node *node)
227 {
228 	struct btrfs_delayed_root *delayed_root;
229 	struct list_head *p;
230 	struct btrfs_delayed_node *next = NULL;
231 
232 	delayed_root = node->root->fs_info->delayed_root;
233 	spin_lock(&delayed_root->lock);
234 	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
235 		/* not in the list */
236 		if (list_empty(&delayed_root->node_list))
237 			goto out;
238 		p = delayed_root->node_list.next;
239 	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
240 		goto out;
241 	else
242 		p = node->n_list.next;
243 
244 	next = list_entry(p, struct btrfs_delayed_node, n_list);
245 	refcount_inc(&next->refs);
246 out:
247 	spin_unlock(&delayed_root->lock);
248 
249 	return next;
250 }
251 
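/*
 * Drop one reference on a delayed node.  Before dropping it, requeue the
 * node if it still has pending items (also adding it to the prepared list
 * when mod is set), otherwise dequeue it.  The node is removed from the
 * radix tree and freed once the last reference is gone.
 */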
252 static void __btrfs_release_delayed_node(
253 				struct btrfs_delayed_node *delayed_node,
254 				int mod)
255 {
256 	struct btrfs_delayed_root *delayed_root;
257 
258 	if (!delayed_node)
259 		return;
260 
261 	delayed_root = delayed_node->root->fs_info->delayed_root;
262 
263 	mutex_lock(&delayed_node->mutex);
264 	if (delayed_node->count)
265 		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
266 	else
267 		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
268 	mutex_unlock(&delayed_node->mutex);
269 
270 	if (refcount_dec_and_test(&delayed_node->refs)) {
271 		struct btrfs_root *root = delayed_node->root;
272 
273 		spin_lock(&root->inode_lock);
274 		/*
275 		 * Once our refcount goes to zero, nobody is allowed to bump it
276 		 * back up.  We can delete it now.
277 		 */
278 		ASSERT(refcount_read(&delayed_node->refs) == 0);
279 		radix_tree_delete(&root->delayed_nodes_tree,
280 				  delayed_node->inode_id);
281 		spin_unlock(&root->inode_lock);
282 		kmem_cache_free(delayed_node_cache, delayed_node);
283 	}
284 }
285 
286 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
287 {
288 	__btrfs_release_delayed_node(node, 0);
289 }
290 
291 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
292 					struct btrfs_delayed_root *delayed_root)
293 {
294 	struct list_head *p;
295 	struct btrfs_delayed_node *node = NULL;
296 
297 	spin_lock(&delayed_root->lock);
298 	if (list_empty(&delayed_root->prepare_list))
299 		goto out;
300 
301 	p = delayed_root->prepare_list.next;
302 	list_del_init(p);
303 	node = list_entry(p, struct btrfs_delayed_node, p_list);
304 	refcount_inc(&node->refs);
305 out:
306 	spin_unlock(&delayed_root->lock);
307 
308 	return node;
309 }
310 
311 static inline void btrfs_release_prepared_delayed_node(
312 					struct btrfs_delayed_node *node)
313 {
314 	__btrfs_release_delayed_node(node, 1);
315 }
316 
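/*
 * Allocate a delayed item with room for data_len bytes of item data placed
 * right after the struct, holding an initial reference on it.
 */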
317 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
318 {
319 	struct btrfs_delayed_item *item;
320 	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
321 	if (item) {
322 		item->data_len = data_len;
323 		item->ins_or_del = 0;
324 		item->bytes_reserved = 0;
325 		item->delayed_node = NULL;
326 		refcount_set(&item->refs, 1);
327 	}
328 	return item;
329 }
330 
331 /*
332  * __btrfs_lookup_delayed_item - look up the delayed item by key
333  * @root:	  the root of the rb-tree to search (insertion or deletion tree)
334  * @key:	  the key to look up
335  * @prev:	  used to store the prev item if the right item isn't found
336  * @next:	  used to store the next item if the right item isn't found
337  *
338  * Note: if we don't find the right item, we will return the prev item and
339  * the next item.
340  */
341 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
342 				struct rb_root *root,
343 				struct btrfs_key *key,
344 				struct btrfs_delayed_item **prev,
345 				struct btrfs_delayed_item **next)
346 {
347 	struct rb_node *node, *prev_node = NULL;
348 	struct btrfs_delayed_item *delayed_item = NULL;
349 	int ret = 0;
350 
351 	node = root->rb_node;
352 
353 	while (node) {
354 		delayed_item = rb_entry(node, struct btrfs_delayed_item,
355 					rb_node);
356 		prev_node = node;
357 		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
358 		if (ret < 0)
359 			node = node->rb_right;
360 		else if (ret > 0)
361 			node = node->rb_left;
362 		else
363 			return delayed_item;
364 	}
365 
366 	if (prev) {
367 		if (!prev_node)
368 			*prev = NULL;
369 		else if (ret < 0)
370 			*prev = delayed_item;
371 		else if ((node = rb_prev(prev_node)) != NULL) {
372 			*prev = rb_entry(node, struct btrfs_delayed_item,
373 					 rb_node);
374 		} else
375 			*prev = NULL;
376 	}
377 
378 	if (next) {
379 		if (!prev_node)
380 			*next = NULL;
381 		else if (ret > 0)
382 			*next = delayed_item;
383 		else if ((node = rb_next(prev_node)) != NULL) {
384 			*next = rb_entry(node, struct btrfs_delayed_item,
385 					 rb_node);
386 		} else
387 			*next = NULL;
388 	}
389 	return NULL;
390 }
391 
392 static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
393 					struct btrfs_delayed_node *delayed_node,
394 					struct btrfs_key *key)
395 {
396 	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
397 					   NULL, NULL);
398 }
399 
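/*
 * Link a delayed item into the insertion or deletion rb-tree of its delayed
 * node, ordered by key.  Returns -EEXIST if an item with the same key is
 * already queued.  For queued dir index insertions this also advances the
 * node's index_cnt.
 */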
400 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
401 				    struct btrfs_delayed_item *ins,
402 				    int action)
403 {
404 	struct rb_node **p, *node;
405 	struct rb_node *parent_node = NULL;
406 	struct rb_root_cached *root;
407 	struct btrfs_delayed_item *item;
408 	int cmp;
409 	bool leftmost = true;
410 
411 	if (action == BTRFS_DELAYED_INSERTION_ITEM)
412 		root = &delayed_node->ins_root;
413 	else if (action == BTRFS_DELAYED_DELETION_ITEM)
414 		root = &delayed_node->del_root;
415 	else
416 		BUG();
417 	p = &root->rb_root.rb_node;
418 	node = &ins->rb_node;
419 
420 	while (*p) {
421 		parent_node = *p;
422 		item = rb_entry(parent_node, struct btrfs_delayed_item,
423 				 rb_node);
424 
425 		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
426 		if (cmp < 0) {
427 			p = &(*p)->rb_right;
428 			leftmost = false;
429 		} else if (cmp > 0) {
430 			p = &(*p)->rb_left;
431 		} else {
432 			return -EEXIST;
433 		}
434 	}
435 
436 	rb_link_node(node, parent_node, p);
437 	rb_insert_color_cached(node, root, leftmost);
438 	ins->delayed_node = delayed_node;
439 	ins->ins_or_del = action;
440 
441 	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
442 	    action == BTRFS_DELAYED_INSERTION_ITEM &&
443 	    ins->key.offset >= delayed_node->index_cnt)
444 		delayed_node->index_cnt = ins->key.offset + 1;
445 
446 	delayed_node->count++;
447 	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
448 	return 0;
449 }
450 
451 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
452 					      struct btrfs_delayed_item *item)
453 {
454 	return __btrfs_add_delayed_item(node, item,
455 					BTRFS_DELAYED_INSERTION_ITEM);
456 }
457 
458 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
459 					     struct btrfs_delayed_item *item)
460 {
461 	return __btrfs_add_delayed_item(node, item,
462 					BTRFS_DELAYED_DELETION_ITEM);
463 }
464 
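/*
 * Account one finished delayed item and wake up any waiter when the total
 * number of items drops below BTRFS_DELAYED_BACKGROUND or another batch of
 * BTRFS_DELAYED_BATCH items has completed.
 */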
465 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
466 {
467 	int seq = atomic_inc_return(&delayed_root->items_seq);
468 
469 	/* atomic_dec_return implies a barrier */
470 	if (atomic_dec_return(&delayed_root->items) <
471 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0)
472 		cond_wake_up_nomb(&delayed_root->wait);
473 }
474 
475 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
476 {
477 	struct rb_root_cached *root;
478 	struct btrfs_delayed_root *delayed_root;
479 
480 	/* Not associated with any delayed_node */
481 	if (!delayed_item->delayed_node)
482 		return;
483 	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
484 
485 	BUG_ON(!delayed_root);
486 	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
487 	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
488 
489 	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
490 		root = &delayed_item->delayed_node->ins_root;
491 	else
492 		root = &delayed_item->delayed_node->del_root;
493 
494 	rb_erase_cached(&delayed_item->rb_node, root);
495 	delayed_item->delayed_node->count--;
496 
497 	finish_one_item(delayed_root);
498 }
499 
500 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
501 {
502 	if (item) {
503 		__btrfs_remove_delayed_item(item);
504 		if (refcount_dec_and_test(&item->refs))
505 			kfree(item);
506 	}
507 }
508 
509 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
510 					struct btrfs_delayed_node *delayed_node)
511 {
512 	struct rb_node *p;
513 	struct btrfs_delayed_item *item = NULL;
514 
515 	p = rb_first_cached(&delayed_node->ins_root);
516 	if (p)
517 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
518 
519 	return item;
520 }
521 
522 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
523 					struct btrfs_delayed_node *delayed_node)
524 {
525 	struct rb_node *p;
526 	struct btrfs_delayed_item *item = NULL;
527 
528 	p = rb_first_cached(&delayed_node->del_root);
529 	if (p)
530 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
531 
532 	return item;
533 }
534 
535 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
536 						struct btrfs_delayed_item *item)
537 {
538 	struct rb_node *p;
539 	struct btrfs_delayed_item *next = NULL;
540 
541 	p = rb_next(&item->rb_node);
542 	if (p)
543 		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
544 
545 	return next;
546 }
547 
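/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's reservation into the global delayed_block_rsv, so the space
 * stays reserved after the transaction handle is released.
 */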
548 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
549 					       struct btrfs_root *root,
550 					       struct btrfs_delayed_item *item)
551 {
552 	struct btrfs_block_rsv *src_rsv;
553 	struct btrfs_block_rsv *dst_rsv;
554 	struct btrfs_fs_info *fs_info = root->fs_info;
555 	u64 num_bytes;
556 	int ret;
557 
558 	if (!trans->bytes_reserved)
559 		return 0;
560 
561 	src_rsv = trans->block_rsv;
562 	dst_rsv = &fs_info->delayed_block_rsv;
563 
564 	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
565 
566 	/*
567 	 * Here we migrate the space rsv from the transaction rsv, since we
568 	 * have already reserved space when starting a transaction.  So there
569 	 * is no need to reserve qgroup space here.
570 	 */
571 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
572 	if (!ret) {
573 		trace_btrfs_space_reservation(fs_info, "delayed_item",
574 					      item->key.objectid,
575 					      num_bytes, 1);
576 		item->bytes_reserved = num_bytes;
577 	}
578 
579 	return ret;
580 }
581 
582 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
583 						struct btrfs_delayed_item *item)
584 {
585 	struct btrfs_block_rsv *rsv;
586 	struct btrfs_fs_info *fs_info = root->fs_info;
587 
588 	if (!item->bytes_reserved)
589 		return;
590 
591 	rsv = &fs_info->delayed_block_rsv;
592 	/*
593 	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
594 	 * to release/reserve qgroup space.
595 	 */
596 	trace_btrfs_space_reservation(fs_info, "delayed_item",
597 				      item->key.objectid, item->bytes_reserved,
598 				      0);
599 	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
600 }
601 
602 static int btrfs_delayed_inode_reserve_metadata(
603 					struct btrfs_trans_handle *trans,
604 					struct btrfs_root *root,
605 					struct btrfs_inode *inode,
606 					struct btrfs_delayed_node *node)
607 {
608 	struct btrfs_fs_info *fs_info = root->fs_info;
609 	struct btrfs_block_rsv *src_rsv;
610 	struct btrfs_block_rsv *dst_rsv;
611 	u64 num_bytes;
612 	int ret;
613 
614 	src_rsv = trans->block_rsv;
615 	dst_rsv = &fs_info->delayed_block_rsv;
616 
617 	num_bytes = btrfs_calc_metadata_size(fs_info, 1);
618 
619 	/*
620 	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
621 	 * which doesn't reserve space, for speed.  This is a problem since we
622 	 * still need to reserve space for this update, so try to reserve the
623 	 * space.
624 	 *
625 	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
626 	 * we always reserve enough to update the inode item.
627 	 */
628 	if (!src_rsv || (!trans->bytes_reserved &&
629 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
630 		ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
631 		if (ret < 0)
632 			return ret;
633 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
634 					  BTRFS_RESERVE_NO_FLUSH);
635 		/*
636 		 * Since we're under a transaction, reserve_metadata_bytes could
637 		 * try to commit the transaction, which would make it return
638 		 * -EAGAIN to make us stop the transaction we have.  So return
639 		 * -ENOSPC instead so that btrfs_dirty_inode knows what to do.
640 		 */
641 		if (ret == -EAGAIN) {
642 			ret = -ENOSPC;
643 			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
644 		}
645 		if (!ret) {
646 			node->bytes_reserved = num_bytes;
647 			trace_btrfs_space_reservation(fs_info,
648 						      "delayed_inode",
649 						      btrfs_ino(inode),
650 						      num_bytes, 1);
651 		} else {
652 			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
653 		}
654 		return ret;
655 	}
656 
657 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
658 	if (!ret) {
659 		trace_btrfs_space_reservation(fs_info, "delayed_inode",
660 					      btrfs_ino(inode), num_bytes, 1);
661 		node->bytes_reserved = num_bytes;
662 	}
663 
664 	return ret;
665 }
666 
667 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
668 						struct btrfs_delayed_node *node,
669 						bool qgroup_free)
670 {
671 	struct btrfs_block_rsv *rsv;
672 
673 	if (!node->bytes_reserved)
674 		return;
675 
676 	rsv = &fs_info->delayed_block_rsv;
677 	trace_btrfs_space_reservation(fs_info, "delayed_inode",
678 				      node->inode_id, node->bytes_reserved, 0);
679 	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
680 	if (qgroup_free)
681 		btrfs_qgroup_free_meta_prealloc(node->root,
682 				node->bytes_reserved);
683 	else
684 		btrfs_qgroup_convert_reserved_meta(node->root,
685 				node->bytes_reserved);
686 	node->bytes_reserved = 0;
687 }
688 
689 /*
690  * This helper inserts as many continuous items as fit into the same leaf,
691  * limited by the free space of the leaf.
692  */
693 static int btrfs_batch_insert_items(struct btrfs_root *root,
694 				    struct btrfs_path *path,
695 				    struct btrfs_delayed_item *item)
696 {
697 	struct btrfs_delayed_item *curr, *next;
698 	int free_space;
699 	int total_data_size = 0, total_size = 0;
700 	struct extent_buffer *leaf;
701 	char *data_ptr;
702 	struct btrfs_key *keys;
703 	u32 *data_size;
704 	struct list_head head;
705 	int slot;
706 	int nitems;
707 	int i;
708 	int ret = 0;
709 
710 	BUG_ON(!path->nodes[0]);
711 
712 	leaf = path->nodes[0];
713 	free_space = btrfs_leaf_free_space(leaf);
714 	INIT_LIST_HEAD(&head);
715 
716 	next = item;
717 	nitems = 0;
718 
719 	/*
720 	 * count the number of continuous items that we can insert in a batch
721 	 */
722 	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
723 	       free_space) {
724 		total_data_size += next->data_len;
725 		total_size += next->data_len + sizeof(struct btrfs_item);
726 		list_add_tail(&next->tree_list, &head);
727 		nitems++;
728 
729 		curr = next;
730 		next = __btrfs_next_delayed_item(curr);
731 		if (!next)
732 			break;
733 
734 		if (!btrfs_is_continuous_delayed_item(curr, next))
735 			break;
736 	}
737 
738 	if (!nitems) {
739 		ret = 0;
740 		goto out;
741 	}
742 
743 	/*
744 	 * we need to allocate some memory space, but it might cause the task
745 	 * to sleep, so we set all locked nodes in the path to blocking locks
746 	 * first.
747 	 */
748 	btrfs_set_path_blocking(path);
749 
750 	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
751 	if (!keys) {
752 		ret = -ENOMEM;
753 		goto out;
754 	}
755 
756 	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
757 	if (!data_size) {
758 		ret = -ENOMEM;
759 		goto error;
760 	}
761 
762 	/* get keys of all the delayed items */
763 	i = 0;
764 	list_for_each_entry(next, &head, tree_list) {
765 		keys[i] = next->key;
766 		data_size[i] = next->data_len;
767 		i++;
768 	}
769 
770 	/* insert the keys of the items */
771 	setup_items_for_insert(root, path, keys, data_size, nitems);
772 
773 	/* insert the dir index items */
774 	slot = path->slots[0];
775 	list_for_each_entry_safe(curr, next, &head, tree_list) {
776 		data_ptr = btrfs_item_ptr(leaf, slot, char);
777 		write_extent_buffer(leaf, &curr->data,
778 				    (unsigned long)data_ptr,
779 				    curr->data_len);
780 		slot++;
781 
782 		btrfs_delayed_item_release_metadata(root, curr);
783 
784 		list_del(&curr->tree_list);
785 		btrfs_release_delayed_item(curr);
786 	}
787 
788 error:
789 	kfree(data_size);
790 	kfree(keys);
791 out:
792 	return ret;
793 }
794 
795 /*
796  * This helper handles simple insertions that don't need to extend an item
797  * for new data, such as directory name index insertion and inode insertion.
798  */
799 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
800 				     struct btrfs_root *root,
801 				     struct btrfs_path *path,
802 				     struct btrfs_delayed_item *delayed_item)
803 {
804 	struct extent_buffer *leaf;
805 	unsigned int nofs_flag;
806 	char *ptr;
807 	int ret;
808 
809 	nofs_flag = memalloc_nofs_save();
810 	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
811 				      delayed_item->data_len);
812 	memalloc_nofs_restore(nofs_flag);
813 	if (ret < 0 && ret != -EEXIST)
814 		return ret;
815 
816 	leaf = path->nodes[0];
817 
818 	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
819 
820 	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
821 			    delayed_item->data_len);
822 	btrfs_mark_buffer_dirty(leaf);
823 
824 	btrfs_delayed_item_release_metadata(root, delayed_item);
825 	return 0;
826 }
827 
828 /*
829  * We insert one item first, then if there are some continuous items, we try
830  * to insert those items into the same leaf.
831  */
832 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
833 				      struct btrfs_path *path,
834 				      struct btrfs_root *root,
835 				      struct btrfs_delayed_node *node)
836 {
837 	struct btrfs_delayed_item *curr, *prev;
838 	int ret = 0;
839 
840 do_again:
841 	mutex_lock(&node->mutex);
842 	curr = __btrfs_first_delayed_insertion_item(node);
843 	if (!curr)
844 		goto insert_end;
845 
846 	ret = btrfs_insert_delayed_item(trans, root, path, curr);
847 	if (ret < 0) {
848 		btrfs_release_path(path);
849 		goto insert_end;
850 	}
851 
852 	prev = curr;
853 	curr = __btrfs_next_delayed_item(prev);
854 	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
855 		/* insert the continuous items into the same leaf */
856 		path->slots[0]++;
857 		btrfs_batch_insert_items(root, path, curr);
858 	}
859 	btrfs_release_delayed_item(prev);
860 	btrfs_mark_buffer_dirty(path->nodes[0]);
861 
862 	btrfs_release_path(path);
863 	mutex_unlock(&node->mutex);
864 	goto do_again;
865 
866 insert_end:
867 	mutex_unlock(&node->mutex);
868 	return ret;
869 }
870 
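/*
 * Delete the run of leaf items, starting at path->slots[0], whose keys match
 * consecutive delayed deletion items, then release those delayed items and
 * their metadata reservations.
 */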
871 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
872 				    struct btrfs_root *root,
873 				    struct btrfs_path *path,
874 				    struct btrfs_delayed_item *item)
875 {
876 	struct btrfs_delayed_item *curr, *next;
877 	struct extent_buffer *leaf;
878 	struct btrfs_key key;
879 	struct list_head head;
880 	int nitems, i, last_item;
881 	int ret = 0;
882 
883 	BUG_ON(!path->nodes[0]);
884 
885 	leaf = path->nodes[0];
886 
887 	i = path->slots[0];
888 	last_item = btrfs_header_nritems(leaf) - 1;
889 	if (i > last_item)
890 		return -ENOENT;	/* FIXME: Is errno suitable? */
891 
892 	next = item;
893 	INIT_LIST_HEAD(&head);
894 	btrfs_item_key_to_cpu(leaf, &key, i);
895 	nitems = 0;
896 	/*
897 	 * count the number of dir index items that we can delete in a batch
898 	 */
899 	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
900 		list_add_tail(&next->tree_list, &head);
901 		nitems++;
902 
903 		curr = next;
904 		next = __btrfs_next_delayed_item(curr);
905 		if (!next)
906 			break;
907 
908 		if (!btrfs_is_continuous_delayed_item(curr, next))
909 			break;
910 
911 		i++;
912 		if (i > last_item)
913 			break;
914 		btrfs_item_key_to_cpu(leaf, &key, i);
915 	}
916 
917 	if (!nitems)
918 		return 0;
919 
920 	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
921 	if (ret)
922 		goto out;
923 
924 	list_for_each_entry_safe(curr, next, &head, tree_list) {
925 		btrfs_delayed_item_release_metadata(root, curr);
926 		list_del(&curr->tree_list);
927 		btrfs_release_delayed_item(curr);
928 	}
929 
930 out:
931 	return ret;
932 }
933 
934 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
935 				      struct btrfs_path *path,
936 				      struct btrfs_root *root,
937 				      struct btrfs_delayed_node *node)
938 {
939 	struct btrfs_delayed_item *curr, *prev;
940 	unsigned int nofs_flag;
941 	int ret = 0;
942 
943 do_again:
944 	mutex_lock(&node->mutex);
945 	curr = __btrfs_first_delayed_deletion_item(node);
946 	if (!curr)
947 		goto delete_fail;
948 
949 	nofs_flag = memalloc_nofs_save();
950 	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
951 	memalloc_nofs_restore(nofs_flag);
952 	if (ret < 0)
953 		goto delete_fail;
954 	else if (ret > 0) {
955 		/*
956 		 * can't find the item that this delayed item points to, so the
957 		 * delayed item is invalid, just drop it.
958 		 */
959 		prev = curr;
960 		curr = __btrfs_next_delayed_item(prev);
961 		btrfs_release_delayed_item(prev);
962 		ret = 0;
963 		btrfs_release_path(path);
964 		if (curr) {
965 			mutex_unlock(&node->mutex);
966 			goto do_again;
967 		} else
968 			goto delete_fail;
969 	}
970 
971 	btrfs_batch_delete_items(trans, root, path, curr);
972 	btrfs_release_path(path);
973 	mutex_unlock(&node->mutex);
974 	goto do_again;
975 
976 delete_fail:
977 	btrfs_release_path(path);
978 	mutex_unlock(&node->mutex);
979 	return ret;
980 }
981 
982 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
983 {
984 	struct btrfs_delayed_root *delayed_root;
985 
986 	if (delayed_node &&
987 	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
988 		BUG_ON(!delayed_node->root);
989 		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
990 		delayed_node->count--;
991 
992 		delayed_root = delayed_node->root->fs_info->delayed_root;
993 		finish_one_item(delayed_root);
994 	}
995 }
996 
997 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
998 {
999 	struct btrfs_delayed_root *delayed_root;
1000 
1001 	ASSERT(delayed_node->root);
1002 	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1003 	delayed_node->count--;
1004 
1005 	delayed_root = delayed_node->root->fs_info->delayed_root;
1006 	finish_one_item(delayed_root);
1007 }
1008 
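/*
 * Copy the in-memory inode item of the delayed node over the inode item in
 * the fs tree, and delete the single inode ref when DEL_IREF is set.  Must
 * be called with delayed_node->mutex held.
 */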
1009 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1010 					struct btrfs_root *root,
1011 					struct btrfs_path *path,
1012 					struct btrfs_delayed_node *node)
1013 {
1014 	struct btrfs_fs_info *fs_info = root->fs_info;
1015 	struct btrfs_key key;
1016 	struct btrfs_inode_item *inode_item;
1017 	struct extent_buffer *leaf;
1018 	unsigned int nofs_flag;
1019 	int mod;
1020 	int ret;
1021 
1022 	key.objectid = node->inode_id;
1023 	key.type = BTRFS_INODE_ITEM_KEY;
1024 	key.offset = 0;
1025 
1026 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1027 		mod = -1;
1028 	else
1029 		mod = 1;
1030 
1031 	nofs_flag = memalloc_nofs_save();
1032 	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1033 	memalloc_nofs_restore(nofs_flag);
1034 	if (ret > 0) {
1035 		btrfs_release_path(path);
1036 		return -ENOENT;
1037 	} else if (ret < 0) {
1038 		return ret;
1039 	}
1040 
1041 	leaf = path->nodes[0];
1042 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1043 				    struct btrfs_inode_item);
1044 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1045 			    sizeof(struct btrfs_inode_item));
1046 	btrfs_mark_buffer_dirty(leaf);
1047 
1048 	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1049 		goto no_iref;
1050 
1051 	path->slots[0]++;
1052 	if (path->slots[0] >= btrfs_header_nritems(leaf))
1053 		goto search;
1054 again:
1055 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1056 	if (key.objectid != node->inode_id)
1057 		goto out;
1058 
1059 	if (key.type != BTRFS_INODE_REF_KEY &&
1060 	    key.type != BTRFS_INODE_EXTREF_KEY)
1061 		goto out;
1062 
1063 	/*
1064 	 * Delayed iref deletion is for an inode that has only one link,
1065 	 * so there is only one iref. The case where several irefs are
1066 	 * in the same item doesn't exist.
1067 	 */
1068 	ret = btrfs_del_item(trans, root, path);
1069 out:
1070 	btrfs_release_delayed_iref(node);
1071 no_iref:
1072 	btrfs_release_path(path);
1073 err_out:
1074 	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1075 	btrfs_release_delayed_inode(node);
1076 
1077 	return ret;
1078 
1079 search:
1080 	btrfs_release_path(path);
1081 
1082 	key.type = BTRFS_INODE_EXTREF_KEY;
1083 	key.offset = -1;
1084 
1085 	nofs_flag = memalloc_nofs_save();
1086 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1087 	memalloc_nofs_restore(nofs_flag);
1088 	if (ret < 0)
1089 		goto err_out;
1090 	ASSERT(ret);
1091 
1092 	ret = 0;
1093 	leaf = path->nodes[0];
1094 	path->slots[0]--;
1095 	goto again;
1096 }
1097 
1098 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1099 					     struct btrfs_root *root,
1100 					     struct btrfs_path *path,
1101 					     struct btrfs_delayed_node *node)
1102 {
1103 	int ret;
1104 
1105 	mutex_lock(&node->mutex);
1106 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1107 		mutex_unlock(&node->mutex);
1108 		return 0;
1109 	}
1110 
1111 	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1112 	mutex_unlock(&node->mutex);
1113 	return ret;
1114 }
1115 
1116 static inline int
1117 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1118 				   struct btrfs_path *path,
1119 				   struct btrfs_delayed_node *node)
1120 {
1121 	int ret;
1122 
1123 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1124 	if (ret)
1125 		return ret;
1126 
1127 	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1128 	if (ret)
1129 		return ret;
1130 
1131 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1132 	return ret;
1133 }
1134 
1135 /*
1136  * Called when committing the transaction.
1137  * Returns 0 on success.
1138  * Returns < 0 on error; in that case the transaction is aborted and any
1139  * outstanding delayed items are cleaned up.
1140  */
1141 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1142 {
1143 	struct btrfs_fs_info *fs_info = trans->fs_info;
1144 	struct btrfs_delayed_root *delayed_root;
1145 	struct btrfs_delayed_node *curr_node, *prev_node;
1146 	struct btrfs_path *path;
1147 	struct btrfs_block_rsv *block_rsv;
1148 	int ret = 0;
1149 	bool count = (nr > 0);
1150 
1151 	if (TRANS_ABORTED(trans))
1152 		return -EIO;
1153 
1154 	path = btrfs_alloc_path();
1155 	if (!path)
1156 		return -ENOMEM;
1157 	path->leave_spinning = 1;
1158 
1159 	block_rsv = trans->block_rsv;
1160 	trans->block_rsv = &fs_info->delayed_block_rsv;
1161 
1162 	delayed_root = fs_info->delayed_root;
1163 
1164 	curr_node = btrfs_first_delayed_node(delayed_root);
1165 	while (curr_node && (!count || nr--)) {
1166 		ret = __btrfs_commit_inode_delayed_items(trans, path,
1167 							 curr_node);
1168 		if (ret) {
1169 			btrfs_release_delayed_node(curr_node);
1170 			curr_node = NULL;
1171 			btrfs_abort_transaction(trans, ret);
1172 			break;
1173 		}
1174 
1175 		prev_node = curr_node;
1176 		curr_node = btrfs_next_delayed_node(curr_node);
1177 		btrfs_release_delayed_node(prev_node);
1178 	}
1179 
1180 	if (curr_node)
1181 		btrfs_release_delayed_node(curr_node);
1182 	btrfs_free_path(path);
1183 	trans->block_rsv = block_rsv;
1184 
1185 	return ret;
1186 }
1187 
1188 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1189 {
1190 	return __btrfs_run_delayed_items(trans, -1);
1191 }
1192 
1193 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1194 {
1195 	return __btrfs_run_delayed_items(trans, nr);
1196 }
1197 
1198 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1199 				     struct btrfs_inode *inode)
1200 {
1201 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1202 	struct btrfs_path *path;
1203 	struct btrfs_block_rsv *block_rsv;
1204 	int ret;
1205 
1206 	if (!delayed_node)
1207 		return 0;
1208 
1209 	mutex_lock(&delayed_node->mutex);
1210 	if (!delayed_node->count) {
1211 		mutex_unlock(&delayed_node->mutex);
1212 		btrfs_release_delayed_node(delayed_node);
1213 		return 0;
1214 	}
1215 	mutex_unlock(&delayed_node->mutex);
1216 
1217 	path = btrfs_alloc_path();
1218 	if (!path) {
1219 		btrfs_release_delayed_node(delayed_node);
1220 		return -ENOMEM;
1221 	}
1222 	path->leave_spinning = 1;
1223 
1224 	block_rsv = trans->block_rsv;
1225 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1226 
1227 	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1228 
1229 	btrfs_release_delayed_node(delayed_node);
1230 	btrfs_free_path(path);
1231 	trans->block_rsv = block_rsv;
1232 
1233 	return ret;
1234 }
1235 
1236 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1237 {
1238 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1239 	struct btrfs_trans_handle *trans;
1240 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1241 	struct btrfs_path *path;
1242 	struct btrfs_block_rsv *block_rsv;
1243 	int ret;
1244 
1245 	if (!delayed_node)
1246 		return 0;
1247 
1248 	mutex_lock(&delayed_node->mutex);
1249 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1250 		mutex_unlock(&delayed_node->mutex);
1251 		btrfs_release_delayed_node(delayed_node);
1252 		return 0;
1253 	}
1254 	mutex_unlock(&delayed_node->mutex);
1255 
1256 	trans = btrfs_join_transaction(delayed_node->root);
1257 	if (IS_ERR(trans)) {
1258 		ret = PTR_ERR(trans);
1259 		goto out;
1260 	}
1261 
1262 	path = btrfs_alloc_path();
1263 	if (!path) {
1264 		ret = -ENOMEM;
1265 		goto trans_out;
1266 	}
1267 	path->leave_spinning = 1;
1268 
1269 	block_rsv = trans->block_rsv;
1270 	trans->block_rsv = &fs_info->delayed_block_rsv;
1271 
1272 	mutex_lock(&delayed_node->mutex);
1273 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1274 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1275 						   path, delayed_node);
1276 	else
1277 		ret = 0;
1278 	mutex_unlock(&delayed_node->mutex);
1279 
1280 	btrfs_free_path(path);
1281 	trans->block_rsv = block_rsv;
1282 trans_out:
1283 	btrfs_end_transaction(trans);
1284 	btrfs_btree_balance_dirty(fs_info);
1285 out:
1286 	btrfs_release_delayed_node(delayed_node);
1287 
1288 	return ret;
1289 }
1290 
1291 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1292 {
1293 	struct btrfs_delayed_node *delayed_node;
1294 
1295 	delayed_node = READ_ONCE(inode->delayed_node);
1296 	if (!delayed_node)
1297 		return;
1298 
1299 	inode->delayed_node = NULL;
1300 	btrfs_release_delayed_node(delayed_node);
1301 }
1302 
1303 struct btrfs_async_delayed_work {
1304 	struct btrfs_delayed_root *delayed_root;
1305 	int nr;
1306 	struct btrfs_work work;
1307 };
1308 
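/*
 * Worker function: flush prepared delayed nodes until the number of queued
 * delayed items drops below half of BTRFS_DELAYED_BACKGROUND, until
 * async_work->nr nodes have been handled, or, when nr is 0, until
 * BTRFS_DELAYED_WRITEBACK nodes have been handled.
 */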
1309 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1310 {
1311 	struct btrfs_async_delayed_work *async_work;
1312 	struct btrfs_delayed_root *delayed_root;
1313 	struct btrfs_trans_handle *trans;
1314 	struct btrfs_path *path;
1315 	struct btrfs_delayed_node *delayed_node = NULL;
1316 	struct btrfs_root *root;
1317 	struct btrfs_block_rsv *block_rsv;
1318 	int total_done = 0;
1319 
1320 	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1321 	delayed_root = async_work->delayed_root;
1322 
1323 	path = btrfs_alloc_path();
1324 	if (!path)
1325 		goto out;
1326 
1327 	do {
1328 		if (atomic_read(&delayed_root->items) <
1329 		    BTRFS_DELAYED_BACKGROUND / 2)
1330 			break;
1331 
1332 		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1333 		if (!delayed_node)
1334 			break;
1335 
1336 		path->leave_spinning = 1;
1337 		root = delayed_node->root;
1338 
1339 		trans = btrfs_join_transaction(root);
1340 		if (IS_ERR(trans)) {
1341 			btrfs_release_path(path);
1342 			btrfs_release_prepared_delayed_node(delayed_node);
1343 			total_done++;
1344 			continue;
1345 		}
1346 
1347 		block_rsv = trans->block_rsv;
1348 		trans->block_rsv = &root->fs_info->delayed_block_rsv;
1349 
1350 		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1351 
1352 		trans->block_rsv = block_rsv;
1353 		btrfs_end_transaction(trans);
1354 		btrfs_btree_balance_dirty_nodelay(root->fs_info);
1355 
1356 		btrfs_release_path(path);
1357 		btrfs_release_prepared_delayed_node(delayed_node);
1358 		total_done++;
1359 
1360 	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1361 		 || total_done < async_work->nr);
1362 
1363 	btrfs_free_path(path);
1364 out:
1365 	wake_up(&delayed_root->wait);
1366 	kfree(async_work);
1367 }
1368 
1369 
1370 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1371 				     struct btrfs_fs_info *fs_info, int nr)
1372 {
1373 	struct btrfs_async_delayed_work *async_work;
1374 
1375 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1376 	if (!async_work)
1377 		return -ENOMEM;
1378 
1379 	async_work->delayed_root = delayed_root;
1380 	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1381 			NULL);
1382 	async_work->nr = nr;
1383 
1384 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1385 	return 0;
1386 }
1387 
1388 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1389 {
1390 	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1391 }
1392 
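/*
 * A waiter may stop waiting once at least BTRFS_DELAYED_BATCH items have
 * been finished since it sampled @seq (or the sequence counter wrapped), or
 * once the item count has dropped below BTRFS_DELAYED_BACKGROUND.
 */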
1393 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1394 {
1395 	int val = atomic_read(&delayed_root->items_seq);
1396 
1397 	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1398 		return 1;
1399 
1400 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1401 		return 1;
1402 
1403 	return 0;
1404 }
1405 
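/*
 * Throttle the creation of delayed items: once more than
 * BTRFS_DELAYED_BACKGROUND items are queued, kick off asynchronous writeback
 * of a batch; once BTRFS_DELAYED_WRITEBACK is reached, also make the caller
 * wait until enough items have been flushed.
 */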
1406 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1407 {
1408 	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1409 
1410 	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1411 		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1412 		return;
1413 
1414 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1415 		int seq;
1416 		int ret;
1417 
1418 		seq = atomic_read(&delayed_root->items_seq);
1419 
1420 		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1421 		if (ret)
1422 			return;
1423 
1424 		wait_event_interruptible(delayed_root->wait,
1425 					 could_end_wait(delayed_root, seq));
1426 		return;
1427 	}
1428 
1429 	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1430 }
1431 
1432 /* Will return 0 or -ENOMEM */
1433 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1434 				   const char *name, int name_len,
1435 				   struct btrfs_inode *dir,
1436 				   struct btrfs_disk_key *disk_key, u8 type,
1437 				   u64 index)
1438 {
1439 	struct btrfs_delayed_node *delayed_node;
1440 	struct btrfs_delayed_item *delayed_item;
1441 	struct btrfs_dir_item *dir_item;
1442 	int ret;
1443 
1444 	delayed_node = btrfs_get_or_create_delayed_node(dir);
1445 	if (IS_ERR(delayed_node))
1446 		return PTR_ERR(delayed_node);
1447 
1448 	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1449 	if (!delayed_item) {
1450 		ret = -ENOMEM;
1451 		goto release_node;
1452 	}
1453 
1454 	delayed_item->key.objectid = btrfs_ino(dir);
1455 	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1456 	delayed_item->key.offset = index;
1457 
1458 	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1459 	dir_item->location = *disk_key;
1460 	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1461 	btrfs_set_stack_dir_data_len(dir_item, 0);
1462 	btrfs_set_stack_dir_name_len(dir_item, name_len);
1463 	btrfs_set_stack_dir_type(dir_item, type);
1464 	memcpy((char *)(dir_item + 1), name, name_len);
1465 
1466 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1467 	/*
1468 	 * We have reserved enough space when we started the transaction, so
1469 	 * a metadata reservation failure here is impossible.
1470 	 */
1471 	BUG_ON(ret);
1472 
1473 	mutex_lock(&delayed_node->mutex);
1474 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1475 	if (unlikely(ret)) {
1476 		btrfs_err(trans->fs_info,
1477 			  "failed to add delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1478 			  name_len, name, delayed_node->root->root_key.objectid,
1479 			  delayed_node->inode_id, ret);
1480 		BUG();
1481 	}
1482 	mutex_unlock(&delayed_node->mutex);
1483 
1484 release_node:
1485 	btrfs_release_delayed_node(delayed_node);
1486 	return ret;
1487 }
1488 
1489 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1490 					       struct btrfs_delayed_node *node,
1491 					       struct btrfs_key *key)
1492 {
1493 	struct btrfs_delayed_item *item;
1494 
1495 	mutex_lock(&node->mutex);
1496 	item = __btrfs_lookup_delayed_insertion_item(node, key);
1497 	if (!item) {
1498 		mutex_unlock(&node->mutex);
1499 		return 1;
1500 	}
1501 
1502 	btrfs_delayed_item_release_metadata(node->root, item);
1503 	btrfs_release_delayed_item(item);
1504 	mutex_unlock(&node->mutex);
1505 	return 0;
1506 }
1507 
1508 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1509 				   struct btrfs_inode *dir, u64 index)
1510 {
1511 	struct btrfs_delayed_node *node;
1512 	struct btrfs_delayed_item *item;
1513 	struct btrfs_key item_key;
1514 	int ret;
1515 
1516 	node = btrfs_get_or_create_delayed_node(dir);
1517 	if (IS_ERR(node))
1518 		return PTR_ERR(node);
1519 
1520 	item_key.objectid = btrfs_ino(dir);
1521 	item_key.type = BTRFS_DIR_INDEX_KEY;
1522 	item_key.offset = index;
1523 
1524 	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1525 						  &item_key);
1526 	if (!ret)
1527 		goto end;
1528 
1529 	item = btrfs_alloc_delayed_item(0);
1530 	if (!item) {
1531 		ret = -ENOMEM;
1532 		goto end;
1533 	}
1534 
1535 	item->key = item_key;
1536 
1537 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1538 	/*
1539 	 * We have reserved enough space when we started the transaction, so
1540 	 * a metadata reservation failure here is impossible.
1541 	 */
1542 	if (ret < 0) {
1543 		btrfs_err(trans->fs_info,
1544 "metadata reservation failed for delayed dir item deletion, should have been reserved");
1545 		btrfs_release_delayed_item(item);
1546 		goto end;
1547 	}
1548 
1549 	mutex_lock(&node->mutex);
1550 	ret = __btrfs_add_delayed_deletion_item(node, item);
1551 	if (unlikely(ret)) {
1552 		btrfs_err(trans->fs_info,
1553 			  "failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1554 			  index, node->root->root_key.objectid,
1555 			  node->inode_id, ret);
1556 		btrfs_delayed_item_release_metadata(dir->root, item);
1557 		btrfs_release_delayed_item(item);
1558 	}
1559 	mutex_unlock(&node->mutex);
1560 end:
1561 	btrfs_release_delayed_node(node);
1562 	return ret;
1563 }
1564 
1565 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1566 {
1567 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1568 
1569 	if (!delayed_node)
1570 		return -ENOENT;
1571 
1572 	/*
1573 	 * Since we hold i_mutex of this directory, it is impossible that
1574 	 * a new directory index is added into the delayed node and index_cnt
1575 	 * is updated now. So we needn't lock the delayed node.
1576 	 */
1577 	if (!delayed_node->index_cnt) {
1578 		btrfs_release_delayed_node(delayed_node);
1579 		return -EINVAL;
1580 	}
1581 
1582 	inode->index_cnt = delayed_node->index_cnt;
1583 	btrfs_release_delayed_node(delayed_node);
1584 	return 0;
1585 }
1586 
1587 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1588 				     struct list_head *ins_list,
1589 				     struct list_head *del_list)
1590 {
1591 	struct btrfs_delayed_node *delayed_node;
1592 	struct btrfs_delayed_item *item;
1593 
1594 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1595 	if (!delayed_node)
1596 		return false;
1597 
1598 	/*
1599 	 * We can only do one readdir with delayed items at a time because of
1600 	 * item->readdir_list.
1601 	 */
1602 	inode_unlock_shared(inode);
1603 	inode_lock(inode);
1604 
1605 	mutex_lock(&delayed_node->mutex);
1606 	item = __btrfs_first_delayed_insertion_item(delayed_node);
1607 	while (item) {
1608 		refcount_inc(&item->refs);
1609 		list_add_tail(&item->readdir_list, ins_list);
1610 		item = __btrfs_next_delayed_item(item);
1611 	}
1612 
1613 	item = __btrfs_first_delayed_deletion_item(delayed_node);
1614 	while (item) {
1615 		refcount_inc(&item->refs);
1616 		list_add_tail(&item->readdir_list, del_list);
1617 		item = __btrfs_next_delayed_item(item);
1618 	}
1619 	mutex_unlock(&delayed_node->mutex);
1620 	/*
1621 	 * This delayed node is still cached in the btrfs inode, so refs
1622 	 * must be > 1 now, and we needn't check whether it is going to be
1623 	 * freed or not.
1624 	 *
1625 	 * Besides that, this function is used to read the dir; we do not
1626 	 * insert/delete delayed items in this period. So we also needn't
1627 	 * requeue or dequeue this delayed node.
1628 	 */
1629 	refcount_dec(&delayed_node->refs);
1630 
1631 	return true;
1632 }
1633 
1634 void btrfs_readdir_put_delayed_items(struct inode *inode,
1635 				     struct list_head *ins_list,
1636 				     struct list_head *del_list)
1637 {
1638 	struct btrfs_delayed_item *curr, *next;
1639 
1640 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1641 		list_del(&curr->readdir_list);
1642 		if (refcount_dec_and_test(&curr->refs))
1643 			kfree(curr);
1644 	}
1645 
1646 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1647 		list_del(&curr->readdir_list);
1648 		if (refcount_dec_and_test(&curr->refs))
1649 			kfree(curr);
1650 	}
1651 
1652 	/*
1653 	 * The VFS is going to do up_read(), so we need to downgrade back to a
1654 	 * read lock.
1655 	 */
1656 	downgrade_write(&inode->i_rwsem);
1657 }
1658 
1659 int btrfs_should_delete_dir_index(struct list_head *del_list,
1660 				  u64 index)
1661 {
1662 	struct btrfs_delayed_item *curr;
1663 	int ret = 0;
1664 
1665 	list_for_each_entry(curr, del_list, readdir_list) {
1666 		if (curr->key.offset > index)
1667 			break;
1668 		if (curr->key.offset == index) {
1669 			ret = 1;
1670 			break;
1671 		}
1672 	}
1673 	return ret;
1674 }
1675 
1676 /*
1677  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1678  * Returns 1 if the dir_context buffer is full, otherwise 0.
1679  */
1680 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1681 				    struct list_head *ins_list)
1682 {
1683 	struct btrfs_dir_item *di;
1684 	struct btrfs_delayed_item *curr, *next;
1685 	struct btrfs_key location;
1686 	char *name;
1687 	int name_len;
1688 	int over = 0;
1689 	unsigned char d_type;
1690 
1691 	if (list_empty(ins_list))
1692 		return 0;
1693 
1694 	/*
1695 	 * Changing the data of the delayed item is impossible. So
1696 	 * we needn't lock them. And since we hold i_mutex of the
1697 	 * directory, nobody can delete any directory indexes now.
1698 	 */
1699 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1700 		list_del(&curr->readdir_list);
1701 
1702 		if (curr->key.offset < ctx->pos) {
1703 			if (refcount_dec_and_test(&curr->refs))
1704 				kfree(curr);
1705 			continue;
1706 		}
1707 
1708 		ctx->pos = curr->key.offset;
1709 
1710 		di = (struct btrfs_dir_item *)curr->data;
1711 		name = (char *)(di + 1);
1712 		name_len = btrfs_stack_dir_name_len(di);
1713 
1714 		d_type = fs_ftype_to_dtype(di->type);
1715 		btrfs_disk_key_to_cpu(&location, &di->location);
1716 
1717 		over = !dir_emit(ctx, name, name_len,
1718 			       location.objectid, d_type);
1719 
1720 		if (refcount_dec_and_test(&curr->refs))
1721 			kfree(curr);
1722 
1723 		if (over)
1724 			return 1;
1725 		ctx->pos++;
1726 	}
1727 	return 0;
1728 }
1729 
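/*
 * Copy the current VFS inode fields into the stack inode item stored in the
 * delayed node, so the item can be written out later without touching the
 * inode again.
 */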
1730 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1731 				  struct btrfs_inode_item *inode_item,
1732 				  struct inode *inode)
1733 {
1734 	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1735 	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1736 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1737 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1738 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1739 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1740 	btrfs_set_stack_inode_generation(inode_item,
1741 					 BTRFS_I(inode)->generation);
1742 	btrfs_set_stack_inode_sequence(inode_item,
1743 				       inode_peek_iversion(inode));
1744 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1745 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1746 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1747 	btrfs_set_stack_inode_block_group(inode_item, 0);
1748 
1749 	btrfs_set_stack_timespec_sec(&inode_item->atime,
1750 				     inode->i_atime.tv_sec);
1751 	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1752 				      inode->i_atime.tv_nsec);
1753 
1754 	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1755 				     inode->i_mtime.tv_sec);
1756 	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1757 				      inode->i_mtime.tv_nsec);
1758 
1759 	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1760 				     inode->i_ctime.tv_sec);
1761 	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1762 				      inode->i_ctime.tv_nsec);
1763 
1764 	btrfs_set_stack_timespec_sec(&inode_item->otime,
1765 				     BTRFS_I(inode)->i_otime.tv_sec);
1766 	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1767 				     BTRFS_I(inode)->i_otime.tv_nsec);
1768 }
1769 
1770 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1771 {
1772 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1773 	struct btrfs_delayed_node *delayed_node;
1774 	struct btrfs_inode_item *inode_item;
1775 
1776 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1777 	if (!delayed_node)
1778 		return -ENOENT;
1779 
1780 	mutex_lock(&delayed_node->mutex);
1781 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1782 		mutex_unlock(&delayed_node->mutex);
1783 		btrfs_release_delayed_node(delayed_node);
1784 		return -ENOENT;
1785 	}
1786 
1787 	inode_item = &delayed_node->inode_item;
1788 
1789 	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1790 	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1791 	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1792 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1793 			round_up(i_size_read(inode), fs_info->sectorsize));
1794 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1795 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1796 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1797 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1798 	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1799 
1800 	inode_set_iversion_queried(inode,
1801 				   btrfs_stack_inode_sequence(inode_item));
1802 	inode->i_rdev = 0;
1803 	*rdev = btrfs_stack_inode_rdev(inode_item);
1804 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1805 
1806 	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1807 	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1808 
1809 	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1810 	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1811 
1812 	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1813 	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1814 
1815 	BTRFS_I(inode)->i_otime.tv_sec =
1816 		btrfs_stack_timespec_sec(&inode_item->otime);
1817 	BTRFS_I(inode)->i_otime.tv_nsec =
1818 		btrfs_stack_timespec_nsec(&inode_item->otime);
1819 
1820 	inode->i_generation = BTRFS_I(inode)->generation;
1821 	BTRFS_I(inode)->index_cnt = (u64)-1;
1822 
1823 	mutex_unlock(&delayed_node->mutex);
1824 	btrfs_release_delayed_node(delayed_node);
1825 	return 0;
1826 }
1827 
1828 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1829 			       struct btrfs_root *root, struct inode *inode)
1830 {
1831 	struct btrfs_delayed_node *delayed_node;
1832 	int ret = 0;
1833 
1834 	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1835 	if (IS_ERR(delayed_node))
1836 		return PTR_ERR(delayed_node);
1837 
1838 	mutex_lock(&delayed_node->mutex);
1839 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1840 		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1841 		goto release_node;
1842 	}
1843 
1844 	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1845 						   delayed_node);
1846 	if (ret)
1847 		goto release_node;
1848 
1849 	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1850 	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1851 	delayed_node->count++;
1852 	atomic_inc(&root->fs_info->delayed_root->items);
1853 release_node:
1854 	mutex_unlock(&delayed_node->mutex);
1855 	btrfs_release_delayed_node(delayed_node);
1856 	return ret;
1857 }
1858 
1859 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1860 {
1861 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1862 	struct btrfs_delayed_node *delayed_node;
1863 
1864 	/*
1865 	 * We don't do delayed inode updates during log recovery because it
1866 	 * leads to ENOSPC problems.  This means we also can't do
1867 	 * delayed inode refs.
1868 	 */
1869 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1870 		return -EAGAIN;
1871 
1872 	delayed_node = btrfs_get_or_create_delayed_node(inode);
1873 	if (IS_ERR(delayed_node))
1874 		return PTR_ERR(delayed_node);
1875 
1876 	/*
1877 	 * We don't reserve space for the inode ref deletion because:
1878 	 * - We ONLY do async inode ref deletion for an inode that has only
1879 	 *   one link (i_nlink == 1), which means there is only one inode ref.
1880 	 *   In most cases, the inode ref and the inode item are in the same
1881 	 *   leaf, and we will deal with them at the same time.
1882 	 *   Since we are sure we will reserve the space for the inode item,
1883 	 *   it is unnecessary to reserve space for the inode ref deletion.
1884 	 * - If the inode ref and the inode item are not in the same leaf,
1885 	 *   we also needn't worry about an ENOSPC problem, because we reserve
1886 	 *   much more space for the inode update than it needs.
1887 	 * - At the worst, we can steal some space from the global reservation.
1888 	 *   It is very rare.
1889 	 */
1890 	mutex_lock(&delayed_node->mutex);
1891 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1892 		goto release_node;
1893 
1894 	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1895 	delayed_node->count++;
1896 	atomic_inc(&fs_info->delayed_root->items);
1897 release_node:
1898 	mutex_unlock(&delayed_node->mutex);
1899 	btrfs_release_delayed_node(delayed_node);
1900 	return 0;
1901 }
1902 
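/*
 * Tear down a delayed node without writing anything back: release every
 * pending insertion and deletion item, the delayed iref and the dirty inode
 * item, along with their metadata reservations.
 */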
1903 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1904 {
1905 	struct btrfs_root *root = delayed_node->root;
1906 	struct btrfs_fs_info *fs_info = root->fs_info;
1907 	struct btrfs_delayed_item *curr_item, *prev_item;
1908 
1909 	mutex_lock(&delayed_node->mutex);
1910 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1911 	while (curr_item) {
1912 		btrfs_delayed_item_release_metadata(root, curr_item);
1913 		prev_item = curr_item;
1914 		curr_item = __btrfs_next_delayed_item(prev_item);
1915 		btrfs_release_delayed_item(prev_item);
1916 	}
1917 
1918 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1919 	while (curr_item) {
1920 		btrfs_delayed_item_release_metadata(root, curr_item);
1921 		prev_item = curr_item;
1922 		curr_item = __btrfs_next_delayed_item(prev_item);
1923 		btrfs_release_delayed_item(prev_item);
1924 	}
1925 
1926 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1927 		btrfs_release_delayed_iref(delayed_node);
1928 
1929 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1930 		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1931 		btrfs_release_delayed_inode(delayed_node);
1932 	}
1933 	mutex_unlock(&delayed_node->mutex);
1934 }
1935 
1936 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1937 {
1938 	struct btrfs_delayed_node *delayed_node;
1939 
1940 	delayed_node = btrfs_get_delayed_node(inode);
1941 	if (!delayed_node)
1942 		return;
1943 
1944 	__btrfs_kill_delayed_node(delayed_node);
1945 	btrfs_release_delayed_node(delayed_node);
1946 }
1947 
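/*
 * Walk the root's radix tree in batches and kill every delayed node that is
 * still alive; nodes whose refcount already reached zero are being freed by
 * their last holder and are skipped.
 */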
1948 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1949 {
1950 	u64 inode_id = 0;
1951 	struct btrfs_delayed_node *delayed_nodes[8];
1952 	int i, n;
1953 
1954 	while (1) {
1955 		spin_lock(&root->inode_lock);
1956 		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1957 					   (void **)delayed_nodes, inode_id,
1958 					   ARRAY_SIZE(delayed_nodes));
1959 		if (!n) {
1960 			spin_unlock(&root->inode_lock);
1961 			break;
1962 		}
1963 
1964 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1965 		for (i = 0; i < n; i++) {
1966 			/*
1967 			 * Don't increase refs in case the node is dead and
1968 			 * about to be removed from the tree in the loop below
1969 			 */
1970 			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1971 				delayed_nodes[i] = NULL;
1972 		}
1973 		spin_unlock(&root->inode_lock);
1974 
1975 		for (i = 0; i < n; i++) {
1976 			if (!delayed_nodes[i])
1977 				continue;
1978 			__btrfs_kill_delayed_node(delayed_nodes[i]);
1979 			btrfs_release_delayed_node(delayed_nodes[i]);
1980 		}
1981 	}
1982 }
1983 
1984 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1985 {
1986 	struct btrfs_delayed_node *curr_node, *prev_node;
1987 
1988 	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1989 	while (curr_node) {
1990 		__btrfs_kill_delayed_node(curr_node);
1991 
1992 		prev_node = curr_node;
1993 		curr_node = btrfs_next_delayed_node(curr_node);
1994 		btrfs_release_delayed_node(prev_node);
1995 	}
1996 }
1997 
1998