xref: /openbmc/linux/fs/btrfs/delayed-inode.c (revision b8b350af)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 Fujitsu.  All rights reserved.
4  * Written by Miao Xie <miaox@cn.fujitsu.com>
5  */
6 
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
9 #include "misc.h"
10 #include "delayed-inode.h"
11 #include "disk-io.h"
12 #include "transaction.h"
13 #include "ctree.h"
14 #include "qgroup.h"
15 #include "locking.h"
16 
17 #define BTRFS_DELAYED_WRITEBACK		512
18 #define BTRFS_DELAYED_BACKGROUND	128
19 #define BTRFS_DELAYED_BATCH		16
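/*
 * Thresholds for the flushing logic below: once the number of pending
 * delayed items reaches BTRFS_DELAYED_BACKGROUND, an async background
 * flush is kicked off; at BTRFS_DELAYED_WRITEBACK the current task is
 * throttled and made to wait; completions are accounted (and waiters
 * re-checked) in units of BTRFS_DELAYED_BATCH.  See
 * btrfs_balance_delayed_items() and finish_one_item().
 */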
20 
21 static struct kmem_cache *delayed_node_cache;
22 
23 int __init btrfs_delayed_inode_init(void)
24 {
25 	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
26 					sizeof(struct btrfs_delayed_node),
27 					0,
28 					SLAB_MEM_SPREAD,
29 					NULL);
30 	if (!delayed_node_cache)
31 		return -ENOMEM;
32 	return 0;
33 }
34 
35 void __cold btrfs_delayed_inode_exit(void)
36 {
37 	kmem_cache_destroy(delayed_node_cache);
38 }
39 
40 static inline void btrfs_init_delayed_node(
41 				struct btrfs_delayed_node *delayed_node,
42 				struct btrfs_root *root, u64 inode_id)
43 {
44 	delayed_node->root = root;
45 	delayed_node->inode_id = inode_id;
46 	refcount_set(&delayed_node->refs, 0);
47 	delayed_node->ins_root = RB_ROOT_CACHED;
48 	delayed_node->del_root = RB_ROOT_CACHED;
49 	mutex_init(&delayed_node->mutex);
50 	INIT_LIST_HEAD(&delayed_node->n_list);
51 	INIT_LIST_HEAD(&delayed_node->p_list);
52 }
53 
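/*
 * Two delayed items are "continuous" when they are dir index items of the
 * same directory with consecutive index offsets (N and N + 1).  This is the
 * property that lets btrfs_insert_delayed_item() and
 * btrfs_batch_delete_items() process runs of dir index items as one batch.
 */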
54 static inline int btrfs_is_continuous_delayed_item(
55 					struct btrfs_delayed_item *item1,
56 					struct btrfs_delayed_item *item2)
57 {
58 	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
59 	    item1->key.objectid == item2->key.objectid &&
60 	    item1->key.type == item2->key.type &&
61 	    item1->key.offset + 1 == item2->key.offset)
62 		return 1;
63 	return 0;
64 }
65 
66 static struct btrfs_delayed_node *btrfs_get_delayed_node(
67 		struct btrfs_inode *btrfs_inode)
68 {
69 	struct btrfs_root *root = btrfs_inode->root;
70 	u64 ino = btrfs_ino(btrfs_inode);
71 	struct btrfs_delayed_node *node;
72 
73 	node = READ_ONCE(btrfs_inode->delayed_node);
74 	if (node) {
75 		refcount_inc(&node->refs);
76 		return node;
77 	}
78 
79 	spin_lock(&root->inode_lock);
80 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
81 
82 	if (node) {
83 		if (btrfs_inode->delayed_node) {
84 			refcount_inc(&node->refs);	/* can be accessed */
85 			BUG_ON(btrfs_inode->delayed_node != node);
86 			spin_unlock(&root->inode_lock);
87 			return node;
88 		}
89 
90 		/*
91 		 * It's possible that we're racing into the middle of removing
92 		 * this node from the radix tree.  In this case, the refcount
93 		 * was zero and it should never go back to one.  Just return
94 		 * NULL like it was never in the radix at all; our release
95 		 * function is in the process of removing it.
96 		 *
97 		 * Some implementations of refcount_inc refuse to bump the
98 		 * refcount once it has hit zero.  If we don't do this dance
99 		 * here, refcount_inc() may decide to just WARN_ONCE() instead
100 		 * of actually bumping the refcount.
101 		 *
102 		 * If this node is properly in the radix, we want to bump the
103 		 * refcount twice, once for the inode and once for this get
104 		 * operation.
105 		 */
106 		if (refcount_inc_not_zero(&node->refs)) {
107 			refcount_inc(&node->refs);
108 			btrfs_inode->delayed_node = node;
109 		} else {
110 			node = NULL;
111 		}
112 
113 		spin_unlock(&root->inode_lock);
114 		return node;
115 	}
116 	spin_unlock(&root->inode_lock);
117 
118 	return NULL;
119 }
120 
121 /* Will return either the node or PTR_ERR(-ENOMEM) */
122 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
123 		struct btrfs_inode *btrfs_inode)
124 {
125 	struct btrfs_delayed_node *node;
126 	struct btrfs_root *root = btrfs_inode->root;
127 	u64 ino = btrfs_ino(btrfs_inode);
128 	int ret;
129 
130 again:
131 	node = btrfs_get_delayed_node(btrfs_inode);
132 	if (node)
133 		return node;
134 
135 	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
136 	if (!node)
137 		return ERR_PTR(-ENOMEM);
138 	btrfs_init_delayed_node(node, root, ino);
139 
140 	/* cached in the btrfs inode and can be accessed */
141 	refcount_set(&node->refs, 2);
142 
143 	ret = radix_tree_preload(GFP_NOFS);
144 	if (ret) {
145 		kmem_cache_free(delayed_node_cache, node);
146 		return ERR_PTR(ret);
147 	}
148 
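	/*
	 * radix_tree_preload() succeeded above, so the insertion below cannot
	 * fail with -ENOMEM while we hold root->inode_lock; the only possible
	 * failure is -EEXIST, if another task inserted the node first.
	 */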
149 	spin_lock(&root->inode_lock);
150 	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
151 	if (ret == -EEXIST) {
152 		spin_unlock(&root->inode_lock);
153 		kmem_cache_free(delayed_node_cache, node);
154 		radix_tree_preload_end();
155 		goto again;
156 	}
157 	btrfs_inode->delayed_node = node;
158 	spin_unlock(&root->inode_lock);
159 	radix_tree_preload_end();
160 
161 	return node;
162 }
163 
164 /*
165  * Call it when holding delayed_node->mutex
166  *
167  * If mod = 1, add this node into the prepared list.
168  */
169 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
170 				     struct btrfs_delayed_node *node,
171 				     int mod)
172 {
173 	spin_lock(&root->lock);
174 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
175 		if (!list_empty(&node->p_list))
176 			list_move_tail(&node->p_list, &root->prepare_list);
177 		else if (mod)
178 			list_add_tail(&node->p_list, &root->prepare_list);
179 	} else {
180 		list_add_tail(&node->n_list, &root->node_list);
181 		list_add_tail(&node->p_list, &root->prepare_list);
182 		refcount_inc(&node->refs);	/* inserted into list */
183 		root->nodes++;
184 		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
185 	}
186 	spin_unlock(&root->lock);
187 }
188 
189 /* Call it when holding delayed_node->mutex */
190 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
191 				       struct btrfs_delayed_node *node)
192 {
193 	spin_lock(&root->lock);
194 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
195 		root->nodes--;
196 		refcount_dec(&node->refs);	/* not in the list */
197 		list_del_init(&node->n_list);
198 		if (!list_empty(&node->p_list))
199 			list_del_init(&node->p_list);
200 		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
201 	}
202 	spin_unlock(&root->lock);
203 }
204 
205 static struct btrfs_delayed_node *btrfs_first_delayed_node(
206 			struct btrfs_delayed_root *delayed_root)
207 {
208 	struct list_head *p;
209 	struct btrfs_delayed_node *node = NULL;
210 
211 	spin_lock(&delayed_root->lock);
212 	if (list_empty(&delayed_root->node_list))
213 		goto out;
214 
215 	p = delayed_root->node_list.next;
216 	node = list_entry(p, struct btrfs_delayed_node, n_list);
217 	refcount_inc(&node->refs);
218 out:
219 	spin_unlock(&delayed_root->lock);
220 
221 	return node;
222 }
223 
224 static struct btrfs_delayed_node *btrfs_next_delayed_node(
225 						struct btrfs_delayed_node *node)
226 {
227 	struct btrfs_delayed_root *delayed_root;
228 	struct list_head *p;
229 	struct btrfs_delayed_node *next = NULL;
230 
231 	delayed_root = node->root->fs_info->delayed_root;
232 	spin_lock(&delayed_root->lock);
233 	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
234 		/* not in the list */
235 		if (list_empty(&delayed_root->node_list))
236 			goto out;
237 		p = delayed_root->node_list.next;
238 	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
239 		goto out;
240 	else
241 		p = node->n_list.next;
242 
243 	next = list_entry(p, struct btrfs_delayed_node, n_list);
244 	refcount_inc(&next->refs);
245 out:
246 	spin_unlock(&delayed_root->lock);
247 
248 	return next;
249 }
250 
251 static void __btrfs_release_delayed_node(
252 				struct btrfs_delayed_node *delayed_node,
253 				int mod)
254 {
255 	struct btrfs_delayed_root *delayed_root;
256 
257 	if (!delayed_node)
258 		return;
259 
260 	delayed_root = delayed_node->root->fs_info->delayed_root;
261 
262 	mutex_lock(&delayed_node->mutex);
263 	if (delayed_node->count)
264 		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
265 	else
266 		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
267 	mutex_unlock(&delayed_node->mutex);
268 
269 	if (refcount_dec_and_test(&delayed_node->refs)) {
270 		struct btrfs_root *root = delayed_node->root;
271 
272 		spin_lock(&root->inode_lock);
273 		/*
274 		 * Once our refcount goes to zero, nobody is allowed to bump it
275 		 * back up.  We can delete it now.
276 		 */
277 		ASSERT(refcount_read(&delayed_node->refs) == 0);
278 		radix_tree_delete(&root->delayed_nodes_tree,
279 				  delayed_node->inode_id);
280 		spin_unlock(&root->inode_lock);
281 		kmem_cache_free(delayed_node_cache, delayed_node);
282 	}
283 }
284 
285 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
286 {
287 	__btrfs_release_delayed_node(node, 0);
288 }
289 
290 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
291 					struct btrfs_delayed_root *delayed_root)
292 {
293 	struct list_head *p;
294 	struct btrfs_delayed_node *node = NULL;
295 
296 	spin_lock(&delayed_root->lock);
297 	if (list_empty(&delayed_root->prepare_list))
298 		goto out;
299 
300 	p = delayed_root->prepare_list.next;
301 	list_del_init(p);
302 	node = list_entry(p, struct btrfs_delayed_node, p_list);
303 	refcount_inc(&node->refs);
304 out:
305 	spin_unlock(&delayed_root->lock);
306 
307 	return node;
308 }
309 
310 static inline void btrfs_release_prepared_delayed_node(
311 					struct btrfs_delayed_node *node)
312 {
313 	__btrfs_release_delayed_node(node, 1);
314 }
315 
316 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
317 {
318 	struct btrfs_delayed_item *item;
319 	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
320 	if (item) {
321 		item->data_len = data_len;
322 		item->ins_or_del = 0;
323 		item->bytes_reserved = 0;
324 		item->delayed_node = NULL;
325 		refcount_set(&item->refs, 1);
326 	}
327 	return item;
328 }
329 
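/*
 * The item's payload (e.g. a stack dir item) lives in the trailing @data
 * area allocated together with the struct above; @data_len records its size
 * so that the payload can later be copied into a leaf with a single
 * write_extent_buffer() call.
 */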
330 /*
331  * __btrfs_lookup_delayed_item - look up the delayed item by key
332  * @root:	  the root of the rb-tree (insertion or deletion tree) to search
333  * @key:	  the key to look up
334  * @prev:	  used to store the prev item if the right item isn't found
335  * @next:	  used to store the next item if the right item isn't found
336  *
337  * Note: if the exact item isn't found, NULL is returned and the closest
338  * smaller and larger items are stored through @prev and @next.
339  */
340 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
341 				struct rb_root *root,
342 				struct btrfs_key *key,
343 				struct btrfs_delayed_item **prev,
344 				struct btrfs_delayed_item **next)
345 {
346 	struct rb_node *node, *prev_node = NULL;
347 	struct btrfs_delayed_item *delayed_item = NULL;
348 	int ret = 0;
349 
350 	node = root->rb_node;
351 
352 	while (node) {
353 		delayed_item = rb_entry(node, struct btrfs_delayed_item,
354 					rb_node);
355 		prev_node = node;
356 		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
357 		if (ret < 0)
358 			node = node->rb_right;
359 		else if (ret > 0)
360 			node = node->rb_left;
361 		else
362 			return delayed_item;
363 	}
364 
365 	if (prev) {
366 		if (!prev_node)
367 			*prev = NULL;
368 		else if (ret < 0)
369 			*prev = delayed_item;
370 		else if ((node = rb_prev(prev_node)) != NULL) {
371 			*prev = rb_entry(node, struct btrfs_delayed_item,
372 					 rb_node);
373 		} else
374 			*prev = NULL;
375 	}
376 
377 	if (next) {
378 		if (!prev_node)
379 			*next = NULL;
380 		else if (ret > 0)
381 			*next = delayed_item;
382 		else if ((node = rb_next(prev_node)) != NULL) {
383 			*next = rb_entry(node, struct btrfs_delayed_item,
384 					 rb_node);
385 		} else
386 			*next = NULL;
387 	}
388 	return NULL;
389 }
390 
391 static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
392 					struct btrfs_delayed_node *delayed_node,
393 					struct btrfs_key *key)
394 {
395 	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
396 					   NULL, NULL);
397 }
398 
399 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
400 				    struct btrfs_delayed_item *ins,
401 				    int action)
402 {
403 	struct rb_node **p, *node;
404 	struct rb_node *parent_node = NULL;
405 	struct rb_root_cached *root;
406 	struct btrfs_delayed_item *item;
407 	int cmp;
408 	bool leftmost = true;
409 
410 	if (action == BTRFS_DELAYED_INSERTION_ITEM)
411 		root = &delayed_node->ins_root;
412 	else if (action == BTRFS_DELAYED_DELETION_ITEM)
413 		root = &delayed_node->del_root;
414 	else
415 		BUG();
416 	p = &root->rb_root.rb_node;
417 	node = &ins->rb_node;
418 
419 	while (*p) {
420 		parent_node = *p;
421 		item = rb_entry(parent_node, struct btrfs_delayed_item,
422 				 rb_node);
423 
424 		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
425 		if (cmp < 0) {
426 			p = &(*p)->rb_right;
427 			leftmost = false;
428 		} else if (cmp > 0) {
429 			p = &(*p)->rb_left;
430 		} else {
431 			return -EEXIST;
432 		}
433 	}
434 
435 	rb_link_node(node, parent_node, p);
436 	rb_insert_color_cached(node, root, leftmost);
437 	ins->delayed_node = delayed_node;
438 	ins->ins_or_del = action;
439 
440 	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
441 	    action == BTRFS_DELAYED_INSERTION_ITEM &&
442 	    ins->key.offset >= delayed_node->index_cnt)
443 		delayed_node->index_cnt = ins->key.offset + 1;
444 
445 	delayed_node->count++;
446 	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
447 	return 0;
448 }
449 
450 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
451 					      struct btrfs_delayed_item *item)
452 {
453 	return __btrfs_add_delayed_item(node, item,
454 					BTRFS_DELAYED_INSERTION_ITEM);
455 }
456 
457 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
458 					     struct btrfs_delayed_item *item)
459 {
460 	return __btrfs_add_delayed_item(node, item,
461 					BTRFS_DELAYED_DELETION_ITEM);
462 }
463 
464 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
465 {
466 	int seq = atomic_inc_return(&delayed_root->items_seq);
467 
468 	/* atomic_dec_return implies a barrier */
469 	if (atomic_dec_return(&delayed_root->items) <
470 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0)
471 		cond_wake_up_nomb(&delayed_root->wait);
472 }
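/*
 * Tasks throttled in btrfs_balance_delayed_items() are woken either when
 * the backlog drops below BTRFS_DELAYED_BACKGROUND or once for every
 * BTRFS_DELAYED_BATCH completed items, so that they make steady progress;
 * could_end_wait() is the matching wait condition.
 */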
473 
474 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
475 {
476 	struct rb_root_cached *root;
477 	struct btrfs_delayed_root *delayed_root;
478 
479 	/* Not associated with any delayed_node */
480 	if (!delayed_item->delayed_node)
481 		return;
482 	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
483 
484 	BUG_ON(!delayed_root);
485 	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
486 	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
487 
488 	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
489 		root = &delayed_item->delayed_node->ins_root;
490 	else
491 		root = &delayed_item->delayed_node->del_root;
492 
493 	rb_erase_cached(&delayed_item->rb_node, root);
494 	delayed_item->delayed_node->count--;
495 
496 	finish_one_item(delayed_root);
497 }
498 
499 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
500 {
501 	if (item) {
502 		__btrfs_remove_delayed_item(item);
503 		if (refcount_dec_and_test(&item->refs))
504 			kfree(item);
505 	}
506 }
507 
508 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
509 					struct btrfs_delayed_node *delayed_node)
510 {
511 	struct rb_node *p;
512 	struct btrfs_delayed_item *item = NULL;
513 
514 	p = rb_first_cached(&delayed_node->ins_root);
515 	if (p)
516 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
517 
518 	return item;
519 }
520 
521 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
522 					struct btrfs_delayed_node *delayed_node)
523 {
524 	struct rb_node *p;
525 	struct btrfs_delayed_item *item = NULL;
526 
527 	p = rb_first_cached(&delayed_node->del_root);
528 	if (p)
529 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
530 
531 	return item;
532 }
533 
534 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
535 						struct btrfs_delayed_item *item)
536 {
537 	struct rb_node *p;
538 	struct btrfs_delayed_item *next = NULL;
539 
540 	p = rb_next(&item->rb_node);
541 	if (p)
542 		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
543 
544 	return next;
545 }
546 
547 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
548 					       struct btrfs_root *root,
549 					       struct btrfs_delayed_item *item)
550 {
551 	struct btrfs_block_rsv *src_rsv;
552 	struct btrfs_block_rsv *dst_rsv;
553 	struct btrfs_fs_info *fs_info = root->fs_info;
554 	u64 num_bytes;
555 	int ret;
556 
557 	if (!trans->bytes_reserved)
558 		return 0;
559 
560 	src_rsv = trans->block_rsv;
561 	dst_rsv = &fs_info->delayed_block_rsv;
562 
563 	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
564 
565 	/*
566 	 * Here we migrate space rsv from the transaction rsv, since we have
567 	 * already reserved space when starting the transaction.  So there is
568 	 * no need to reserve qgroup space here.
569 	 */
570 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
571 	if (!ret) {
572 		trace_btrfs_space_reservation(fs_info, "delayed_item",
573 					      item->key.objectid,
574 					      num_bytes, 1);
575 		item->bytes_reserved = num_bytes;
576 	}
577 
578 	return ret;
579 }
580 
581 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
582 						struct btrfs_delayed_item *item)
583 {
584 	struct btrfs_block_rsv *rsv;
585 	struct btrfs_fs_info *fs_info = root->fs_info;
586 
587 	if (!item->bytes_reserved)
588 		return;
589 
590 	rsv = &fs_info->delayed_block_rsv;
591 	/*
592 	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
593 	 * to release/reserve qgroup space.
594 	 */
595 	trace_btrfs_space_reservation(fs_info, "delayed_item",
596 				      item->key.objectid, item->bytes_reserved,
597 				      0);
598 	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
599 }
600 
601 static int btrfs_delayed_inode_reserve_metadata(
602 					struct btrfs_trans_handle *trans,
603 					struct btrfs_root *root,
604 					struct btrfs_delayed_node *node)
605 {
606 	struct btrfs_fs_info *fs_info = root->fs_info;
607 	struct btrfs_block_rsv *src_rsv;
608 	struct btrfs_block_rsv *dst_rsv;
609 	u64 num_bytes;
610 	int ret;
611 
612 	src_rsv = trans->block_rsv;
613 	dst_rsv = &fs_info->delayed_block_rsv;
614 
615 	num_bytes = btrfs_calc_metadata_size(fs_info, 1);
616 
617 	/*
618 	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
619 	 * which, for speed, doesn't reserve any space.  This is a problem
620 	 * since we still need to reserve space for this update, so try to
621 	 * reserve the space here.
622 	 *
623 	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
624 	 * we always reserve enough to update the inode item.
625 	 */
626 	if (!src_rsv || (!trans->bytes_reserved &&
627 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
628 		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
629 					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
630 		if (ret < 0)
631 			return ret;
632 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
633 					  BTRFS_RESERVE_NO_FLUSH);
634 		/* NO_FLUSH could only fail with -ENOSPC */
635 		ASSERT(ret == 0 || ret == -ENOSPC);
636 		if (ret)
637 			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
638 	} else {
639 		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
640 	}
641 
642 	if (!ret) {
643 		trace_btrfs_space_reservation(fs_info, "delayed_inode",
644 					      node->inode_id, num_bytes, 1);
645 		node->bytes_reserved = num_bytes;
646 	}
647 
648 	return ret;
649 }
650 
651 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
652 						struct btrfs_delayed_node *node,
653 						bool qgroup_free)
654 {
655 	struct btrfs_block_rsv *rsv;
656 
657 	if (!node->bytes_reserved)
658 		return;
659 
660 	rsv = &fs_info->delayed_block_rsv;
661 	trace_btrfs_space_reservation(fs_info, "delayed_inode",
662 				      node->inode_id, node->bytes_reserved, 0);
663 	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
664 	if (qgroup_free)
665 		btrfs_qgroup_free_meta_prealloc(node->root,
666 				node->bytes_reserved);
667 	else
668 		btrfs_qgroup_convert_reserved_meta(node->root,
669 				node->bytes_reserved);
670 	node->bytes_reserved = 0;
671 }
672 
673 /*
674  * Insert a single delayed item or a batch of delayed items that have consecutive
675  * keys if they exist.
676  */
677 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
678 				     struct btrfs_root *root,
679 				     struct btrfs_path *path,
680 				     struct btrfs_delayed_item *first_item)
681 {
682 	LIST_HEAD(batch);
683 	struct btrfs_delayed_item *curr;
684 	struct btrfs_delayed_item *next;
685 	const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
686 	int total_size;
687 	int nitems;
688 	char *ins_data = NULL;
689 	struct btrfs_key *ins_keys;
690 	u32 *ins_sizes;
691 	int ret;
692 
693 	list_add_tail(&first_item->tree_list, &batch);
694 	nitems = 1;
695 	total_size = first_item->data_len + sizeof(struct btrfs_item);
696 	curr = first_item;
697 
698 	while (true) {
699 		int next_size;
700 
701 		next = __btrfs_next_delayed_item(curr);
702 		if (!next || !btrfs_is_continuous_delayed_item(curr, next))
703 			break;
704 
705 		next_size = next->data_len + sizeof(struct btrfs_item);
706 		if (total_size + next_size > max_size)
707 			break;
708 
709 		list_add_tail(&next->tree_list, &batch);
710 		nitems++;
711 		total_size += next_size;
712 		curr = next;
713 	}
714 
715 	if (nitems == 1) {
716 		ins_keys = &first_item->key;
717 		ins_sizes = &first_item->data_len;
718 	} else {
719 		int i = 0;
720 
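		/*
		 * Single allocation laid out as nitems u32 sizes followed by
		 * nitems struct btrfs_key, both filled in key order by
		 * walking the batch list below.
		 */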
721 		ins_data = kmalloc(nitems * sizeof(u32) +
722 				   nitems * sizeof(struct btrfs_key), GFP_NOFS);
723 		if (!ins_data) {
724 			ret = -ENOMEM;
725 			goto out;
726 		}
727 		ins_sizes = (u32 *)ins_data;
728 		ins_keys = (struct btrfs_key *)(ins_data + nitems * sizeof(u32));
729 		list_for_each_entry(curr, &batch, tree_list) {
730 			ins_keys[i] = curr->key;
731 			ins_sizes[i] = curr->data_len;
732 			i++;
733 		}
734 	}
735 
736 	ret = btrfs_insert_empty_items(trans, root, path, ins_keys, ins_sizes,
737 				       nitems);
738 	if (ret)
739 		goto out;
740 
741 	list_for_each_entry(curr, &batch, tree_list) {
742 		char *data_ptr;
743 
744 		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
745 		write_extent_buffer(path->nodes[0], &curr->data,
746 				    (unsigned long)data_ptr, curr->data_len);
747 		path->slots[0]++;
748 	}
749 
750 	/*
751 	 * Now release our path before releasing the delayed items and their
752 	 * metadata reservations, so that we don't block other tasks for more
753 	 * time than needed.
754 	 */
755 	btrfs_release_path(path);
756 
757 	list_for_each_entry_safe(curr, next, &batch, tree_list) {
758 		list_del(&curr->tree_list);
759 		btrfs_delayed_item_release_metadata(root, curr);
760 		btrfs_release_delayed_item(curr);
761 	}
762 out:
763 	kfree(ins_data);
764 	return ret;
765 }
766 
767 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
768 				      struct btrfs_path *path,
769 				      struct btrfs_root *root,
770 				      struct btrfs_delayed_node *node)
771 {
772 	int ret = 0;
773 
774 	while (ret == 0) {
775 		struct btrfs_delayed_item *curr;
776 
777 		mutex_lock(&node->mutex);
778 		curr = __btrfs_first_delayed_insertion_item(node);
779 		if (!curr) {
780 			mutex_unlock(&node->mutex);
781 			break;
782 		}
783 		ret = btrfs_insert_delayed_item(trans, root, path, curr);
784 		mutex_unlock(&node->mutex);
785 	}
786 
787 	return ret;
788 }
789 
790 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
791 				    struct btrfs_root *root,
792 				    struct btrfs_path *path,
793 				    struct btrfs_delayed_item *item)
794 {
795 	struct btrfs_delayed_item *curr, *next;
796 	struct extent_buffer *leaf;
797 	struct btrfs_key key;
798 	struct list_head head;
799 	int nitems, i, last_item;
800 	int ret = 0;
801 
802 	BUG_ON(!path->nodes[0]);
803 
804 	leaf = path->nodes[0];
805 
806 	i = path->slots[0];
807 	last_item = btrfs_header_nritems(leaf) - 1;
808 	if (i > last_item)
809 		return -ENOENT;	/* FIXME: Is errno suitable? */
810 
811 	next = item;
812 	INIT_LIST_HEAD(&head);
813 	btrfs_item_key_to_cpu(leaf, &key, i);
814 	nitems = 0;
815 	/*
816 	 * Count the number of dir index items that we can delete in a batch.
817 	 */
818 	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
819 		list_add_tail(&next->tree_list, &head);
820 		nitems++;
821 
822 		curr = next;
823 		next = __btrfs_next_delayed_item(curr);
824 		if (!next)
825 			break;
826 
827 		if (!btrfs_is_continuous_delayed_item(curr, next))
828 			break;
829 
830 		i++;
831 		if (i > last_item)
832 			break;
833 		btrfs_item_key_to_cpu(leaf, &key, i);
834 	}
835 
836 	if (!nitems)
837 		return 0;
838 
839 	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
840 	if (ret)
841 		goto out;
842 
843 	list_for_each_entry_safe(curr, next, &head, tree_list) {
844 		btrfs_delayed_item_release_metadata(root, curr);
845 		list_del(&curr->tree_list);
846 		btrfs_release_delayed_item(curr);
847 	}
848 
849 out:
850 	return ret;
851 }
852 
853 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
854 				      struct btrfs_path *path,
855 				      struct btrfs_root *root,
856 				      struct btrfs_delayed_node *node)
857 {
858 	struct btrfs_delayed_item *curr, *prev;
859 	int ret = 0;
860 
861 do_again:
862 	mutex_lock(&node->mutex);
863 	curr = __btrfs_first_delayed_deletion_item(node);
864 	if (!curr)
865 		goto delete_fail;
866 
867 	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
868 	if (ret < 0)
869 		goto delete_fail;
870 	else if (ret > 0) {
871 		/*
872 		 * We can't find the item that this delayed item points to,
873 		 * so the delayed item is stale; just drop it.
874 		 */
875 		prev = curr;
876 		curr = __btrfs_next_delayed_item(prev);
877 		btrfs_release_delayed_item(prev);
878 		ret = 0;
879 		btrfs_release_path(path);
880 		if (curr) {
881 			mutex_unlock(&node->mutex);
882 			goto do_again;
883 		} else
884 			goto delete_fail;
885 	}
886 
887 	btrfs_batch_delete_items(trans, root, path, curr);
888 	btrfs_release_path(path);
889 	mutex_unlock(&node->mutex);
890 	goto do_again;
891 
892 delete_fail:
893 	btrfs_release_path(path);
894 	mutex_unlock(&node->mutex);
895 	return ret;
896 }
897 
898 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
899 {
900 	struct btrfs_delayed_root *delayed_root;
901 
902 	if (delayed_node &&
903 	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
904 		BUG_ON(!delayed_node->root);
905 		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
906 		delayed_node->count--;
907 
908 		delayed_root = delayed_node->root->fs_info->delayed_root;
909 		finish_one_item(delayed_root);
910 	}
911 }
912 
913 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
914 {
916 	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
917 		struct btrfs_delayed_root *delayed_root;
918 
919 		ASSERT(delayed_node->root);
920 		delayed_node->count--;
921 
922 		delayed_root = delayed_node->root->fs_info->delayed_root;
923 		finish_one_item(delayed_root);
924 	}
925 }
926 
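/*
 * Copy the in-memory inode item into its INODE_ITEM slot in the leaf and,
 * if the DEL_IREF flag is set, also delete the inode's single
 * INODE_REF/EXTREF item.  Most of the time that ref item immediately
 * follows the inode item in the same leaf; when it doesn't, the "search"
 * path below looks up the end of the inode's key range (EXTREF, offset -1)
 * and steps back to the ref item from there.
 */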
927 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
928 					struct btrfs_root *root,
929 					struct btrfs_path *path,
930 					struct btrfs_delayed_node *node)
931 {
932 	struct btrfs_fs_info *fs_info = root->fs_info;
933 	struct btrfs_key key;
934 	struct btrfs_inode_item *inode_item;
935 	struct extent_buffer *leaf;
936 	int mod;
937 	int ret;
938 
939 	key.objectid = node->inode_id;
940 	key.type = BTRFS_INODE_ITEM_KEY;
941 	key.offset = 0;
942 
943 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
944 		mod = -1;
945 	else
946 		mod = 1;
947 
948 	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
949 	if (ret > 0)
950 		ret = -ENOENT;
951 	if (ret < 0)
952 		goto out;
953 
954 	leaf = path->nodes[0];
955 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
956 				    struct btrfs_inode_item);
957 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
958 			    sizeof(struct btrfs_inode_item));
959 	btrfs_mark_buffer_dirty(leaf);
960 
961 	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
962 		goto out;
963 
964 	path->slots[0]++;
965 	if (path->slots[0] >= btrfs_header_nritems(leaf))
966 		goto search;
967 again:
968 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
969 	if (key.objectid != node->inode_id)
970 		goto out;
971 
972 	if (key.type != BTRFS_INODE_REF_KEY &&
973 	    key.type != BTRFS_INODE_EXTREF_KEY)
974 		goto out;
975 
976 	/*
977 	 * Delayed iref deletion is only done for an inode that has a single
978 	 * link, so there is a single iref.  The case where several irefs
979 	 * share the same item cannot happen.
980 	 */
981 	btrfs_del_item(trans, root, path);
982 out:
983 	btrfs_release_delayed_iref(node);
984 	btrfs_release_path(path);
985 err_out:
986 	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
987 	btrfs_release_delayed_inode(node);
988 
989 	/*
990 	 * If we fail to update the delayed inode we need to abort the
991 	 * transaction, because we could leave the inode with the improper
992 	 * counts behind.
993 	 */
994 	if (ret && ret != -ENOENT)
995 		btrfs_abort_transaction(trans, ret);
996 
997 	return ret;
998 
999 search:
1000 	btrfs_release_path(path);
1001 
1002 	key.type = BTRFS_INODE_EXTREF_KEY;
1003 	key.offset = -1;
1004 
1005 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1006 	if (ret < 0)
1007 		goto err_out;
1008 	ASSERT(ret);
1009 
1010 	ret = 0;
1011 	leaf = path->nodes[0];
1012 	path->slots[0]--;
1013 	goto again;
1014 }
1015 
1016 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1017 					     struct btrfs_root *root,
1018 					     struct btrfs_path *path,
1019 					     struct btrfs_delayed_node *node)
1020 {
1021 	int ret;
1022 
1023 	mutex_lock(&node->mutex);
1024 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1025 		mutex_unlock(&node->mutex);
1026 		return 0;
1027 	}
1028 
1029 	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1030 	mutex_unlock(&node->mutex);
1031 	return ret;
1032 }
1033 
1034 static inline int
1035 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1036 				   struct btrfs_path *path,
1037 				   struct btrfs_delayed_node *node)
1038 {
1039 	int ret;
1040 
1041 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1042 	if (ret)
1043 		return ret;
1044 
1045 	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1046 	if (ret)
1047 		return ret;
1048 
1049 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1050 	return ret;
1051 }
1052 
1053 /*
1054  * Called when committing the transaction.
1055  * Returns 0 on success.
1056  * Returns < 0 on error, with the transaction aborted and any outstanding
1057  * delayed items cleaned up.
1058  */
1059 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1060 {
1061 	struct btrfs_fs_info *fs_info = trans->fs_info;
1062 	struct btrfs_delayed_root *delayed_root;
1063 	struct btrfs_delayed_node *curr_node, *prev_node;
1064 	struct btrfs_path *path;
1065 	struct btrfs_block_rsv *block_rsv;
1066 	int ret = 0;
1067 	bool count = (nr > 0);
1068 
1069 	if (TRANS_ABORTED(trans))
1070 		return -EIO;
1071 
1072 	path = btrfs_alloc_path();
1073 	if (!path)
1074 		return -ENOMEM;
1075 
1076 	block_rsv = trans->block_rsv;
1077 	trans->block_rsv = &fs_info->delayed_block_rsv;
1078 
1079 	delayed_root = fs_info->delayed_root;
1080 
1081 	curr_node = btrfs_first_delayed_node(delayed_root);
1082 	while (curr_node && (!count || nr--)) {
1083 		ret = __btrfs_commit_inode_delayed_items(trans, path,
1084 							 curr_node);
1085 		if (ret) {
1086 			btrfs_release_delayed_node(curr_node);
1087 			curr_node = NULL;
1088 			btrfs_abort_transaction(trans, ret);
1089 			break;
1090 		}
1091 
1092 		prev_node = curr_node;
1093 		curr_node = btrfs_next_delayed_node(curr_node);
1094 		btrfs_release_delayed_node(prev_node);
1095 	}
1096 
1097 	if (curr_node)
1098 		btrfs_release_delayed_node(curr_node);
1099 	btrfs_free_path(path);
1100 	trans->block_rsv = block_rsv;
1101 
1102 	return ret;
1103 }
1104 
1105 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1106 {
1107 	return __btrfs_run_delayed_items(trans, -1);
1108 }
1109 
1110 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1111 {
1112 	return __btrfs_run_delayed_items(trans, nr);
1113 }
1114 
1115 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1116 				     struct btrfs_inode *inode)
1117 {
1118 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1119 	struct btrfs_path *path;
1120 	struct btrfs_block_rsv *block_rsv;
1121 	int ret;
1122 
1123 	if (!delayed_node)
1124 		return 0;
1125 
1126 	mutex_lock(&delayed_node->mutex);
1127 	if (!delayed_node->count) {
1128 		mutex_unlock(&delayed_node->mutex);
1129 		btrfs_release_delayed_node(delayed_node);
1130 		return 0;
1131 	}
1132 	mutex_unlock(&delayed_node->mutex);
1133 
1134 	path = btrfs_alloc_path();
1135 	if (!path) {
1136 		btrfs_release_delayed_node(delayed_node);
1137 		return -ENOMEM;
1138 	}
1139 
1140 	block_rsv = trans->block_rsv;
1141 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1142 
1143 	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1144 
1145 	btrfs_release_delayed_node(delayed_node);
1146 	btrfs_free_path(path);
1147 	trans->block_rsv = block_rsv;
1148 
1149 	return ret;
1150 }
1151 
1152 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1153 {
1154 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1155 	struct btrfs_trans_handle *trans;
1156 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1157 	struct btrfs_path *path;
1158 	struct btrfs_block_rsv *block_rsv;
1159 	int ret;
1160 
1161 	if (!delayed_node)
1162 		return 0;
1163 
1164 	mutex_lock(&delayed_node->mutex);
1165 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1166 		mutex_unlock(&delayed_node->mutex);
1167 		btrfs_release_delayed_node(delayed_node);
1168 		return 0;
1169 	}
1170 	mutex_unlock(&delayed_node->mutex);
1171 
1172 	trans = btrfs_join_transaction(delayed_node->root);
1173 	if (IS_ERR(trans)) {
1174 		ret = PTR_ERR(trans);
1175 		goto out;
1176 	}
1177 
1178 	path = btrfs_alloc_path();
1179 	if (!path) {
1180 		ret = -ENOMEM;
1181 		goto trans_out;
1182 	}
1183 
1184 	block_rsv = trans->block_rsv;
1185 	trans->block_rsv = &fs_info->delayed_block_rsv;
1186 
1187 	mutex_lock(&delayed_node->mutex);
1188 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1189 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1190 						   path, delayed_node);
1191 	else
1192 		ret = 0;
1193 	mutex_unlock(&delayed_node->mutex);
1194 
1195 	btrfs_free_path(path);
1196 	trans->block_rsv = block_rsv;
1197 trans_out:
1198 	btrfs_end_transaction(trans);
1199 	btrfs_btree_balance_dirty(fs_info);
1200 out:
1201 	btrfs_release_delayed_node(delayed_node);
1202 
1203 	return ret;
1204 }
1205 
1206 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1207 {
1208 	struct btrfs_delayed_node *delayed_node;
1209 
1210 	delayed_node = READ_ONCE(inode->delayed_node);
1211 	if (!delayed_node)
1212 		return;
1213 
1214 	inode->delayed_node = NULL;
1215 	btrfs_release_delayed_node(delayed_node);
1216 }
1217 
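/*
 * Work item for async flushing: nr > 0 means flush roughly that many
 * prepared delayed nodes and stop; nr == 0 means keep flushing until the
 * backlog falls below BTRFS_DELAYED_BACKGROUND / 2, bounded by
 * BTRFS_DELAYED_WRITEBACK nodes per run.
 */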
1218 struct btrfs_async_delayed_work {
1219 	struct btrfs_delayed_root *delayed_root;
1220 	int nr;
1221 	struct btrfs_work work;
1222 };
1223 
1224 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1225 {
1226 	struct btrfs_async_delayed_work *async_work;
1227 	struct btrfs_delayed_root *delayed_root;
1228 	struct btrfs_trans_handle *trans;
1229 	struct btrfs_path *path;
1230 	struct btrfs_delayed_node *delayed_node = NULL;
1231 	struct btrfs_root *root;
1232 	struct btrfs_block_rsv *block_rsv;
1233 	int total_done = 0;
1234 
1235 	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1236 	delayed_root = async_work->delayed_root;
1237 
1238 	path = btrfs_alloc_path();
1239 	if (!path)
1240 		goto out;
1241 
1242 	do {
1243 		if (atomic_read(&delayed_root->items) <
1244 		    BTRFS_DELAYED_BACKGROUND / 2)
1245 			break;
1246 
1247 		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1248 		if (!delayed_node)
1249 			break;
1250 
1251 		root = delayed_node->root;
1252 
1253 		trans = btrfs_join_transaction(root);
1254 		if (IS_ERR(trans)) {
1255 			btrfs_release_path(path);
1256 			btrfs_release_prepared_delayed_node(delayed_node);
1257 			total_done++;
1258 			continue;
1259 		}
1260 
1261 		block_rsv = trans->block_rsv;
1262 		trans->block_rsv = &root->fs_info->delayed_block_rsv;
1263 
1264 		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1265 
1266 		trans->block_rsv = block_rsv;
1267 		btrfs_end_transaction(trans);
1268 		btrfs_btree_balance_dirty_nodelay(root->fs_info);
1269 
1270 		btrfs_release_path(path);
1271 		btrfs_release_prepared_delayed_node(delayed_node);
1272 		total_done++;
1273 
1274 	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1275 		 || total_done < async_work->nr);
1276 
1277 	btrfs_free_path(path);
1278 out:
1279 	wake_up(&delayed_root->wait);
1280 	kfree(async_work);
1281 }
1282 
1283 
1284 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1285 				     struct btrfs_fs_info *fs_info, int nr)
1286 {
1287 	struct btrfs_async_delayed_work *async_work;
1288 
1289 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1290 	if (!async_work)
1291 		return -ENOMEM;
1292 
1293 	async_work->delayed_root = delayed_root;
1294 	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1295 			NULL);
1296 	async_work->nr = nr;
1297 
1298 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1299 	return 0;
1300 }
1301 
1302 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1303 {
1304 	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1305 }
1306 
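/*
 * A throttled waiter may stop waiting once at least BTRFS_DELAYED_BATCH
 * items have completed since it sampled @seq (the "val < seq" check covers
 * wraparound of the sequence counter), or once the backlog has fallen
 * below the background threshold.
 */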
1307 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1308 {
1309 	int val = atomic_read(&delayed_root->items_seq);
1310 
1311 	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1312 		return 1;
1313 
1314 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1315 		return 1;
1316 
1317 	return 0;
1318 }
1319 
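/*
 * Throttling policy: below BTRFS_DELAYED_BACKGROUND pending items (or when
 * the worker queue is congested) do nothing; at or above
 * BTRFS_DELAYED_WRITEBACK, kick an unbounded async flush and wait for
 * could_end_wait(); in between, kick an async flush of BTRFS_DELAYED_BATCH
 * items without waiting.
 */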
1320 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1321 {
1322 	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1323 
1324 	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1325 		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1326 		return;
1327 
1328 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1329 		int seq;
1330 		int ret;
1331 
1332 		seq = atomic_read(&delayed_root->items_seq);
1333 
1334 		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1335 		if (ret)
1336 			return;
1337 
1338 		wait_event_interruptible(delayed_root->wait,
1339 					 could_end_wait(delayed_root, seq));
1340 		return;
1341 	}
1342 
1343 	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1344 }
1345 
1346 /* Will return 0 or -ENOMEM */
1347 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1348 				   const char *name, int name_len,
1349 				   struct btrfs_inode *dir,
1350 				   struct btrfs_disk_key *disk_key, u8 type,
1351 				   u64 index)
1352 {
1353 	struct btrfs_delayed_node *delayed_node;
1354 	struct btrfs_delayed_item *delayed_item;
1355 	struct btrfs_dir_item *dir_item;
1356 	int ret;
1357 
1358 	delayed_node = btrfs_get_or_create_delayed_node(dir);
1359 	if (IS_ERR(delayed_node))
1360 		return PTR_ERR(delayed_node);
1361 
1362 	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1363 	if (!delayed_item) {
1364 		ret = -ENOMEM;
1365 		goto release_node;
1366 	}
1367 
1368 	delayed_item->key.objectid = btrfs_ino(dir);
1369 	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1370 	delayed_item->key.offset = index;
1371 
1372 	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1373 	dir_item->location = *disk_key;
1374 	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1375 	btrfs_set_stack_dir_data_len(dir_item, 0);
1376 	btrfs_set_stack_dir_name_len(dir_item, name_len);
1377 	btrfs_set_stack_dir_type(dir_item, type);
1378 	memcpy((char *)(dir_item + 1), name, name_len);
1379 
1380 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1381 	/*
1382 	 * We reserved enough space when we started the transaction, so a
1383 	 * metadata reservation failure is impossible.
1384 	 */
1385 	BUG_ON(ret);
1386 
1387 	mutex_lock(&delayed_node->mutex);
1388 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1389 	if (unlikely(ret)) {
1390 		btrfs_err(trans->fs_info,
1391 			  "error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1392 			  name_len, name, delayed_node->root->root_key.objectid,
1393 			  delayed_node->inode_id, ret);
1394 		BUG();
1395 	}
1396 	mutex_unlock(&delayed_node->mutex);
1397 
1398 release_node:
1399 	btrfs_release_delayed_node(delayed_node);
1400 	return ret;
1401 }
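/*
 * A usage sketch (not a verbatim caller): a link-creation path would defer
 * the dir index insertion instead of touching the fs tree directly, e.g.
 *
 *	ret = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
 *					     &disk_key, BTRFS_FT_REG_FILE,
 *					     index);
 *
 * The BTRFS_DIR_INDEX_KEY item is then written into the fs tree later,
 * batched with its neighbours, by btrfs_insert_delayed_items().
 */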
1402 
1403 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1404 					       struct btrfs_delayed_node *node,
1405 					       struct btrfs_key *key)
1406 {
1407 	struct btrfs_delayed_item *item;
1408 
1409 	mutex_lock(&node->mutex);
1410 	item = __btrfs_lookup_delayed_insertion_item(node, key);
1411 	if (!item) {
1412 		mutex_unlock(&node->mutex);
1413 		return 1;
1414 	}
1415 
1416 	btrfs_delayed_item_release_metadata(node->root, item);
1417 	btrfs_release_delayed_item(item);
1418 	mutex_unlock(&node->mutex);
1419 	return 0;
1420 }
1421 
1422 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1423 				   struct btrfs_inode *dir, u64 index)
1424 {
1425 	struct btrfs_delayed_node *node;
1426 	struct btrfs_delayed_item *item;
1427 	struct btrfs_key item_key;
1428 	int ret;
1429 
1430 	node = btrfs_get_or_create_delayed_node(dir);
1431 	if (IS_ERR(node))
1432 		return PTR_ERR(node);
1433 
1434 	item_key.objectid = btrfs_ino(dir);
1435 	item_key.type = BTRFS_DIR_INDEX_KEY;
1436 	item_key.offset = index;
1437 
1438 	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1439 						  &item_key);
1440 	if (!ret)
1441 		goto end;
1442 
1443 	item = btrfs_alloc_delayed_item(0);
1444 	if (!item) {
1445 		ret = -ENOMEM;
1446 		goto end;
1447 	}
1448 
1449 	item->key = item_key;
1450 
1451 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1452 	/*
1453 	 * We reserved enough space when we started the transaction, so a
1454 	 * metadata reservation failure is impossible here.
1455 	 */
1456 	if (ret < 0) {
1457 		btrfs_err(trans->fs_info,
1458 "metadata reservation failed for delayed dir item deletion, should have been reserved");
1459 		btrfs_release_delayed_item(item);
1460 		goto end;
1461 	}
1462 
1463 	mutex_lock(&node->mutex);
1464 	ret = __btrfs_add_delayed_deletion_item(node, item);
1465 	if (unlikely(ret)) {
1466 		btrfs_err(trans->fs_info,
1467 			  "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1468 			  index, node->root->root_key.objectid,
1469 			  node->inode_id, ret);
1470 		btrfs_delayed_item_release_metadata(dir->root, item);
1471 		btrfs_release_delayed_item(item);
1472 	}
1473 	mutex_unlock(&node->mutex);
1474 end:
1475 	btrfs_release_delayed_node(node);
1476 	return ret;
1477 }
1478 
1479 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1480 {
1481 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1482 
1483 	if (!delayed_node)
1484 		return -ENOENT;
1485 
1486 	/*
1487 	 * Since we hold the i_mutex of this directory, no new directory index
1488 	 * can be added into the delayed node and index_cnt cannot be updated
1489 	 * now, so we don't need to lock the delayed node.
1490 	 */
1491 	if (!delayed_node->index_cnt) {
1492 		btrfs_release_delayed_node(delayed_node);
1493 		return -EINVAL;
1494 	}
1495 
1496 	inode->index_cnt = delayed_node->index_cnt;
1497 	btrfs_release_delayed_node(delayed_node);
1498 	return 0;
1499 }
1500 
1501 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1502 				     struct list_head *ins_list,
1503 				     struct list_head *del_list)
1504 {
1505 	struct btrfs_delayed_node *delayed_node;
1506 	struct btrfs_delayed_item *item;
1507 
1508 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1509 	if (!delayed_node)
1510 		return false;
1511 
1512 	/*
1513 	 * We can only do one readdir with delayed items at a time because of
1514 	 * item->readdir_list.
1515 	 */
1516 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1517 	btrfs_inode_lock(inode, 0);
1518 
1519 	mutex_lock(&delayed_node->mutex);
1520 	item = __btrfs_first_delayed_insertion_item(delayed_node);
1521 	while (item) {
1522 		refcount_inc(&item->refs);
1523 		list_add_tail(&item->readdir_list, ins_list);
1524 		item = __btrfs_next_delayed_item(item);
1525 	}
1526 
1527 	item = __btrfs_first_delayed_deletion_item(delayed_node);
1528 	while (item) {
1529 		refcount_inc(&item->refs);
1530 		list_add_tail(&item->readdir_list, del_list);
1531 		item = __btrfs_next_delayed_item(item);
1532 	}
1533 	mutex_unlock(&delayed_node->mutex);
1534 	/*
1535 	 * This delayed node is still cached in the btrfs inode, so refs
1536 	 * must be > 1 now, and we don't need to check whether it is about
1537 	 * to be freed.
1538 	 *
1539 	 * Besides that, this function is only used to read the directory, and
1540 	 * no delayed items are inserted or deleted during that time, so we
1541 	 * don't need to requeue or dequeue this delayed node either.
1542 	 */
1543 	refcount_dec(&delayed_node->refs);
1544 
1545 	return true;
1546 }
1547 
1548 void btrfs_readdir_put_delayed_items(struct inode *inode,
1549 				     struct list_head *ins_list,
1550 				     struct list_head *del_list)
1551 {
1552 	struct btrfs_delayed_item *curr, *next;
1553 
1554 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1555 		list_del(&curr->readdir_list);
1556 		if (refcount_dec_and_test(&curr->refs))
1557 			kfree(curr);
1558 	}
1559 
1560 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1561 		list_del(&curr->readdir_list);
1562 		if (refcount_dec_and_test(&curr->refs))
1563 			kfree(curr);
1564 	}
1565 
1566 	/*
1567 	 * The VFS is going to do up_read(), so we need to downgrade back to a
1568 	 * read lock.
1569 	 */
1570 	downgrade_write(&inode->i_rwsem);
1571 }
1572 
1573 int btrfs_should_delete_dir_index(struct list_head *del_list,
1574 				  u64 index)
1575 {
1576 	struct btrfs_delayed_item *curr;
1577 	int ret = 0;
1578 
1579 	list_for_each_entry(curr, del_list, readdir_list) {
1580 		if (curr->key.offset > index)
1581 			break;
1582 		if (curr->key.offset == index) {
1583 			ret = 1;
1584 			break;
1585 		}
1586 	}
1587 	return ret;
1588 }
1589 
1590 /*
1591  * btrfs_readdir_delayed_dir_index - read the dir info stored in the delayed
1592  * insertion list and emit the entries through @ctx
1593  */
1594 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1595 				    struct list_head *ins_list)
1596 {
1597 	struct btrfs_dir_item *di;
1598 	struct btrfs_delayed_item *curr, *next;
1599 	struct btrfs_key location;
1600 	char *name;
1601 	int name_len;
1602 	int over = 0;
1603 	unsigned char d_type;
1604 
1605 	if (list_empty(ins_list))
1606 		return 0;
1607 
1608 	/*
1609 	 * The data of a delayed item can never change, so we don't need to
1610 	 * lock the items.  And since we hold the i_mutex of the directory,
1611 	 * nobody can delete any directory index now.
1612 	 */
1613 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1614 		list_del(&curr->readdir_list);
1615 
1616 		if (curr->key.offset < ctx->pos) {
1617 			if (refcount_dec_and_test(&curr->refs))
1618 				kfree(curr);
1619 			continue;
1620 		}
1621 
1622 		ctx->pos = curr->key.offset;
1623 
1624 		di = (struct btrfs_dir_item *)curr->data;
1625 		name = (char *)(di + 1);
1626 		name_len = btrfs_stack_dir_name_len(di);
1627 
1628 		d_type = fs_ftype_to_dtype(di->type);
1629 		btrfs_disk_key_to_cpu(&location, &di->location);
1630 
1631 		over = !dir_emit(ctx, name, name_len,
1632 			       location.objectid, d_type);
1633 
1634 		if (refcount_dec_and_test(&curr->refs))
1635 			kfree(curr);
1636 
1637 		if (over)
1638 			return 1;
1639 		ctx->pos++;
1640 	}
1641 	return 0;
1642 }
1643 
1644 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1645 				  struct btrfs_inode_item *inode_item,
1646 				  struct inode *inode)
1647 {
1648 	u64 flags;
1649 
1650 	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1651 	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1652 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1653 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1654 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1655 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1656 	btrfs_set_stack_inode_generation(inode_item,
1657 					 BTRFS_I(inode)->generation);
1658 	btrfs_set_stack_inode_sequence(inode_item,
1659 				       inode_peek_iversion(inode));
1660 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1661 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1662 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1663 					  BTRFS_I(inode)->ro_flags);
1664 	btrfs_set_stack_inode_flags(inode_item, flags);
1665 	btrfs_set_stack_inode_block_group(inode_item, 0);
1666 
1667 	btrfs_set_stack_timespec_sec(&inode_item->atime,
1668 				     inode->i_atime.tv_sec);
1669 	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1670 				      inode->i_atime.tv_nsec);
1671 
1672 	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1673 				     inode->i_mtime.tv_sec);
1674 	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1675 				      inode->i_mtime.tv_nsec);
1676 
1677 	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1678 				     inode->i_ctime.tv_sec);
1679 	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1680 				      inode->i_ctime.tv_nsec);
1681 
1682 	btrfs_set_stack_timespec_sec(&inode_item->otime,
1683 				     BTRFS_I(inode)->i_otime.tv_sec);
1684 	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1685 				     BTRFS_I(inode)->i_otime.tv_nsec);
1686 }
1687 
1688 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1689 {
1690 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1691 	struct btrfs_delayed_node *delayed_node;
1692 	struct btrfs_inode_item *inode_item;
1693 
1694 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1695 	if (!delayed_node)
1696 		return -ENOENT;
1697 
1698 	mutex_lock(&delayed_node->mutex);
1699 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1700 		mutex_unlock(&delayed_node->mutex);
1701 		btrfs_release_delayed_node(delayed_node);
1702 		return -ENOENT;
1703 	}
1704 
1705 	inode_item = &delayed_node->inode_item;
1706 
1707 	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1708 	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1709 	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1710 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1711 			round_up(i_size_read(inode), fs_info->sectorsize));
1712 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1713 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1714 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1715 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1716 	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1717 
1718 	inode_set_iversion_queried(inode,
1719 				   btrfs_stack_inode_sequence(inode_item));
1720 	inode->i_rdev = 0;
1721 	*rdev = btrfs_stack_inode_rdev(inode_item);
1722 	btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1723 				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1724 
1725 	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1726 	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1727 
1728 	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1729 	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1730 
1731 	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1732 	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1733 
1734 	BTRFS_I(inode)->i_otime.tv_sec =
1735 		btrfs_stack_timespec_sec(&inode_item->otime);
1736 	BTRFS_I(inode)->i_otime.tv_nsec =
1737 		btrfs_stack_timespec_nsec(&inode_item->otime);
1738 
1739 	inode->i_generation = BTRFS_I(inode)->generation;
1740 	BTRFS_I(inode)->index_cnt = (u64)-1;
1741 
1742 	mutex_unlock(&delayed_node->mutex);
1743 	btrfs_release_delayed_node(delayed_node);
1744 	return 0;
1745 }
1746 
1747 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1748 			       struct btrfs_root *root,
1749 			       struct btrfs_inode *inode)
1750 {
1751 	struct btrfs_delayed_node *delayed_node;
1752 	int ret = 0;
1753 
1754 	delayed_node = btrfs_get_or_create_delayed_node(inode);
1755 	if (IS_ERR(delayed_node))
1756 		return PTR_ERR(delayed_node);
1757 
1758 	mutex_lock(&delayed_node->mutex);
1759 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1760 		fill_stack_inode_item(trans, &delayed_node->inode_item,
1761 				      &inode->vfs_inode);
1762 		goto release_node;
1763 	}
1764 
1765 	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1766 	if (ret)
1767 		goto release_node;
1768 
1769 	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1770 	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1771 	delayed_node->count++;
1772 	atomic_inc(&root->fs_info->delayed_root->items);
1773 release_node:
1774 	mutex_unlock(&delayed_node->mutex);
1775 	btrfs_release_delayed_node(delayed_node);
1776 	return ret;
1777 }
1778 
1779 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1780 {
1781 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1782 	struct btrfs_delayed_node *delayed_node;
1783 
1784 	/*
1785 	 * We don't do delayed inode updates during log recovery because it
1786 	 * leads to ENOSPC problems.  This means we also can't do delayed
1787 	 * inode ref deletions.
1788 	 */
1789 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1790 		return -EAGAIN;
1791 
1792 	delayed_node = btrfs_get_or_create_delayed_node(inode);
1793 	if (IS_ERR(delayed_node))
1794 		return PTR_ERR(delayed_node);
1795 
1796 	/*
1797 	 * We don't reserve space for the inode ref deletion because:
1798 	 * - We ONLY do async inode ref deletion for an inode that has only
1799 	 *   one link (i_nlink == 1), which means there is only one inode ref.
1800 	 *   In most cases the inode ref and the inode item are in the same
1801 	 *   leaf, and we will deal with them at the same time.  Since we are
1802 	 *   sure we will reserve the space for the inode item, it is
1803 	 *   unnecessary to reserve space for the inode ref deletion.
1804 	 * - If the inode ref and the inode item are not in the same leaf, we
1805 	 *   also needn't worry about an ENOSPC problem, because we reserve
1806 	 *   much more space for the inode update than it needs.
1807 	 * - At worst, we can steal some space from the global reservation,
1808 	 *   but that is very rare.
1809 	 */
1810 	mutex_lock(&delayed_node->mutex);
1811 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1812 		goto release_node;
1813 
1814 	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1815 	delayed_node->count++;
1816 	atomic_inc(&fs_info->delayed_root->items);
1817 release_node:
1818 	mutex_unlock(&delayed_node->mutex);
1819 	btrfs_release_delayed_node(delayed_node);
1820 	return 0;
1821 }
1822 
1823 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1824 {
1825 	struct btrfs_root *root = delayed_node->root;
1826 	struct btrfs_fs_info *fs_info = root->fs_info;
1827 	struct btrfs_delayed_item *curr_item, *prev_item;
1828 
1829 	mutex_lock(&delayed_node->mutex);
1830 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1831 	while (curr_item) {
1832 		btrfs_delayed_item_release_metadata(root, curr_item);
1833 		prev_item = curr_item;
1834 		curr_item = __btrfs_next_delayed_item(prev_item);
1835 		btrfs_release_delayed_item(prev_item);
1836 	}
1837 
1838 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1839 	while (curr_item) {
1840 		btrfs_delayed_item_release_metadata(root, curr_item);
1841 		prev_item = curr_item;
1842 		curr_item = __btrfs_next_delayed_item(prev_item);
1843 		btrfs_release_delayed_item(prev_item);
1844 	}
1845 
1846 	btrfs_release_delayed_iref(delayed_node);
1847 
1848 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1849 		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1850 		btrfs_release_delayed_inode(delayed_node);
1851 	}
1852 	mutex_unlock(&delayed_node->mutex);
1853 }
1854 
1855 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1856 {
1857 	struct btrfs_delayed_node *delayed_node;
1858 
1859 	delayed_node = btrfs_get_delayed_node(inode);
1860 	if (!delayed_node)
1861 		return;
1862 
1863 	__btrfs_kill_delayed_node(delayed_node);
1864 	btrfs_release_delayed_node(delayed_node);
1865 }
1866 
1867 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1868 {
1869 	u64 inode_id = 0;
1870 	struct btrfs_delayed_node *delayed_nodes[8];
1871 	int i, n;
1872 
1873 	while (1) {
1874 		spin_lock(&root->inode_lock);
1875 		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1876 					   (void **)delayed_nodes, inode_id,
1877 					   ARRAY_SIZE(delayed_nodes));
1878 		if (!n) {
1879 			spin_unlock(&root->inode_lock);
1880 			break;
1881 		}
1882 
1883 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1884 		for (i = 0; i < n; i++) {
1885 			/*
1886 			 * Don't increase refs in case the node is dead and
1887 			 * about to be removed from the tree in the loop below
1888 			 */
1889 			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1890 				delayed_nodes[i] = NULL;
1891 		}
1892 		spin_unlock(&root->inode_lock);
1893 
1894 		for (i = 0; i < n; i++) {
1895 			if (!delayed_nodes[i])
1896 				continue;
1897 			__btrfs_kill_delayed_node(delayed_nodes[i]);
1898 			btrfs_release_delayed_node(delayed_nodes[i]);
1899 		}
1900 	}
1901 }
1902 
1903 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1904 {
1905 	struct btrfs_delayed_node *curr_node, *prev_node;
1906 
1907 	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1908 	while (curr_node) {
1909 		__btrfs_kill_delayed_node(curr_node);
1910 
1911 		prev_node = curr_node;
1912 		curr_node = btrfs_next_delayed_node(curr_node);
1913 		btrfs_release_delayed_node(prev_node);
1914 	}
1915 }
1916 
1917