xref: /openbmc/linux/fs/btrfs/delayed-inode.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

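/*
 * Look up the delayed node for an inode, taking a reference on it.  The
 * pointer cached in the btrfs inode is read locklessly first; on a
 * radix-tree hit the node gets one reference for the caller and, if it
 * was not cached in the inode yet, a second one for the inode cache.
 * Returns NULL if no delayed node exists for this inode.
 */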
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

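/*
 * Drop one reference on the delayed node.  If items are still pending,
 * the node is (re)queued on the work lists first; otherwise it is
 * dequeued.  When the last reference goes away, the node is removed
 * from the root's radix tree and freed; the refcount is re-checked
 * under root->inode_lock so that a concurrent lookup which has just
 * taken a new reference cannot see the node freed under it.
 */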
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:	the rb-root of the delayed node to search in
 * @key:	the key to look up
 * @prev:	used to store the previous item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the previous item
 * and the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

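/*
 * Link a delayed item into the insertion or deletion rb-tree of the
 * delayed node, ordered by key.  Returns -EEXIST if an item with the
 * same key is already queued.  For dir index insertions this also
 * advances index_cnt past the new index.
 */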
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				 rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
			delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

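/*
 * Account one completed delayed item and wake up waiters in
 * btrfs_balance_delayed_items(), either when the backlog drops below
 * BTRFS_DELAYED_BACKGROUND or once per BTRFS_DELAYED_BATCH completed
 * items, so waiters are released in batches instead of on every item.
 */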
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

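/*
 * Move the metadata reservation for one tree operation from the
 * transaction's block reserve into the global delayed_block_rsv, so
 * that the item can be flushed later, outside the transaction that
 * queued it.  The amount is remembered in item->bytes_reserved and
 * given back by btrfs_delayed_item_release_metadata() once the item
 * reaches the tree (or is dropped).
 */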
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not, fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We
	 * are here through delalloc, which means we've likely just cowed down
	 * close to the leaf that contains the inode, so we would steal less
	 * just doing the fallback inode update; if we do end up having to
	 * steal from the global block rsv, we hopefully only steal one or two
	 * blocks' worth, which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have
	 * things migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced the size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, while we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * the time we'd clean it up.  So to take care of this, release the
	 * space for the meta reservation here.  I think it may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * Count how many continuous items we can insert in a batch.
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * We need to allocate some memory, which might cause the task to
	 * sleep, so set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper does simple insertions that needn't extend the item for new
 * data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * We insert an item first, and then, if there are continuous items, we try
 * to insert them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * Count how many dir index items we can delete in a batch.
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * We can't find the item that this delayed node points to,
		 * so the node is stale; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode that has only one link,
	 * so there is only one iref. The case that several irefs are in
	 * the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error; the transaction is aborted and any outstanding
 * delayed items are cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

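/*
 * Worker body for the delayed_workers workqueue: repeatedly pick a
 * prepared delayed node, join a transaction and flush the node's items,
 * until either async_work->nr nodes have been processed (nr == 0 means
 * up to BTRFS_DELAYED_WRITEBACK) or the backlog has dropped below
 * BTRFS_DELAYED_BACKGROUND / 2.
 */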
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

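/*
 * Tell a waiter in btrfs_balance_delayed_items() whether it may stop
 * waiting: either at least BTRFS_DELAYED_BATCH items have completed
 * since it sampled items_seq, or the backlog has fallen below
 * BTRFS_DELAYED_BACKGROUND.
 */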
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

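/*
 * Throttle delayed-item producers: below BTRFS_DELAYED_BACKGROUND do
 * nothing; above it, kick background flushing of BTRFS_DELAYED_BATCH
 * nodes; and once the backlog reaches BTRFS_DELAYED_WRITEBACK, kick a
 * full flush and block until could_end_wait() is satisfied.
 */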
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * We reserved enough space when we started a new transaction, so
	 * reserving metadata here cannot fail.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

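/*
 * If the dir index being deleted is still queued as a delayed
 * insertion, the two operations cancel out: drop the queued insertion
 * item and its reservation instead of queueing a deletion item.
 * Returns 0 if such an insertion was found and dropped, 1 otherwise.
 */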
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * We reserved enough space when we started a new transaction, so
	 * reserving metadata here cannot fail.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the dir, and we do
	 * not insert/delete delayed items in this period. So we also
	 * needn't requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of a delayed item is impossible, so we needn't
	 * lock them. And we hold the i_mutex of the directory, so nobody
	 * can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

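/*
 * Copy the in-memory inode state into the stack inode item stored in
 * the delayed node; __btrfs_update_delayed_inode() later writes that
 * item into the fs tree in one go.
 */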
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   In most cases, the inode ref and the inode item are in the same
	 *   leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about an enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At worst, we can steal some space from the global reservation,
	 *   but that is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

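/*
 * Drop every delayed node of a (dead) root: walk the radix tree in
 * batches of up to 8 nodes via gang lookup, take a reference on each
 * under inode_lock, then kill and release them outside the lock,
 * resuming the lookup after the highest inode id seen so far.
 */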
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}