1 /*
2  * Copyright (C) 2011 Fujitsu.  All rights reserved.
3  * Written by Miao Xie <miaox@cn.fujitsu.com>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public
7  * License v2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public
15  * License along with this program; if not, write to the
16  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17  * Boston, MA 021110-1307, USA.
18  */
19 
20 #include <linux/slab.h>
21 #include <linux/iversion.h>
22 #include "delayed-inode.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "ctree.h"
26 #include "qgroup.h"
27 
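/*
 * Thresholds for the delayed-item machinery: above BTRFS_DELAYED_BACKGROUND
 * pending items, background flushing is kicked off; at or above
 * BTRFS_DELAYED_WRITEBACK, btrfs_balance_delayed_items() blocks the caller
 * until a batch has been flushed; BTRFS_DELAYED_BATCH is the granularity
 * used when waking waiters in finish_one_item().
 */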
28 #define BTRFS_DELAYED_WRITEBACK		512
29 #define BTRFS_DELAYED_BACKGROUND	128
30 #define BTRFS_DELAYED_BATCH		16
31 
32 static struct kmem_cache *delayed_node_cache;
33 
34 int __init btrfs_delayed_inode_init(void)
35 {
36 	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
37 					sizeof(struct btrfs_delayed_node),
38 					0,
39 					SLAB_MEM_SPREAD,
40 					NULL);
41 	if (!delayed_node_cache)
42 		return -ENOMEM;
43 	return 0;
44 }
45 
46 void __cold btrfs_delayed_inode_exit(void)
47 {
48 	kmem_cache_destroy(delayed_node_cache);
49 }
50 
51 static inline void btrfs_init_delayed_node(
52 				struct btrfs_delayed_node *delayed_node,
53 				struct btrfs_root *root, u64 inode_id)
54 {
55 	delayed_node->root = root;
56 	delayed_node->inode_id = inode_id;
57 	refcount_set(&delayed_node->refs, 0);
58 	delayed_node->ins_root = RB_ROOT;
59 	delayed_node->del_root = RB_ROOT;
60 	mutex_init(&delayed_node->mutex);
61 	INIT_LIST_HEAD(&delayed_node->n_list);
62 	INIT_LIST_HEAD(&delayed_node->p_list);
63 }
64 
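/*
 * Two delayed items are "continuous" if they are dir index items of the
 * same directory with consecutive index offsets; such runs can be batched
 * into a single leaf operation.
 */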
65 static inline int btrfs_is_continuous_delayed_item(
66 					struct btrfs_delayed_item *item1,
67 					struct btrfs_delayed_item *item2)
68 {
69 	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
70 	    item1->key.objectid == item2->key.objectid &&
71 	    item1->key.type == item2->key.type &&
72 	    item1->key.offset + 1 == item2->key.offset)
73 		return 1;
74 	return 0;
75 }
76 
77 static struct btrfs_delayed_node *btrfs_get_delayed_node(
78 		struct btrfs_inode *btrfs_inode)
79 {
80 	struct btrfs_root *root = btrfs_inode->root;
81 	u64 ino = btrfs_ino(btrfs_inode);
82 	struct btrfs_delayed_node *node;
83 
84 	node = READ_ONCE(btrfs_inode->delayed_node);
85 	if (node) {
86 		refcount_inc(&node->refs);
87 		return node;
88 	}
89 
90 	spin_lock(&root->inode_lock);
91 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
92 
93 	if (node) {
94 		if (btrfs_inode->delayed_node) {
95 			refcount_inc(&node->refs);	/* can be accessed */
96 			BUG_ON(btrfs_inode->delayed_node != node);
97 			spin_unlock(&root->inode_lock);
98 			return node;
99 		}
100 
101 		/*
102 		 * It's possible that we're racing into the middle of removing
103 		 * this node from the radix tree.  In this case, the refcount
104 		 * was zero and it should never go back to one.  Just return
105 		 * NULL like it was never in the radix at all; our release
106 		 * function is in the process of removing it.
107 		 *
108 		 * Some implementations of refcount_inc refuse to bump the
109 		 * refcount once it has hit zero.  If we don't do this dance
110 		 * here, refcount_inc() may decide to just WARN_ONCE() instead
111 		 * of actually bumping the refcount.
112 		 *
113 		 * If this node is properly in the radix, we want to bump the
114 		 * refcount twice, once for the inode and once for this get
115 		 * operation.
116 		 */
117 		if (refcount_inc_not_zero(&node->refs)) {
118 			refcount_inc(&node->refs);
119 			btrfs_inode->delayed_node = node;
120 		} else {
121 			node = NULL;
122 		}
123 
124 		spin_unlock(&root->inode_lock);
125 		return node;
126 	}
127 	spin_unlock(&root->inode_lock);
128 
129 	return NULL;
130 }
131 
/* Will return either the node or ERR_PTR(-ENOMEM) */
133 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
134 		struct btrfs_inode *btrfs_inode)
135 {
136 	struct btrfs_delayed_node *node;
137 	struct btrfs_root *root = btrfs_inode->root;
138 	u64 ino = btrfs_ino(btrfs_inode);
139 	int ret;
140 
141 again:
142 	node = btrfs_get_delayed_node(btrfs_inode);
143 	if (node)
144 		return node;
145 
146 	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
147 	if (!node)
148 		return ERR_PTR(-ENOMEM);
149 	btrfs_init_delayed_node(node, root, ino);
150 
151 	/* cached in the btrfs inode and can be accessed */
152 	refcount_set(&node->refs, 2);
153 
154 	ret = radix_tree_preload(GFP_NOFS);
155 	if (ret) {
156 		kmem_cache_free(delayed_node_cache, node);
157 		return ERR_PTR(ret);
158 	}
159 
160 	spin_lock(&root->inode_lock);
161 	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
162 	if (ret == -EEXIST) {
163 		spin_unlock(&root->inode_lock);
164 		kmem_cache_free(delayed_node_cache, node);
165 		radix_tree_preload_end();
166 		goto again;
167 	}
168 	btrfs_inode->delayed_node = node;
169 	spin_unlock(&root->inode_lock);
170 	radix_tree_preload_end();
171 
172 	return node;
173 }
174 
/*
 * Call this while holding delayed_node->mutex.
 *
 * If mod = 1, add this node to the prepared list.
 */
180 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
181 				     struct btrfs_delayed_node *node,
182 				     int mod)
183 {
184 	spin_lock(&root->lock);
185 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
186 		if (!list_empty(&node->p_list))
187 			list_move_tail(&node->p_list, &root->prepare_list);
188 		else if (mod)
189 			list_add_tail(&node->p_list, &root->prepare_list);
190 	} else {
191 		list_add_tail(&node->n_list, &root->node_list);
192 		list_add_tail(&node->p_list, &root->prepare_list);
193 		refcount_inc(&node->refs);	/* inserted into list */
194 		root->nodes++;
195 		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
196 	}
197 	spin_unlock(&root->lock);
198 }
199 
/* Call this while holding delayed_node->mutex. */
201 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
202 				       struct btrfs_delayed_node *node)
203 {
204 	spin_lock(&root->lock);
205 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
206 		root->nodes--;
207 		refcount_dec(&node->refs);	/* not in the list */
208 		list_del_init(&node->n_list);
209 		if (!list_empty(&node->p_list))
210 			list_del_init(&node->p_list);
211 		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
212 	}
213 	spin_unlock(&root->lock);
214 }
215 
216 static struct btrfs_delayed_node *btrfs_first_delayed_node(
217 			struct btrfs_delayed_root *delayed_root)
218 {
219 	struct list_head *p;
220 	struct btrfs_delayed_node *node = NULL;
221 
222 	spin_lock(&delayed_root->lock);
223 	if (list_empty(&delayed_root->node_list))
224 		goto out;
225 
226 	p = delayed_root->node_list.next;
227 	node = list_entry(p, struct btrfs_delayed_node, n_list);
228 	refcount_inc(&node->refs);
229 out:
230 	spin_unlock(&delayed_root->lock);
231 
232 	return node;
233 }
234 
235 static struct btrfs_delayed_node *btrfs_next_delayed_node(
236 						struct btrfs_delayed_node *node)
237 {
238 	struct btrfs_delayed_root *delayed_root;
239 	struct list_head *p;
240 	struct btrfs_delayed_node *next = NULL;
241 
242 	delayed_root = node->root->fs_info->delayed_root;
243 	spin_lock(&delayed_root->lock);
244 	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
245 		/* not in the list */
246 		if (list_empty(&delayed_root->node_list))
247 			goto out;
248 		p = delayed_root->node_list.next;
249 	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
250 		goto out;
251 	else
252 		p = node->n_list.next;
253 
254 	next = list_entry(p, struct btrfs_delayed_node, n_list);
255 	refcount_inc(&next->refs);
256 out:
257 	spin_unlock(&delayed_root->lock);
258 
259 	return next;
260 }
261 
262 static void __btrfs_release_delayed_node(
263 				struct btrfs_delayed_node *delayed_node,
264 				int mod)
265 {
266 	struct btrfs_delayed_root *delayed_root;
267 
268 	if (!delayed_node)
269 		return;
270 
271 	delayed_root = delayed_node->root->fs_info->delayed_root;
272 
273 	mutex_lock(&delayed_node->mutex);
274 	if (delayed_node->count)
275 		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
276 	else
277 		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
278 	mutex_unlock(&delayed_node->mutex);
279 
280 	if (refcount_dec_and_test(&delayed_node->refs)) {
281 		struct btrfs_root *root = delayed_node->root;
282 
283 		spin_lock(&root->inode_lock);
284 		/*
285 		 * Once our refcount goes to zero, nobody is allowed to bump it
286 		 * back up.  We can delete it now.
287 		 */
288 		ASSERT(refcount_read(&delayed_node->refs) == 0);
289 		radix_tree_delete(&root->delayed_nodes_tree,
290 				  delayed_node->inode_id);
291 		spin_unlock(&root->inode_lock);
292 		kmem_cache_free(delayed_node_cache, delayed_node);
293 	}
294 }
295 
296 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
297 {
298 	__btrfs_release_delayed_node(node, 0);
299 }
300 
301 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
302 					struct btrfs_delayed_root *delayed_root)
303 {
304 	struct list_head *p;
305 	struct btrfs_delayed_node *node = NULL;
306 
307 	spin_lock(&delayed_root->lock);
308 	if (list_empty(&delayed_root->prepare_list))
309 		goto out;
310 
311 	p = delayed_root->prepare_list.next;
312 	list_del_init(p);
313 	node = list_entry(p, struct btrfs_delayed_node, p_list);
314 	refcount_inc(&node->refs);
315 out:
316 	spin_unlock(&delayed_root->lock);
317 
318 	return node;
319 }
320 
321 static inline void btrfs_release_prepared_delayed_node(
322 					struct btrfs_delayed_node *node)
323 {
324 	__btrfs_release_delayed_node(node, 1);
325 }
326 
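/*
 * Allocate a delayed item with @data_len bytes of trailing payload; callers
 * fill item->data with the raw item body (e.g. a btrfs_dir_item followed by
 * the name) that will later be copied into a leaf.
 */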
327 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
328 {
329 	struct btrfs_delayed_item *item;
330 	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
331 	if (item) {
332 		item->data_len = data_len;
333 		item->ins_or_del = 0;
334 		item->bytes_reserved = 0;
335 		item->delayed_node = NULL;
336 		refcount_set(&item->refs, 1);
337 	}
338 	return item;
339 }
340 
/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:	the rb-root to search in
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if the right item isn't found, NULL is returned and the previous
 * and next items are stored in @prev and @next.
 */
351 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
352 				struct rb_root *root,
353 				struct btrfs_key *key,
354 				struct btrfs_delayed_item **prev,
355 				struct btrfs_delayed_item **next)
356 {
357 	struct rb_node *node, *prev_node = NULL;
358 	struct btrfs_delayed_item *delayed_item = NULL;
359 	int ret = 0;
360 
361 	node = root->rb_node;
362 
363 	while (node) {
364 		delayed_item = rb_entry(node, struct btrfs_delayed_item,
365 					rb_node);
366 		prev_node = node;
367 		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
368 		if (ret < 0)
369 			node = node->rb_right;
370 		else if (ret > 0)
371 			node = node->rb_left;
372 		else
373 			return delayed_item;
374 	}
375 
376 	if (prev) {
377 		if (!prev_node)
378 			*prev = NULL;
379 		else if (ret < 0)
380 			*prev = delayed_item;
381 		else if ((node = rb_prev(prev_node)) != NULL) {
382 			*prev = rb_entry(node, struct btrfs_delayed_item,
383 					 rb_node);
384 		} else
385 			*prev = NULL;
386 	}
387 
388 	if (next) {
389 		if (!prev_node)
390 			*next = NULL;
391 		else if (ret > 0)
392 			*next = delayed_item;
393 		else if ((node = rb_next(prev_node)) != NULL) {
394 			*next = rb_entry(node, struct btrfs_delayed_item,
395 					 rb_node);
396 		} else
397 			*next = NULL;
398 	}
399 	return NULL;
400 }
401 
402 static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
403 					struct btrfs_delayed_node *delayed_node,
404 					struct btrfs_key *key)
405 {
406 	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
407 					   NULL, NULL);
408 }
409 
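/*
 * Link @ins into the insertion or deletion rb-tree of @delayed_node, keyed
 * by its btrfs key.  For queued dir index insertions, also advance the
 * node's index_cnt past the new index.
 */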
410 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
411 				    struct btrfs_delayed_item *ins,
412 				    int action)
413 {
414 	struct rb_node **p, *node;
415 	struct rb_node *parent_node = NULL;
416 	struct rb_root *root;
417 	struct btrfs_delayed_item *item;
418 	int cmp;
419 
420 	if (action == BTRFS_DELAYED_INSERTION_ITEM)
421 		root = &delayed_node->ins_root;
422 	else if (action == BTRFS_DELAYED_DELETION_ITEM)
423 		root = &delayed_node->del_root;
424 	else
425 		BUG();
426 	p = &root->rb_node;
427 	node = &ins->rb_node;
428 
429 	while (*p) {
430 		parent_node = *p;
431 		item = rb_entry(parent_node, struct btrfs_delayed_item,
432 				 rb_node);
433 
434 		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
435 		if (cmp < 0)
436 			p = &(*p)->rb_right;
437 		else if (cmp > 0)
438 			p = &(*p)->rb_left;
439 		else
440 			return -EEXIST;
441 	}
442 
443 	rb_link_node(node, parent_node, p);
444 	rb_insert_color(node, root);
445 	ins->delayed_node = delayed_node;
446 	ins->ins_or_del = action;
447 
448 	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
449 	    action == BTRFS_DELAYED_INSERTION_ITEM &&
450 	    ins->key.offset >= delayed_node->index_cnt)
451 			delayed_node->index_cnt = ins->key.offset + 1;
452 
453 	delayed_node->count++;
454 	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
455 	return 0;
456 }
457 
458 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
459 					      struct btrfs_delayed_item *item)
460 {
461 	return __btrfs_add_delayed_item(node, item,
462 					BTRFS_DELAYED_INSERTION_ITEM);
463 }
464 
465 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
466 					     struct btrfs_delayed_item *item)
467 {
468 	return __btrfs_add_delayed_item(node, item,
469 					BTRFS_DELAYED_DELETION_ITEM);
470 }
471 
472 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
473 {
474 	int seq = atomic_inc_return(&delayed_root->items_seq);
475 
476 	/*
477 	 * atomic_dec_return implies a barrier for waitqueue_active
478 	 */
479 	if ((atomic_dec_return(&delayed_root->items) <
480 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
481 	    waitqueue_active(&delayed_root->wait))
482 		wake_up(&delayed_root->wait);
483 }
484 
485 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
486 {
487 	struct rb_root *root;
488 	struct btrfs_delayed_root *delayed_root;
489 
490 	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
491 
492 	BUG_ON(!delayed_root);
493 	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
494 	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
495 
496 	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
497 		root = &delayed_item->delayed_node->ins_root;
498 	else
499 		root = &delayed_item->delayed_node->del_root;
500 
501 	rb_erase(&delayed_item->rb_node, root);
502 	delayed_item->delayed_node->count--;
503 
504 	finish_one_item(delayed_root);
505 }
506 
507 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
508 {
509 	if (item) {
510 		__btrfs_remove_delayed_item(item);
511 		if (refcount_dec_and_test(&item->refs))
512 			kfree(item);
513 	}
514 }
515 
516 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
517 					struct btrfs_delayed_node *delayed_node)
518 {
519 	struct rb_node *p;
520 	struct btrfs_delayed_item *item = NULL;
521 
522 	p = rb_first(&delayed_node->ins_root);
523 	if (p)
524 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
525 
526 	return item;
527 }
528 
529 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
530 					struct btrfs_delayed_node *delayed_node)
531 {
532 	struct rb_node *p;
533 	struct btrfs_delayed_item *item = NULL;
534 
535 	p = rb_first(&delayed_node->del_root);
536 	if (p)
537 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
538 
539 	return item;
540 }
541 
542 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
543 						struct btrfs_delayed_item *item)
544 {
545 	struct rb_node *p;
546 	struct btrfs_delayed_item *next = NULL;
547 
548 	p = rb_next(&item->rb_node);
549 	if (p)
550 		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
551 
552 	return next;
553 }
554 
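/*
 * Move one unit of metadata reservation from the transaction's block rsv
 * to the global delayed_block_rsv, so the item can still be flushed after
 * the transaction handle that created it has been ended.
 */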
555 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
556 					       struct btrfs_root *root,
557 					       struct btrfs_delayed_item *item)
558 {
559 	struct btrfs_block_rsv *src_rsv;
560 	struct btrfs_block_rsv *dst_rsv;
561 	struct btrfs_fs_info *fs_info = root->fs_info;
562 	u64 num_bytes;
563 	int ret;
564 
565 	if (!trans->bytes_reserved)
566 		return 0;
567 
568 	src_rsv = trans->block_rsv;
569 	dst_rsv = &fs_info->delayed_block_rsv;
570 
571 	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
572 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
573 	if (!ret) {
574 		trace_btrfs_space_reservation(fs_info, "delayed_item",
575 					      item->key.objectid,
576 					      num_bytes, 1);
577 		item->bytes_reserved = num_bytes;
578 	}
579 
580 	return ret;
581 }
582 
583 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
584 						struct btrfs_delayed_item *item)
585 {
586 	struct btrfs_block_rsv *rsv;
587 	struct btrfs_fs_info *fs_info = root->fs_info;
588 
589 	if (!item->bytes_reserved)
590 		return;
591 
592 	rsv = &fs_info->delayed_block_rsv;
593 	btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved);
594 	trace_btrfs_space_reservation(fs_info, "delayed_item",
595 				      item->key.objectid, item->bytes_reserved,
596 				      0);
597 	btrfs_block_rsv_release(fs_info, rsv,
598 				item->bytes_reserved);
599 }
600 
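/*
 * Reserve metadata space for a delayed inode update, either by migrating
 * the reservation from the transaction's block rsv or, if the transaction
 * reserved nothing (see btrfs_dirty_inode), by reserving fresh space
 * directly into the delayed_block_rsv.
 */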
601 static int btrfs_delayed_inode_reserve_metadata(
602 					struct btrfs_trans_handle *trans,
603 					struct btrfs_root *root,
604 					struct btrfs_inode *inode,
605 					struct btrfs_delayed_node *node)
606 {
607 	struct btrfs_fs_info *fs_info = root->fs_info;
608 	struct btrfs_block_rsv *src_rsv;
609 	struct btrfs_block_rsv *dst_rsv;
610 	u64 num_bytes;
611 	int ret;
612 
613 	src_rsv = trans->block_rsv;
614 	dst_rsv = &fs_info->delayed_block_rsv;
615 
616 	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
617 
618 	ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
619 	if (ret < 0)
620 		return ret;
	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which, for speed, doesn't reserve space.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space here.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
630 	if (!src_rsv || (!trans->bytes_reserved &&
631 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
632 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
633 					  BTRFS_RESERVE_NO_FLUSH);
634 		/*
635 		 * Since we're under a transaction reserve_metadata_bytes could
636 		 * try to commit the transaction which will make it return
637 		 * EAGAIN to make us stop the transaction we have, so return
638 		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
639 		 */
640 		if (ret == -EAGAIN) {
641 			ret = -ENOSPC;
642 			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
643 		}
644 		if (!ret) {
645 			node->bytes_reserved = num_bytes;
646 			trace_btrfs_space_reservation(fs_info,
647 						      "delayed_inode",
648 						      btrfs_ino(inode),
649 						      num_bytes, 1);
650 		}
651 		return ret;
652 	}
653 
654 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
655 	if (!ret) {
656 		trace_btrfs_space_reservation(fs_info, "delayed_inode",
657 					      btrfs_ino(inode), num_bytes, 1);
658 		node->bytes_reserved = num_bytes;
659 	}
660 
661 	return ret;
662 }
663 
664 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
665 						struct btrfs_delayed_node *node,
666 						bool qgroup_free)
667 {
668 	struct btrfs_block_rsv *rsv;
669 
670 	if (!node->bytes_reserved)
671 		return;
672 
673 	rsv = &fs_info->delayed_block_rsv;
674 	trace_btrfs_space_reservation(fs_info, "delayed_inode",
675 				      node->inode_id, node->bytes_reserved, 0);
676 	btrfs_block_rsv_release(fs_info, rsv,
677 				node->bytes_reserved);
678 	if (qgroup_free)
679 		btrfs_qgroup_free_meta_prealloc(node->root,
680 				node->bytes_reserved);
681 	else
682 		btrfs_qgroup_convert_reserved_meta(node->root,
683 				node->bytes_reserved);
684 	node->bytes_reserved = 0;
685 }
686 
/*
 * This helper inserts as many continuous items as the free space of the
 * leaf allows into the same leaf.
 */
691 static int btrfs_batch_insert_items(struct btrfs_root *root,
692 				    struct btrfs_path *path,
693 				    struct btrfs_delayed_item *item)
694 {
695 	struct btrfs_fs_info *fs_info = root->fs_info;
696 	struct btrfs_delayed_item *curr, *next;
697 	int free_space;
698 	int total_data_size = 0, total_size = 0;
699 	struct extent_buffer *leaf;
700 	char *data_ptr;
701 	struct btrfs_key *keys;
702 	u32 *data_size;
703 	struct list_head head;
704 	int slot;
705 	int nitems;
706 	int i;
707 	int ret = 0;
708 
709 	BUG_ON(!path->nodes[0]);
710 
711 	leaf = path->nodes[0];
712 	free_space = btrfs_leaf_free_space(fs_info, leaf);
713 	INIT_LIST_HEAD(&head);
714 
715 	next = item;
716 	nitems = 0;
717 
	/*
	 * Count the number of continuous items that we can insert in one batch.
	 */
721 	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
722 	       free_space) {
723 		total_data_size += next->data_len;
724 		total_size += next->data_len + sizeof(struct btrfs_item);
725 		list_add_tail(&next->tree_list, &head);
726 		nitems++;
727 
728 		curr = next;
729 		next = __btrfs_next_delayed_item(curr);
730 		if (!next)
731 			break;
732 
733 		if (!btrfs_is_continuous_delayed_item(curr, next))
734 			break;
735 	}
736 
737 	if (!nitems) {
738 		ret = 0;
739 		goto out;
740 	}
741 
	/*
	 * We need to allocate some memory, and that might cause the task to
	 * sleep, so set all locked nodes in the path to blocking locks first.
	 */
747 	btrfs_set_path_blocking(path);
748 
749 	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
750 	if (!keys) {
751 		ret = -ENOMEM;
752 		goto out;
753 	}
754 
755 	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
756 	if (!data_size) {
757 		ret = -ENOMEM;
758 		goto error;
759 	}
760 
761 	/* get keys of all the delayed items */
762 	i = 0;
763 	list_for_each_entry(next, &head, tree_list) {
764 		keys[i] = next->key;
765 		data_size[i] = next->data_len;
766 		i++;
767 	}
768 
	/* reset all the locked nodes in the path to spinning locks. */
770 	btrfs_clear_path_blocking(path, NULL, 0);
771 
772 	/* insert the keys of the items */
773 	setup_items_for_insert(root, path, keys, data_size,
774 			       total_data_size, total_size, nitems);
775 
776 	/* insert the dir index items */
777 	slot = path->slots[0];
778 	list_for_each_entry_safe(curr, next, &head, tree_list) {
779 		data_ptr = btrfs_item_ptr(leaf, slot, char);
780 		write_extent_buffer(leaf, &curr->data,
781 				    (unsigned long)data_ptr,
782 				    curr->data_len);
783 		slot++;
784 
785 		btrfs_delayed_item_release_metadata(root, curr);
786 
787 		list_del(&curr->tree_list);
788 		btrfs_release_delayed_item(curr);
789 	}
790 
791 error:
792 	kfree(data_size);
793 	kfree(keys);
794 out:
795 	return ret;
796 }
797 
/*
 * This helper only handles simple insertions that don't need to extend an
 * existing item for new data, such as directory index and inode item
 * insertions.
 */
802 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
803 				     struct btrfs_root *root,
804 				     struct btrfs_path *path,
805 				     struct btrfs_delayed_item *delayed_item)
806 {
807 	struct extent_buffer *leaf;
808 	char *ptr;
809 	int ret;
810 
811 	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
812 				      delayed_item->data_len);
813 	if (ret < 0 && ret != -EEXIST)
814 		return ret;
815 
816 	leaf = path->nodes[0];
817 
818 	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
819 
820 	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
821 			    delayed_item->data_len);
822 	btrfs_mark_buffer_dirty(leaf);
823 
824 	btrfs_delayed_item_release_metadata(root, delayed_item);
825 	return 0;
826 }
827 
/*
 * Insert one item first; then, if continuous items follow it, try to insert
 * them into the same leaf.
 */
832 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
833 				      struct btrfs_path *path,
834 				      struct btrfs_root *root,
835 				      struct btrfs_delayed_node *node)
836 {
837 	struct btrfs_delayed_item *curr, *prev;
838 	int ret = 0;
839 
840 do_again:
841 	mutex_lock(&node->mutex);
842 	curr = __btrfs_first_delayed_insertion_item(node);
843 	if (!curr)
844 		goto insert_end;
845 
846 	ret = btrfs_insert_delayed_item(trans, root, path, curr);
847 	if (ret < 0) {
848 		btrfs_release_path(path);
849 		goto insert_end;
850 	}
851 
852 	prev = curr;
853 	curr = __btrfs_next_delayed_item(prev);
854 	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
855 		/* insert the continuous items into the same leaf */
856 		path->slots[0]++;
857 		btrfs_batch_insert_items(root, path, curr);
858 	}
859 	btrfs_release_delayed_item(prev);
860 	btrfs_mark_buffer_dirty(path->nodes[0]);
861 
862 	btrfs_release_path(path);
863 	mutex_unlock(&node->mutex);
864 	goto do_again;
865 
866 insert_end:
867 	mutex_unlock(&node->mutex);
868 	return ret;
869 }
870 
871 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
872 				    struct btrfs_root *root,
873 				    struct btrfs_path *path,
874 				    struct btrfs_delayed_item *item)
875 {
876 	struct btrfs_delayed_item *curr, *next;
877 	struct extent_buffer *leaf;
878 	struct btrfs_key key;
879 	struct list_head head;
880 	int nitems, i, last_item;
881 	int ret = 0;
882 
883 	BUG_ON(!path->nodes[0]);
884 
885 	leaf = path->nodes[0];
886 
887 	i = path->slots[0];
888 	last_item = btrfs_header_nritems(leaf) - 1;
889 	if (i > last_item)
890 		return -ENOENT;	/* FIXME: Is errno suitable? */
891 
892 	next = item;
893 	INIT_LIST_HEAD(&head);
894 	btrfs_item_key_to_cpu(leaf, &key, i);
895 	nitems = 0;
	/*
	 * Count the number of dir index items that we can delete in one batch.
	 */
899 	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
900 		list_add_tail(&next->tree_list, &head);
901 		nitems++;
902 
903 		curr = next;
904 		next = __btrfs_next_delayed_item(curr);
905 		if (!next)
906 			break;
907 
908 		if (!btrfs_is_continuous_delayed_item(curr, next))
909 			break;
910 
911 		i++;
912 		if (i > last_item)
913 			break;
914 		btrfs_item_key_to_cpu(leaf, &key, i);
915 	}
916 
917 	if (!nitems)
918 		return 0;
919 
920 	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
921 	if (ret)
922 		goto out;
923 
924 	list_for_each_entry_safe(curr, next, &head, tree_list) {
925 		btrfs_delayed_item_release_metadata(root, curr);
926 		list_del(&curr->tree_list);
927 		btrfs_release_delayed_item(curr);
928 	}
929 
930 out:
931 	return ret;
932 }
933 
934 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
935 				      struct btrfs_path *path,
936 				      struct btrfs_root *root,
937 				      struct btrfs_delayed_node *node)
938 {
939 	struct btrfs_delayed_item *curr, *prev;
940 	int ret = 0;
941 
942 do_again:
943 	mutex_lock(&node->mutex);
944 	curr = __btrfs_first_delayed_deletion_item(node);
945 	if (!curr)
946 		goto delete_fail;
947 
948 	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
949 	if (ret < 0)
950 		goto delete_fail;
951 	else if (ret > 0) {
		/*
		 * We can't find the item this delayed item points to, so it
		 * is stale; just drop it.
		 */
956 		prev = curr;
957 		curr = __btrfs_next_delayed_item(prev);
958 		btrfs_release_delayed_item(prev);
959 		ret = 0;
960 		btrfs_release_path(path);
961 		if (curr) {
962 			mutex_unlock(&node->mutex);
963 			goto do_again;
964 		} else
965 			goto delete_fail;
966 	}
967 
968 	btrfs_batch_delete_items(trans, root, path, curr);
969 	btrfs_release_path(path);
970 	mutex_unlock(&node->mutex);
971 	goto do_again;
972 
973 delete_fail:
974 	btrfs_release_path(path);
975 	mutex_unlock(&node->mutex);
976 	return ret;
977 }
978 
979 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
980 {
981 	struct btrfs_delayed_root *delayed_root;
982 
983 	if (delayed_node &&
984 	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
985 		BUG_ON(!delayed_node->root);
986 		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
987 		delayed_node->count--;
988 
989 		delayed_root = delayed_node->root->fs_info->delayed_root;
990 		finish_one_item(delayed_root);
991 	}
992 }
993 
994 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
995 {
996 	struct btrfs_delayed_root *delayed_root;
997 
998 	ASSERT(delayed_node->root);
999 	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1000 	delayed_node->count--;
1001 
1002 	delayed_root = delayed_node->root->fs_info->delayed_root;
1003 	finish_one_item(delayed_root);
1004 }
1005 
1006 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1007 					struct btrfs_root *root,
1008 					struct btrfs_path *path,
1009 					struct btrfs_delayed_node *node)
1010 {
1011 	struct btrfs_fs_info *fs_info = root->fs_info;
1012 	struct btrfs_key key;
1013 	struct btrfs_inode_item *inode_item;
1014 	struct extent_buffer *leaf;
1015 	int mod;
1016 	int ret;
1017 
1018 	key.objectid = node->inode_id;
1019 	key.type = BTRFS_INODE_ITEM_KEY;
1020 	key.offset = 0;
1021 
1022 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1023 		mod = -1;
1024 	else
1025 		mod = 1;
1026 
1027 	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1028 	if (ret > 0) {
1029 		btrfs_release_path(path);
1030 		return -ENOENT;
1031 	} else if (ret < 0) {
1032 		return ret;
1033 	}
1034 
1035 	leaf = path->nodes[0];
1036 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1037 				    struct btrfs_inode_item);
1038 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1039 			    sizeof(struct btrfs_inode_item));
1040 	btrfs_mark_buffer_dirty(leaf);
1041 
1042 	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1043 		goto no_iref;
1044 
1045 	path->slots[0]++;
1046 	if (path->slots[0] >= btrfs_header_nritems(leaf))
1047 		goto search;
1048 again:
1049 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1050 	if (key.objectid != node->inode_id)
1051 		goto out;
1052 
1053 	if (key.type != BTRFS_INODE_REF_KEY &&
1054 	    key.type != BTRFS_INODE_EXTREF_KEY)
1055 		goto out;
1056 
	/*
	 * Delayed iref deletion is only done for an inode that has a single
	 * link, so there is only one iref; the case of several irefs packed
	 * into the same item cannot happen here.
	 */
1062 	btrfs_del_item(trans, root, path);
1063 out:
1064 	btrfs_release_delayed_iref(node);
1065 no_iref:
1066 	btrfs_release_path(path);
1067 err_out:
1068 	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1069 	btrfs_release_delayed_inode(node);
1070 
1071 	return ret;
1072 
1073 search:
1074 	btrfs_release_path(path);
1075 
1076 	key.type = BTRFS_INODE_EXTREF_KEY;
1077 	key.offset = -1;
1078 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1079 	if (ret < 0)
1080 		goto err_out;
1081 	ASSERT(ret);
1082 
1083 	ret = 0;
1084 	leaf = path->nodes[0];
1085 	path->slots[0]--;
1086 	goto again;
1087 }
1088 
1089 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1090 					     struct btrfs_root *root,
1091 					     struct btrfs_path *path,
1092 					     struct btrfs_delayed_node *node)
1093 {
1094 	int ret;
1095 
1096 	mutex_lock(&node->mutex);
1097 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1098 		mutex_unlock(&node->mutex);
1099 		return 0;
1100 	}
1101 
1102 	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1103 	mutex_unlock(&node->mutex);
1104 	return ret;
1105 }
1106 
1107 static inline int
1108 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1109 				   struct btrfs_path *path,
1110 				   struct btrfs_delayed_node *node)
1111 {
1112 	int ret;
1113 
1114 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1115 	if (ret)
1116 		return ret;
1117 
1118 	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1119 	if (ret)
1120 		return ret;
1121 
1122 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1123 	return ret;
1124 }
1125 
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error; in that case the transaction is aborted and any
 * outstanding delayed items are cleaned up.
 */
1132 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1133 {
1134 	struct btrfs_fs_info *fs_info = trans->fs_info;
1135 	struct btrfs_delayed_root *delayed_root;
1136 	struct btrfs_delayed_node *curr_node, *prev_node;
1137 	struct btrfs_path *path;
1138 	struct btrfs_block_rsv *block_rsv;
1139 	int ret = 0;
1140 	bool count = (nr > 0);
1141 
1142 	if (trans->aborted)
1143 		return -EIO;
1144 
1145 	path = btrfs_alloc_path();
1146 	if (!path)
1147 		return -ENOMEM;
1148 	path->leave_spinning = 1;
1149 
1150 	block_rsv = trans->block_rsv;
1151 	trans->block_rsv = &fs_info->delayed_block_rsv;
1152 
1153 	delayed_root = fs_info->delayed_root;
1154 
1155 	curr_node = btrfs_first_delayed_node(delayed_root);
1156 	while (curr_node && (!count || (count && nr--))) {
1157 		ret = __btrfs_commit_inode_delayed_items(trans, path,
1158 							 curr_node);
1159 		if (ret) {
1160 			btrfs_release_delayed_node(curr_node);
1161 			curr_node = NULL;
1162 			btrfs_abort_transaction(trans, ret);
1163 			break;
1164 		}
1165 
1166 		prev_node = curr_node;
1167 		curr_node = btrfs_next_delayed_node(curr_node);
1168 		btrfs_release_delayed_node(prev_node);
1169 	}
1170 
1171 	if (curr_node)
1172 		btrfs_release_delayed_node(curr_node);
1173 	btrfs_free_path(path);
1174 	trans->block_rsv = block_rsv;
1175 
1176 	return ret;
1177 }
1178 
1179 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1180 {
1181 	return __btrfs_run_delayed_items(trans, -1);
1182 }
1183 
1184 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1185 {
1186 	return __btrfs_run_delayed_items(trans, nr);
1187 }
1188 
1189 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1190 				     struct btrfs_inode *inode)
1191 {
1192 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1193 	struct btrfs_path *path;
1194 	struct btrfs_block_rsv *block_rsv;
1195 	int ret;
1196 
1197 	if (!delayed_node)
1198 		return 0;
1199 
1200 	mutex_lock(&delayed_node->mutex);
1201 	if (!delayed_node->count) {
1202 		mutex_unlock(&delayed_node->mutex);
1203 		btrfs_release_delayed_node(delayed_node);
1204 		return 0;
1205 	}
1206 	mutex_unlock(&delayed_node->mutex);
1207 
1208 	path = btrfs_alloc_path();
1209 	if (!path) {
1210 		btrfs_release_delayed_node(delayed_node);
1211 		return -ENOMEM;
1212 	}
1213 	path->leave_spinning = 1;
1214 
1215 	block_rsv = trans->block_rsv;
1216 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1217 
1218 	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1219 
1220 	btrfs_release_delayed_node(delayed_node);
1221 	btrfs_free_path(path);
1222 	trans->block_rsv = block_rsv;
1223 
1224 	return ret;
1225 }
1226 
1227 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1228 {
1229 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1230 	struct btrfs_trans_handle *trans;
1231 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1232 	struct btrfs_path *path;
1233 	struct btrfs_block_rsv *block_rsv;
1234 	int ret;
1235 
1236 	if (!delayed_node)
1237 		return 0;
1238 
1239 	mutex_lock(&delayed_node->mutex);
1240 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1241 		mutex_unlock(&delayed_node->mutex);
1242 		btrfs_release_delayed_node(delayed_node);
1243 		return 0;
1244 	}
1245 	mutex_unlock(&delayed_node->mutex);
1246 
1247 	trans = btrfs_join_transaction(delayed_node->root);
1248 	if (IS_ERR(trans)) {
1249 		ret = PTR_ERR(trans);
1250 		goto out;
1251 	}
1252 
1253 	path = btrfs_alloc_path();
1254 	if (!path) {
1255 		ret = -ENOMEM;
1256 		goto trans_out;
1257 	}
1258 	path->leave_spinning = 1;
1259 
1260 	block_rsv = trans->block_rsv;
1261 	trans->block_rsv = &fs_info->delayed_block_rsv;
1262 
1263 	mutex_lock(&delayed_node->mutex);
1264 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1265 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1266 						   path, delayed_node);
1267 	else
1268 		ret = 0;
1269 	mutex_unlock(&delayed_node->mutex);
1270 
1271 	btrfs_free_path(path);
1272 	trans->block_rsv = block_rsv;
1273 trans_out:
1274 	btrfs_end_transaction(trans);
1275 	btrfs_btree_balance_dirty(fs_info);
1276 out:
1277 	btrfs_release_delayed_node(delayed_node);
1278 
1279 	return ret;
1280 }
1281 
1282 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1283 {
1284 	struct btrfs_delayed_node *delayed_node;
1285 
1286 	delayed_node = READ_ONCE(inode->delayed_node);
1287 	if (!delayed_node)
1288 		return;
1289 
1290 	inode->delayed_node = NULL;
1291 	btrfs_release_delayed_node(delayed_node);
1292 }
1293 
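/*
 * Work item for asynchronous flushing of delayed nodes: nr == 0 means keep
 * flushing until the backlog has dropped, otherwise flush at most nr nodes.
 */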
1294 struct btrfs_async_delayed_work {
1295 	struct btrfs_delayed_root *delayed_root;
1296 	int nr;
1297 	struct btrfs_work work;
1298 };
1299 
1300 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1301 {
1302 	struct btrfs_async_delayed_work *async_work;
1303 	struct btrfs_delayed_root *delayed_root;
1304 	struct btrfs_trans_handle *trans;
1305 	struct btrfs_path *path;
1306 	struct btrfs_delayed_node *delayed_node = NULL;
1307 	struct btrfs_root *root;
1308 	struct btrfs_block_rsv *block_rsv;
1309 	int total_done = 0;
1310 
1311 	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1312 	delayed_root = async_work->delayed_root;
1313 
1314 	path = btrfs_alloc_path();
1315 	if (!path)
1316 		goto out;
1317 
1318 	do {
1319 		if (atomic_read(&delayed_root->items) <
1320 		    BTRFS_DELAYED_BACKGROUND / 2)
1321 			break;
1322 
1323 		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1324 		if (!delayed_node)
1325 			break;
1326 
1327 		path->leave_spinning = 1;
1328 		root = delayed_node->root;
1329 
1330 		trans = btrfs_join_transaction(root);
1331 		if (IS_ERR(trans)) {
1332 			btrfs_release_path(path);
1333 			btrfs_release_prepared_delayed_node(delayed_node);
1334 			total_done++;
1335 			continue;
1336 		}
1337 
1338 		block_rsv = trans->block_rsv;
1339 		trans->block_rsv = &root->fs_info->delayed_block_rsv;
1340 
1341 		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1342 
1343 		trans->block_rsv = block_rsv;
1344 		btrfs_end_transaction(trans);
1345 		btrfs_btree_balance_dirty_nodelay(root->fs_info);
1346 
1347 		btrfs_release_path(path);
1348 		btrfs_release_prepared_delayed_node(delayed_node);
1349 		total_done++;
1350 
1351 	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1352 		 || total_done < async_work->nr);
1353 
1354 	btrfs_free_path(path);
1355 out:
1356 	wake_up(&delayed_root->wait);
1357 	kfree(async_work);
1358 }
1359 
1360 
1361 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1362 				     struct btrfs_fs_info *fs_info, int nr)
1363 {
1364 	struct btrfs_async_delayed_work *async_work;
1365 
1366 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1367 	if (!async_work)
1368 		return -ENOMEM;
1369 
1370 	async_work->delayed_root = delayed_root;
1371 	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1372 			btrfs_async_run_delayed_root, NULL, NULL);
1373 	async_work->nr = nr;
1374 
1375 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1376 	return 0;
1377 }
1378 
1379 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1380 {
1381 	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1382 }
1383 
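/*
 * A waiter may stop waiting once a whole batch of items has been flushed
 * since it sampled @seq, or once the backlog has dropped below the
 * background threshold.
 */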
1384 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1385 {
1386 	int val = atomic_read(&delayed_root->items_seq);
1387 
1388 	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1389 		return 1;
1390 
1391 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1392 		return 1;
1393 
1394 	return 0;
1395 }
1396 
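/*
 * Throttle producers of delayed items: do nothing while the backlog is
 * below the background threshold, kick one batch of async flushing above
 * it, and block until the flusher makes progress once the writeback
 * threshold has been reached.
 */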
1397 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1398 {
1399 	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1400 
1401 	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1402 		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1403 		return;
1404 
1405 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1406 		int seq;
1407 		int ret;
1408 
1409 		seq = atomic_read(&delayed_root->items_seq);
1410 
1411 		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1412 		if (ret)
1413 			return;
1414 
1415 		wait_event_interruptible(delayed_root->wait,
1416 					 could_end_wait(delayed_root, seq));
1417 		return;
1418 	}
1419 
1420 	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1421 }
1422 
1423 /* Will return 0 or -ENOMEM */
1424 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1425 				   struct btrfs_fs_info *fs_info,
1426 				   const char *name, int name_len,
1427 				   struct btrfs_inode *dir,
1428 				   struct btrfs_disk_key *disk_key, u8 type,
1429 				   u64 index)
1430 {
1431 	struct btrfs_delayed_node *delayed_node;
1432 	struct btrfs_delayed_item *delayed_item;
1433 	struct btrfs_dir_item *dir_item;
1434 	int ret;
1435 
1436 	delayed_node = btrfs_get_or_create_delayed_node(dir);
1437 	if (IS_ERR(delayed_node))
1438 		return PTR_ERR(delayed_node);
1439 
1440 	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1441 	if (!delayed_item) {
1442 		ret = -ENOMEM;
1443 		goto release_node;
1444 	}
1445 
1446 	delayed_item->key.objectid = btrfs_ino(dir);
1447 	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1448 	delayed_item->key.offset = index;
1449 
1450 	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1451 	dir_item->location = *disk_key;
1452 	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1453 	btrfs_set_stack_dir_data_len(dir_item, 0);
1454 	btrfs_set_stack_dir_name_len(dir_item, name_len);
1455 	btrfs_set_stack_dir_type(dir_item, type);
1456 	memcpy((char *)(dir_item + 1), name, name_len);
1457 
1458 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * We reserved enough space when we started a new transaction, so a
	 * metadata reservation failure is impossible here.
	 */
	BUG_ON(ret);

1466 	mutex_lock(&delayed_node->mutex);
1467 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1468 	if (unlikely(ret)) {
1469 		btrfs_err(fs_info,
1470 			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1471 			  name_len, name, delayed_node->root->objectid,
1472 			  delayed_node->inode_id, ret);
1473 		BUG();
1474 	}
1475 	mutex_unlock(&delayed_node->mutex);
1476 
1477 release_node:
1478 	btrfs_release_delayed_node(delayed_node);
1479 	return ret;
1480 }
1481 
1482 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1483 					       struct btrfs_delayed_node *node,
1484 					       struct btrfs_key *key)
1485 {
1486 	struct btrfs_delayed_item *item;
1487 
1488 	mutex_lock(&node->mutex);
1489 	item = __btrfs_lookup_delayed_insertion_item(node, key);
1490 	if (!item) {
1491 		mutex_unlock(&node->mutex);
1492 		return 1;
1493 	}
1494 
1495 	btrfs_delayed_item_release_metadata(node->root, item);
1496 	btrfs_release_delayed_item(item);
1497 	mutex_unlock(&node->mutex);
1498 	return 0;
1499 }
1500 
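/*
 * Delete the dir index entry of @dir at @index.  If a matching insertion
 * item is still queued, the two cancel each other out; otherwise queue a
 * deletion item to remove the entry from the tree later.
 */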
1501 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1502 				   struct btrfs_fs_info *fs_info,
1503 				   struct btrfs_inode *dir, u64 index)
1504 {
1505 	struct btrfs_delayed_node *node;
1506 	struct btrfs_delayed_item *item;
1507 	struct btrfs_key item_key;
1508 	int ret;
1509 
1510 	node = btrfs_get_or_create_delayed_node(dir);
1511 	if (IS_ERR(node))
1512 		return PTR_ERR(node);
1513 
1514 	item_key.objectid = btrfs_ino(dir);
1515 	item_key.type = BTRFS_DIR_INDEX_KEY;
1516 	item_key.offset = index;
1517 
1518 	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
1519 	if (!ret)
1520 		goto end;
1521 
1522 	item = btrfs_alloc_delayed_item(0);
1523 	if (!item) {
1524 		ret = -ENOMEM;
1525 		goto end;
1526 	}
1527 
1528 	item->key = item_key;
1529 
1530 	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * We reserved enough space when we started a new transaction, so a
	 * metadata reservation failure is impossible here.
	 */
	BUG_ON(ret);
1536 
1537 	mutex_lock(&node->mutex);
1538 	ret = __btrfs_add_delayed_deletion_item(node, item);
1539 	if (unlikely(ret)) {
1540 		btrfs_err(fs_info,
1541 			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1542 			  index, node->root->objectid, node->inode_id, ret);
1543 		BUG();
1544 	}
1545 	mutex_unlock(&node->mutex);
1546 end:
1547 	btrfs_release_delayed_node(node);
1548 	return ret;
1549 }
1550 
1551 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1552 {
1553 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1554 
1555 	if (!delayed_node)
1556 		return -ENOENT;
1557 
	/*
	 * Since we hold the i_mutex of this directory, no new directory index
	 * can be added to the delayed node and index_cnt cannot be updated
	 * right now, so we don't need to lock the delayed node.
	 */
1563 	if (!delayed_node->index_cnt) {
1564 		btrfs_release_delayed_node(delayed_node);
1565 		return -EINVAL;
1566 	}
1567 
1568 	inode->index_cnt = delayed_node->index_cnt;
1569 	btrfs_release_delayed_node(delayed_node);
1570 	return 0;
1571 }
1572 
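/*
 * Snapshot the pending insertion and deletion items of a directory for
 * readdir.  The caller must return both lists via
 * btrfs_readdir_put_delayed_items() when it is done with them.
 */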
1573 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1574 				     struct list_head *ins_list,
1575 				     struct list_head *del_list)
1576 {
1577 	struct btrfs_delayed_node *delayed_node;
1578 	struct btrfs_delayed_item *item;
1579 
1580 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1581 	if (!delayed_node)
1582 		return false;
1583 
1584 	/*
1585 	 * We can only do one readdir with delayed items at a time because of
1586 	 * item->readdir_list.
1587 	 */
1588 	inode_unlock_shared(inode);
1589 	inode_lock(inode);
1590 
1591 	mutex_lock(&delayed_node->mutex);
1592 	item = __btrfs_first_delayed_insertion_item(delayed_node);
1593 	while (item) {
1594 		refcount_inc(&item->refs);
1595 		list_add_tail(&item->readdir_list, ins_list);
1596 		item = __btrfs_next_delayed_item(item);
1597 	}
1598 
1599 	item = __btrfs_first_delayed_deletion_item(delayed_node);
1600 	while (item) {
1601 		refcount_inc(&item->refs);
1602 		list_add_tail(&item->readdir_list, del_list);
1603 		item = __btrfs_next_delayed_item(item);
1604 	}
1605 	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs must
	 * be > 1 here, and we don't need to check whether it is about to be
	 * freed.
	 *
	 * Besides that, this function is only used for readdir, and no
	 * delayed items are inserted or deleted in the meantime, so we don't
	 * need to requeue or dequeue this delayed node either.
	 */
1615 	refcount_dec(&delayed_node->refs);
1616 
1617 	return true;
1618 }
1619 
1620 void btrfs_readdir_put_delayed_items(struct inode *inode,
1621 				     struct list_head *ins_list,
1622 				     struct list_head *del_list)
1623 {
1624 	struct btrfs_delayed_item *curr, *next;
1625 
1626 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1627 		list_del(&curr->readdir_list);
1628 		if (refcount_dec_and_test(&curr->refs))
1629 			kfree(curr);
1630 	}
1631 
1632 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1633 		list_del(&curr->readdir_list);
1634 		if (refcount_dec_and_test(&curr->refs))
1635 			kfree(curr);
1636 	}
1637 
1638 	/*
1639 	 * The VFS is going to do up_read(), so we need to downgrade back to a
1640 	 * read lock.
1641 	 */
1642 	downgrade_write(&inode->i_rwsem);
1643 }
1644 
1645 int btrfs_should_delete_dir_index(struct list_head *del_list,
1646 				  u64 index)
1647 {
1648 	struct btrfs_delayed_item *curr;
1649 	int ret = 0;
1650 
1651 	list_for_each_entry(curr, del_list, readdir_list) {
1652 		if (curr->key.offset > index)
1653 			break;
1654 		if (curr->key.offset == index) {
1655 			ret = 1;
1656 			break;
1657 		}
1658 	}
1659 	return ret;
1660 }
1661 
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
1666 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1667 				    struct list_head *ins_list)
1668 {
1669 	struct btrfs_dir_item *di;
1670 	struct btrfs_delayed_item *curr, *next;
1671 	struct btrfs_key location;
1672 	char *name;
1673 	int name_len;
1674 	int over = 0;
1675 	unsigned char d_type;
1676 
1677 	if (list_empty(ins_list))
1678 		return 0;
1679 
	/*
	 * The data of a delayed item never changes, so we don't need to lock
	 * the items.  And since we hold the i_mutex of the directory, nobody
	 * can delete any directory index right now.
	 */
1685 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1686 		list_del(&curr->readdir_list);
1687 
1688 		if (curr->key.offset < ctx->pos) {
1689 			if (refcount_dec_and_test(&curr->refs))
1690 				kfree(curr);
1691 			continue;
1692 		}
1693 
1694 		ctx->pos = curr->key.offset;
1695 
1696 		di = (struct btrfs_dir_item *)curr->data;
1697 		name = (char *)(di + 1);
1698 		name_len = btrfs_stack_dir_name_len(di);
1699 
1700 		d_type = btrfs_filetype_table[di->type];
1701 		btrfs_disk_key_to_cpu(&location, &di->location);
1702 
1703 		over = !dir_emit(ctx, name, name_len,
1704 			       location.objectid, d_type);
1705 
1706 		if (refcount_dec_and_test(&curr->refs))
1707 			kfree(curr);
1708 
1709 		if (over)
1710 			return 1;
1711 		ctx->pos++;
1712 	}
1713 	return 0;
1714 }
1715 
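/*
 * Copy the relevant VFS inode fields into the stack inode item that is
 * cached in the delayed node until it is written back to the inode item
 * in the tree.
 */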
1716 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1717 				  struct btrfs_inode_item *inode_item,
1718 				  struct inode *inode)
1719 {
1720 	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1721 	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1722 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1723 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1724 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1725 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1726 	btrfs_set_stack_inode_generation(inode_item,
1727 					 BTRFS_I(inode)->generation);
1728 	btrfs_set_stack_inode_sequence(inode_item,
1729 				       inode_peek_iversion(inode));
1730 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1731 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1732 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1733 	btrfs_set_stack_inode_block_group(inode_item, 0);
1734 
1735 	btrfs_set_stack_timespec_sec(&inode_item->atime,
1736 				     inode->i_atime.tv_sec);
1737 	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1738 				      inode->i_atime.tv_nsec);
1739 
1740 	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1741 				     inode->i_mtime.tv_sec);
1742 	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1743 				      inode->i_mtime.tv_nsec);
1744 
1745 	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1746 				     inode->i_ctime.tv_sec);
1747 	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1748 				      inode->i_ctime.tv_nsec);
1749 
1750 	btrfs_set_stack_timespec_sec(&inode_item->otime,
1751 				     BTRFS_I(inode)->i_otime.tv_sec);
1752 	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1753 				     BTRFS_I(inode)->i_otime.tv_nsec);
1754 }
1755 
1756 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1757 {
1758 	struct btrfs_delayed_node *delayed_node;
1759 	struct btrfs_inode_item *inode_item;
1760 
1761 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1762 	if (!delayed_node)
1763 		return -ENOENT;
1764 
1765 	mutex_lock(&delayed_node->mutex);
1766 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1767 		mutex_unlock(&delayed_node->mutex);
1768 		btrfs_release_delayed_node(delayed_node);
1769 		return -ENOENT;
1770 	}
1771 
1772 	inode_item = &delayed_node->inode_item;
1773 
1774 	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1775 	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1776 	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1777 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1778 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1779 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1780 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1782 
1783 	inode_set_iversion_queried(inode,
1784 				   btrfs_stack_inode_sequence(inode_item));
1785 	inode->i_rdev = 0;
1786 	*rdev = btrfs_stack_inode_rdev(inode_item);
1787 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1788 
1789 	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1790 	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1791 
1792 	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1793 	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1794 
1795 	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1796 	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1797 
1798 	BTRFS_I(inode)->i_otime.tv_sec =
1799 		btrfs_stack_timespec_sec(&inode_item->otime);
1800 	BTRFS_I(inode)->i_otime.tv_nsec =
1801 		btrfs_stack_timespec_nsec(&inode_item->otime);
1802 
1803 	inode->i_generation = BTRFS_I(inode)->generation;
1804 	BTRFS_I(inode)->index_cnt = (u64)-1;
1805 
1806 	mutex_unlock(&delayed_node->mutex);
1807 	btrfs_release_delayed_node(delayed_node);
1808 	return 0;
1809 }
1810 
1811 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1812 			       struct btrfs_root *root, struct inode *inode)
1813 {
1814 	struct btrfs_delayed_node *delayed_node;
1815 	int ret = 0;
1816 
1817 	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1818 	if (IS_ERR(delayed_node))
1819 		return PTR_ERR(delayed_node);
1820 
1821 	mutex_lock(&delayed_node->mutex);
1822 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1823 		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1824 		goto release_node;
1825 	}
1826 
1827 	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1828 						   delayed_node);
1829 	if (ret)
1830 		goto release_node;
1831 
1832 	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1833 	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1834 	delayed_node->count++;
1835 	atomic_inc(&root->fs_info->delayed_root->items);
1836 release_node:
1837 	mutex_unlock(&delayed_node->mutex);
1838 	btrfs_release_delayed_node(delayed_node);
1839 	return ret;
1840 }
1841 
1842 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1843 {
1844 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1845 	struct btrfs_delayed_node *delayed_node;
1846 
	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to ENOSPC problems.  This means we also can't do delayed
	 * inode ref deletions.
	 */
1852 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1853 		return -EAGAIN;
1854 
1855 	delayed_node = btrfs_get_or_create_delayed_node(inode);
1856 	if (IS_ERR(delayed_node))
1857 		return PTR_ERR(delayed_node);
1858 
	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   In most cases the inode ref and the inode item are in the same
	 *   leaf, and we deal with them at the same time.  Since we are sure
	 *   we will reserve space for the inode item, it is unnecessary to
	 *   reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf, we
	 *   also needn't worry about ENOSPC, because we reserve much more
	 *   space for the inode update than it needs.
	 * - At the very worst, we can steal some space from the global
	 *   reservation, but that is very rare.
	 */
1873 	mutex_lock(&delayed_node->mutex);
1874 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1875 		goto release_node;
1876 
1877 	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1878 	delayed_node->count++;
1879 	atomic_inc(&fs_info->delayed_root->items);
1880 release_node:
1881 	mutex_unlock(&delayed_node->mutex);
1882 	btrfs_release_delayed_node(delayed_node);
1883 	return 0;
1884 }
1885 
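/*
 * Drop all pending items and reservations of @delayed_node without writing
 * anything back, used when the inode or the whole root is being destroyed.
 */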
1886 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1887 {
1888 	struct btrfs_root *root = delayed_node->root;
1889 	struct btrfs_fs_info *fs_info = root->fs_info;
1890 	struct btrfs_delayed_item *curr_item, *prev_item;
1891 
1892 	mutex_lock(&delayed_node->mutex);
1893 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1894 	while (curr_item) {
1895 		btrfs_delayed_item_release_metadata(root, curr_item);
1896 		prev_item = curr_item;
1897 		curr_item = __btrfs_next_delayed_item(prev_item);
1898 		btrfs_release_delayed_item(prev_item);
1899 	}
1900 
1901 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1902 	while (curr_item) {
1903 		btrfs_delayed_item_release_metadata(root, curr_item);
1904 		prev_item = curr_item;
1905 		curr_item = __btrfs_next_delayed_item(prev_item);
1906 		btrfs_release_delayed_item(prev_item);
1907 	}
1908 
1909 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1910 		btrfs_release_delayed_iref(delayed_node);
1911 
1912 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1913 		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1914 		btrfs_release_delayed_inode(delayed_node);
1915 	}
1916 	mutex_unlock(&delayed_node->mutex);
1917 }
1918 
1919 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1920 {
1921 	struct btrfs_delayed_node *delayed_node;
1922 
1923 	delayed_node = btrfs_get_delayed_node(inode);
1924 	if (!delayed_node)
1925 		return;
1926 
1927 	__btrfs_kill_delayed_node(delayed_node);
1928 	btrfs_release_delayed_node(delayed_node);
1929 }
1930 
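/*
 * Walk the root's delayed_nodes_tree in batches of up to eight nodes,
 * grabbing a reference on each under inode_lock before killing and
 * releasing it.
 */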
1931 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1932 {
1933 	u64 inode_id = 0;
1934 	struct btrfs_delayed_node *delayed_nodes[8];
1935 	int i, n;
1936 
1937 	while (1) {
1938 		spin_lock(&root->inode_lock);
1939 		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1940 					   (void **)delayed_nodes, inode_id,
1941 					   ARRAY_SIZE(delayed_nodes));
1942 		if (!n) {
1943 			spin_unlock(&root->inode_lock);
1944 			break;
1945 		}
1946 
1947 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1948 
1949 		for (i = 0; i < n; i++)
1950 			refcount_inc(&delayed_nodes[i]->refs);
1951 		spin_unlock(&root->inode_lock);
1952 
1953 		for (i = 0; i < n; i++) {
1954 			__btrfs_kill_delayed_node(delayed_nodes[i]);
1955 			btrfs_release_delayed_node(delayed_nodes[i]);
1956 		}
1957 	}
1958 }
1959 
1960 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1961 {
1962 	struct btrfs_delayed_node *curr_node, *prev_node;
1963 
1964 	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1965 	while (curr_node) {
1966 		__btrfs_kill_delayed_node(curr_node);
1967 
1968 		prev_node = curr_node;
1969 		curr_node = btrfs_next_delayed_node(curr_node);
1970 		btrfs_release_delayed_node(prev_node);
1971 	}
1972 }
1973 
1974