xref: /openbmc/linux/fs/btrfs/delayed-inode.c (revision 6d668dda)
116cdcec7SMiao Xie /*
216cdcec7SMiao Xie  * Copyright (C) 2011 Fujitsu.  All rights reserved.
316cdcec7SMiao Xie  * Written by Miao Xie <miaox@cn.fujitsu.com>
416cdcec7SMiao Xie  *
516cdcec7SMiao Xie  * This program is free software; you can redistribute it and/or
616cdcec7SMiao Xie  * modify it under the terms of the GNU General Public
716cdcec7SMiao Xie  * License v2 as published by the Free Software Foundation.
816cdcec7SMiao Xie  *
916cdcec7SMiao Xie  * This program is distributed in the hope that it will be useful,
1016cdcec7SMiao Xie  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1116cdcec7SMiao Xie  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1216cdcec7SMiao Xie  * General Public License for more details.
1316cdcec7SMiao Xie  *
1416cdcec7SMiao Xie  * You should have received a copy of the GNU General Public
1516cdcec7SMiao Xie  * License along with this program; if not, write to the
1616cdcec7SMiao Xie  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1716cdcec7SMiao Xie  * Boston, MA 021110-1307, USA.
1816cdcec7SMiao Xie  */
1916cdcec7SMiao Xie 
2016cdcec7SMiao Xie #include <linux/slab.h>
2116cdcec7SMiao Xie #include "delayed-inode.h"
2216cdcec7SMiao Xie #include "disk-io.h"
2316cdcec7SMiao Xie #include "transaction.h"
2416cdcec7SMiao Xie 
2516cdcec7SMiao Xie #define BTRFS_DELAYED_WRITEBACK		400
2616cdcec7SMiao Xie #define BTRFS_DELAYED_BACKGROUND	100
2716cdcec7SMiao Xie 
2816cdcec7SMiao Xie static struct kmem_cache *delayed_node_cache;
2916cdcec7SMiao Xie 
3016cdcec7SMiao Xie int __init btrfs_delayed_inode_init(void)
3116cdcec7SMiao Xie {
3216cdcec7SMiao Xie 	delayed_node_cache = kmem_cache_create("delayed_node",
3316cdcec7SMiao Xie 					sizeof(struct btrfs_delayed_node),
3416cdcec7SMiao Xie 					0,
3516cdcec7SMiao Xie 					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
3616cdcec7SMiao Xie 					NULL);
3716cdcec7SMiao Xie 	if (!delayed_node_cache)
3816cdcec7SMiao Xie 		return -ENOMEM;
3916cdcec7SMiao Xie 	return 0;
4016cdcec7SMiao Xie }
4116cdcec7SMiao Xie 
4216cdcec7SMiao Xie void btrfs_delayed_inode_exit(void)
4316cdcec7SMiao Xie {
4416cdcec7SMiao Xie 	if (delayed_node_cache)
4516cdcec7SMiao Xie 		kmem_cache_destroy(delayed_node_cache);
4616cdcec7SMiao Xie }
4716cdcec7SMiao Xie 
4816cdcec7SMiao Xie static inline void btrfs_init_delayed_node(
4916cdcec7SMiao Xie 				struct btrfs_delayed_node *delayed_node,
5016cdcec7SMiao Xie 				struct btrfs_root *root, u64 inode_id)
5116cdcec7SMiao Xie {
5216cdcec7SMiao Xie 	delayed_node->root = root;
5316cdcec7SMiao Xie 	delayed_node->inode_id = inode_id;
5416cdcec7SMiao Xie 	atomic_set(&delayed_node->refs, 0);
5516cdcec7SMiao Xie 	delayed_node->count = 0;
5616cdcec7SMiao Xie 	delayed_node->in_list = 0;
5716cdcec7SMiao Xie 	delayed_node->inode_dirty = 0;
5816cdcec7SMiao Xie 	delayed_node->ins_root = RB_ROOT;
5916cdcec7SMiao Xie 	delayed_node->del_root = RB_ROOT;
6016cdcec7SMiao Xie 	mutex_init(&delayed_node->mutex);
6116cdcec7SMiao Xie 	delayed_node->index_cnt = 0;
6216cdcec7SMiao Xie 	INIT_LIST_HEAD(&delayed_node->n_list);
6316cdcec7SMiao Xie 	INIT_LIST_HEAD(&delayed_node->p_list);
6416cdcec7SMiao Xie 	delayed_node->bytes_reserved = 0;
6516cdcec7SMiao Xie }
6616cdcec7SMiao Xie 
6716cdcec7SMiao Xie static inline int btrfs_is_continuous_delayed_item(
6816cdcec7SMiao Xie 					struct btrfs_delayed_item *item1,
6916cdcec7SMiao Xie 					struct btrfs_delayed_item *item2)
7016cdcec7SMiao Xie {
7116cdcec7SMiao Xie 	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
7216cdcec7SMiao Xie 	    item1->key.objectid == item2->key.objectid &&
7316cdcec7SMiao Xie 	    item1->key.type == item2->key.type &&
7416cdcec7SMiao Xie 	    item1->key.offset + 1 == item2->key.offset)
7516cdcec7SMiao Xie 		return 1;
7616cdcec7SMiao Xie 	return 0;
7716cdcec7SMiao Xie }
7816cdcec7SMiao Xie 
7916cdcec7SMiao Xie static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
8016cdcec7SMiao Xie 							struct btrfs_root *root)
8116cdcec7SMiao Xie {
8216cdcec7SMiao Xie 	return root->fs_info->delayed_root;
8316cdcec7SMiao Xie }
8416cdcec7SMiao Xie 
/*
 * Look up the delayed node for @inode, taking a reference on it.
 *
 * Tries the pointer cached on the btrfs inode first (lockless), then
 * falls back to the root's radix tree under inode_lock, caching the
 * result on the inode. Returns the node with a reference held, or NULL
 * if the inode has no delayed node.
 */
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	/* Fast path: the node is already cached on the inode. */
	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	/* Slow path: look it up under inode_lock so it cannot vanish. */
	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			/*
			 * A concurrent task cached the node after our
			 * lockless check above; it must be the same node.
			 */
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		/* Cache it ourselves: one ref for the cache, one for the caller. */
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
1172f7e33d4SMiao Xie 
/*
 * Get the delayed node for @inode, allocating one and inserting it into
 * the root's radix tree if it does not exist yet.
 *
 * Returns the node with a reference held, or an ERR_PTR on allocation
 * or radix-tree preload failure.
 */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* Two references: one for the inode cache, one for the caller. */
	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	/* Preload so the insert below cannot fail with -ENOMEM under the lock. */
	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		/* Raced with another inserter; drop ours and retry the lookup. */
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
16016cdcec7SMiao Xie 
16116cdcec7SMiao Xie /*
16216cdcec7SMiao Xie  * Call it when holding delayed_node->mutex
16316cdcec7SMiao Xie  *
16416cdcec7SMiao Xie  * If mod = 1, add this node into the prepared list.
16516cdcec7SMiao Xie  */
16616cdcec7SMiao Xie static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
16716cdcec7SMiao Xie 				     struct btrfs_delayed_node *node,
16816cdcec7SMiao Xie 				     int mod)
16916cdcec7SMiao Xie {
17016cdcec7SMiao Xie 	spin_lock(&root->lock);
17116cdcec7SMiao Xie 	if (node->in_list) {
17216cdcec7SMiao Xie 		if (!list_empty(&node->p_list))
17316cdcec7SMiao Xie 			list_move_tail(&node->p_list, &root->prepare_list);
17416cdcec7SMiao Xie 		else if (mod)
17516cdcec7SMiao Xie 			list_add_tail(&node->p_list, &root->prepare_list);
17616cdcec7SMiao Xie 	} else {
17716cdcec7SMiao Xie 		list_add_tail(&node->n_list, &root->node_list);
17816cdcec7SMiao Xie 		list_add_tail(&node->p_list, &root->prepare_list);
17916cdcec7SMiao Xie 		atomic_inc(&node->refs);	/* inserted into list */
18016cdcec7SMiao Xie 		root->nodes++;
18116cdcec7SMiao Xie 		node->in_list = 1;
18216cdcec7SMiao Xie 	}
18316cdcec7SMiao Xie 	spin_unlock(&root->lock);
18416cdcec7SMiao Xie }
18516cdcec7SMiao Xie 
18616cdcec7SMiao Xie /* Call it when holding delayed_node->mutex */
18716cdcec7SMiao Xie static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
18816cdcec7SMiao Xie 				       struct btrfs_delayed_node *node)
18916cdcec7SMiao Xie {
19016cdcec7SMiao Xie 	spin_lock(&root->lock);
19116cdcec7SMiao Xie 	if (node->in_list) {
19216cdcec7SMiao Xie 		root->nodes--;
19316cdcec7SMiao Xie 		atomic_dec(&node->refs);	/* not in the list */
19416cdcec7SMiao Xie 		list_del_init(&node->n_list);
19516cdcec7SMiao Xie 		if (!list_empty(&node->p_list))
19616cdcec7SMiao Xie 			list_del_init(&node->p_list);
19716cdcec7SMiao Xie 		node->in_list = 0;
19816cdcec7SMiao Xie 	}
19916cdcec7SMiao Xie 	spin_unlock(&root->lock);
20016cdcec7SMiao Xie }
20116cdcec7SMiao Xie 
20216cdcec7SMiao Xie struct btrfs_delayed_node *btrfs_first_delayed_node(
20316cdcec7SMiao Xie 			struct btrfs_delayed_root *delayed_root)
20416cdcec7SMiao Xie {
20516cdcec7SMiao Xie 	struct list_head *p;
20616cdcec7SMiao Xie 	struct btrfs_delayed_node *node = NULL;
20716cdcec7SMiao Xie 
20816cdcec7SMiao Xie 	spin_lock(&delayed_root->lock);
20916cdcec7SMiao Xie 	if (list_empty(&delayed_root->node_list))
21016cdcec7SMiao Xie 		goto out;
21116cdcec7SMiao Xie 
21216cdcec7SMiao Xie 	p = delayed_root->node_list.next;
21316cdcec7SMiao Xie 	node = list_entry(p, struct btrfs_delayed_node, n_list);
21416cdcec7SMiao Xie 	atomic_inc(&node->refs);
21516cdcec7SMiao Xie out:
21616cdcec7SMiao Xie 	spin_unlock(&delayed_root->lock);
21716cdcec7SMiao Xie 
21816cdcec7SMiao Xie 	return node;
21916cdcec7SMiao Xie }
22016cdcec7SMiao Xie 
/*
 * Return the node after @node on the delayed root's node list, with a
 * reference held, or NULL when there is no next node. If @node has
 * already been dequeued, restart from the head of the list.
 */
struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		/* @node was dequeued meanwhile: start over from the head. */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
24616cdcec7SMiao Xie 
/*
 * Drop one reference on @delayed_node.
 *
 * While the node still has pending items it is (re)queued on the
 * delayed root (also on the prepared list if @mod); otherwise it is
 * dequeued. When the last reference goes away, remove the node from
 * the root's radix tree and free it.
 */
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		/*
		 * Recheck under inode_lock: a concurrent radix-tree lookup
		 * may have grabbed a fresh reference after our decrement,
		 * so only delete and free when the count is still zero.
		 */
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}
27616cdcec7SMiao Xie 
/* Drop a reference on @node without adding it to the prepared list. */
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
28116cdcec7SMiao Xie 
28216cdcec7SMiao Xie struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
28316cdcec7SMiao Xie 					struct btrfs_delayed_root *delayed_root)
28416cdcec7SMiao Xie {
28516cdcec7SMiao Xie 	struct list_head *p;
28616cdcec7SMiao Xie 	struct btrfs_delayed_node *node = NULL;
28716cdcec7SMiao Xie 
28816cdcec7SMiao Xie 	spin_lock(&delayed_root->lock);
28916cdcec7SMiao Xie 	if (list_empty(&delayed_root->prepare_list))
29016cdcec7SMiao Xie 		goto out;
29116cdcec7SMiao Xie 
29216cdcec7SMiao Xie 	p = delayed_root->prepare_list.next;
29316cdcec7SMiao Xie 	list_del_init(p);
29416cdcec7SMiao Xie 	node = list_entry(p, struct btrfs_delayed_node, p_list);
29516cdcec7SMiao Xie 	atomic_inc(&node->refs);
29616cdcec7SMiao Xie out:
29716cdcec7SMiao Xie 	spin_unlock(&delayed_root->lock);
29816cdcec7SMiao Xie 
29916cdcec7SMiao Xie 	return node;
30016cdcec7SMiao Xie }
30116cdcec7SMiao Xie 
/*
 * Drop the reference taken by btrfs_first_prepared_delayed_node(); the
 * node goes back on the prepared list if it still has pending items.
 */
static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
30716cdcec7SMiao Xie 
30816cdcec7SMiao Xie struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
30916cdcec7SMiao Xie {
31016cdcec7SMiao Xie 	struct btrfs_delayed_item *item;
31116cdcec7SMiao Xie 	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
31216cdcec7SMiao Xie 	if (item) {
31316cdcec7SMiao Xie 		item->data_len = data_len;
31416cdcec7SMiao Xie 		item->ins_or_del = 0;
31516cdcec7SMiao Xie 		item->bytes_reserved = 0;
31616cdcec7SMiao Xie 		item->delayed_node = NULL;
31716cdcec7SMiao Xie 		atomic_set(&item->refs, 1);
31816cdcec7SMiao Xie 	}
31916cdcec7SMiao Xie 	return item;
32016cdcec7SMiao Xie }
32116cdcec7SMiao Xie 
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	  the rb-tree (insertion or deletion tree) to search
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Returns the matching item, or NULL. On a miss, *prev/*next (when
 * non-NULL) receive the items that would sort immediately before and
 * after @key, or NULL when no such neighbour exists.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	/* Standard rb-tree descent ordered by btrfs_comp_cpu_keys(). */
	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	/*
	 * Miss: prev_node is the last node visited and ret tells on which
	 * side of it @key falls, so derive the neighbours from there.
	 */
	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
38216cdcec7SMiao Xie 
38316cdcec7SMiao Xie struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
38416cdcec7SMiao Xie 					struct btrfs_delayed_node *delayed_node,
38516cdcec7SMiao Xie 					struct btrfs_key *key)
38616cdcec7SMiao Xie {
38716cdcec7SMiao Xie 	struct btrfs_delayed_item *item;
38816cdcec7SMiao Xie 
38916cdcec7SMiao Xie 	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
39016cdcec7SMiao Xie 					   NULL, NULL);
39116cdcec7SMiao Xie 	return item;
39216cdcec7SMiao Xie }
39316cdcec7SMiao Xie 
39416cdcec7SMiao Xie struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
39516cdcec7SMiao Xie 					struct btrfs_delayed_node *delayed_node,
39616cdcec7SMiao Xie 					struct btrfs_key *key)
39716cdcec7SMiao Xie {
39816cdcec7SMiao Xie 	struct btrfs_delayed_item *item;
39916cdcec7SMiao Xie 
40016cdcec7SMiao Xie 	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
40116cdcec7SMiao Xie 					   NULL, NULL);
40216cdcec7SMiao Xie 	return item;
40316cdcec7SMiao Xie }
40416cdcec7SMiao Xie 
40516cdcec7SMiao Xie struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
40616cdcec7SMiao Xie 					struct btrfs_delayed_node *delayed_node,
40716cdcec7SMiao Xie 					struct btrfs_key *key)
40816cdcec7SMiao Xie {
40916cdcec7SMiao Xie 	struct btrfs_delayed_item *item, *next;
41016cdcec7SMiao Xie 
41116cdcec7SMiao Xie 	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
41216cdcec7SMiao Xie 					   NULL, &next);
41316cdcec7SMiao Xie 	if (!item)
41416cdcec7SMiao Xie 		item = next;
41516cdcec7SMiao Xie 
41616cdcec7SMiao Xie 	return item;
41716cdcec7SMiao Xie }
41816cdcec7SMiao Xie 
41916cdcec7SMiao Xie struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
42016cdcec7SMiao Xie 					struct btrfs_delayed_node *delayed_node,
42116cdcec7SMiao Xie 					struct btrfs_key *key)
42216cdcec7SMiao Xie {
42316cdcec7SMiao Xie 	struct btrfs_delayed_item *item, *next;
42416cdcec7SMiao Xie 
42516cdcec7SMiao Xie 	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
42616cdcec7SMiao Xie 					   NULL, &next);
42716cdcec7SMiao Xie 	if (!item)
42816cdcec7SMiao Xie 		item = next;
42916cdcec7SMiao Xie 
43016cdcec7SMiao Xie 	return item;
43116cdcec7SMiao Xie }
43216cdcec7SMiao Xie 
/*
 * Insert @ins into @delayed_node's insertion or deletion rb-tree,
 * depending on @action, keyed by ins->key.
 *
 * Returns 0 on success or -EEXIST if an item with the same key is
 * already present. On success the item is bound to the node and the
 * per-node and global delayed-item counters are bumped.
 */
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	/* Walk down to the leaf position where @ins belongs. */
	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				 rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	/* Keep index_cnt one past the highest queued dir index. */
	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
			delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
48016cdcec7SMiao Xie 
/* Add @item to @node's insertion tree; 0 on success, -EEXIST on duplicate key. */
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}
48716cdcec7SMiao Xie 
/* Add @item to @node's deletion tree; 0 on success, -EEXIST on duplicate key. */
static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
49416cdcec7SMiao Xie 
49516cdcec7SMiao Xie static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
49616cdcec7SMiao Xie {
49716cdcec7SMiao Xie 	struct rb_root *root;
49816cdcec7SMiao Xie 	struct btrfs_delayed_root *delayed_root;
49916cdcec7SMiao Xie 
50016cdcec7SMiao Xie 	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
50116cdcec7SMiao Xie 
50216cdcec7SMiao Xie 	BUG_ON(!delayed_root);
50316cdcec7SMiao Xie 	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
50416cdcec7SMiao Xie 	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
50516cdcec7SMiao Xie 
50616cdcec7SMiao Xie 	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
50716cdcec7SMiao Xie 		root = &delayed_item->delayed_node->ins_root;
50816cdcec7SMiao Xie 	else
50916cdcec7SMiao Xie 		root = &delayed_item->delayed_node->del_root;
51016cdcec7SMiao Xie 
51116cdcec7SMiao Xie 	rb_erase(&delayed_item->rb_node, root);
51216cdcec7SMiao Xie 	delayed_item->delayed_node->count--;
51316cdcec7SMiao Xie 	atomic_dec(&delayed_root->items);
51416cdcec7SMiao Xie 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
51516cdcec7SMiao Xie 	    waitqueue_active(&delayed_root->wait))
51616cdcec7SMiao Xie 		wake_up(&delayed_root->wait);
51716cdcec7SMiao Xie }
51816cdcec7SMiao Xie 
51916cdcec7SMiao Xie static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
52016cdcec7SMiao Xie {
52116cdcec7SMiao Xie 	if (item) {
52216cdcec7SMiao Xie 		__btrfs_remove_delayed_item(item);
52316cdcec7SMiao Xie 		if (atomic_dec_and_test(&item->refs))
52416cdcec7SMiao Xie 			kfree(item);
52516cdcec7SMiao Xie 	}
52616cdcec7SMiao Xie }
52716cdcec7SMiao Xie 
52816cdcec7SMiao Xie struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
52916cdcec7SMiao Xie 					struct btrfs_delayed_node *delayed_node)
53016cdcec7SMiao Xie {
53116cdcec7SMiao Xie 	struct rb_node *p;
53216cdcec7SMiao Xie 	struct btrfs_delayed_item *item = NULL;
53316cdcec7SMiao Xie 
53416cdcec7SMiao Xie 	p = rb_first(&delayed_node->ins_root);
53516cdcec7SMiao Xie 	if (p)
53616cdcec7SMiao Xie 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
53716cdcec7SMiao Xie 
53816cdcec7SMiao Xie 	return item;
53916cdcec7SMiao Xie }
54016cdcec7SMiao Xie 
54116cdcec7SMiao Xie struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
54216cdcec7SMiao Xie 					struct btrfs_delayed_node *delayed_node)
54316cdcec7SMiao Xie {
54416cdcec7SMiao Xie 	struct rb_node *p;
54516cdcec7SMiao Xie 	struct btrfs_delayed_item *item = NULL;
54616cdcec7SMiao Xie 
54716cdcec7SMiao Xie 	p = rb_first(&delayed_node->del_root);
54816cdcec7SMiao Xie 	if (p)
54916cdcec7SMiao Xie 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
55016cdcec7SMiao Xie 
55116cdcec7SMiao Xie 	return item;
55216cdcec7SMiao Xie }
55316cdcec7SMiao Xie 
55416cdcec7SMiao Xie struct btrfs_delayed_item *__btrfs_next_delayed_item(
55516cdcec7SMiao Xie 						struct btrfs_delayed_item *item)
55616cdcec7SMiao Xie {
55716cdcec7SMiao Xie 	struct rb_node *p;
55816cdcec7SMiao Xie 	struct btrfs_delayed_item *next = NULL;
55916cdcec7SMiao Xie 
56016cdcec7SMiao Xie 	p = rb_next(&item->rb_node);
56116cdcec7SMiao Xie 	if (p)
56216cdcec7SMiao Xie 		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
56316cdcec7SMiao Xie 
56416cdcec7SMiao Xie 	return next;
56516cdcec7SMiao Xie }
56616cdcec7SMiao Xie 
56716cdcec7SMiao Xie static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
56816cdcec7SMiao Xie 						   u64 root_id)
56916cdcec7SMiao Xie {
57016cdcec7SMiao Xie 	struct btrfs_key root_key;
57116cdcec7SMiao Xie 
57216cdcec7SMiao Xie 	if (root->objectid == root_id)
57316cdcec7SMiao Xie 		return root;
57416cdcec7SMiao Xie 
57516cdcec7SMiao Xie 	root_key.objectid = root_id;
57616cdcec7SMiao Xie 	root_key.type = BTRFS_ROOT_ITEM_KEY;
57716cdcec7SMiao Xie 	root_key.offset = (u64)-1;
57816cdcec7SMiao Xie 	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
57916cdcec7SMiao Xie }
58016cdcec7SMiao Xie 
58116cdcec7SMiao Xie static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
58216cdcec7SMiao Xie 					       struct btrfs_root *root,
58316cdcec7SMiao Xie 					       struct btrfs_delayed_item *item)
58416cdcec7SMiao Xie {
58516cdcec7SMiao Xie 	struct btrfs_block_rsv *src_rsv;
58616cdcec7SMiao Xie 	struct btrfs_block_rsv *dst_rsv;
58716cdcec7SMiao Xie 	u64 num_bytes;
58816cdcec7SMiao Xie 	int ret;
58916cdcec7SMiao Xie 
59016cdcec7SMiao Xie 	if (!trans->bytes_reserved)
59116cdcec7SMiao Xie 		return 0;
59216cdcec7SMiao Xie 
59316cdcec7SMiao Xie 	src_rsv = trans->block_rsv;
594*6d668ddaSJosef Bacik 	dst_rsv = &root->fs_info->delayed_block_rsv;
59516cdcec7SMiao Xie 
59616cdcec7SMiao Xie 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
59716cdcec7SMiao Xie 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
59819fd2949SMiao Xie 	if (!ret)
59916cdcec7SMiao Xie 		item->bytes_reserved = num_bytes;
60016cdcec7SMiao Xie 
60116cdcec7SMiao Xie 	return ret;
60216cdcec7SMiao Xie }
60316cdcec7SMiao Xie 
60416cdcec7SMiao Xie static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
60516cdcec7SMiao Xie 						struct btrfs_delayed_item *item)
60616cdcec7SMiao Xie {
60719fd2949SMiao Xie 	struct btrfs_block_rsv *rsv;
60819fd2949SMiao Xie 
60916cdcec7SMiao Xie 	if (!item->bytes_reserved)
61016cdcec7SMiao Xie 		return;
61116cdcec7SMiao Xie 
612*6d668ddaSJosef Bacik 	rsv = &root->fs_info->delayed_block_rsv;
61319fd2949SMiao Xie 	btrfs_block_rsv_release(root, rsv,
61416cdcec7SMiao Xie 				item->bytes_reserved);
61516cdcec7SMiao Xie }
61616cdcec7SMiao Xie 
61716cdcec7SMiao Xie static int btrfs_delayed_inode_reserve_metadata(
61816cdcec7SMiao Xie 					struct btrfs_trans_handle *trans,
61916cdcec7SMiao Xie 					struct btrfs_root *root,
62016cdcec7SMiao Xie 					struct btrfs_delayed_node *node)
62116cdcec7SMiao Xie {
62216cdcec7SMiao Xie 	struct btrfs_block_rsv *src_rsv;
62316cdcec7SMiao Xie 	struct btrfs_block_rsv *dst_rsv;
62416cdcec7SMiao Xie 	u64 num_bytes;
62516cdcec7SMiao Xie 	int ret;
62616cdcec7SMiao Xie 
62716cdcec7SMiao Xie 	if (!trans->bytes_reserved)
62816cdcec7SMiao Xie 		return 0;
62916cdcec7SMiao Xie 
63016cdcec7SMiao Xie 	src_rsv = trans->block_rsv;
631*6d668ddaSJosef Bacik 	dst_rsv = &root->fs_info->delayed_block_rsv;
63216cdcec7SMiao Xie 
63316cdcec7SMiao Xie 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
63416cdcec7SMiao Xie 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
63516cdcec7SMiao Xie 	if (!ret)
63616cdcec7SMiao Xie 		node->bytes_reserved = num_bytes;
63716cdcec7SMiao Xie 
63816cdcec7SMiao Xie 	return ret;
63916cdcec7SMiao Xie }
64016cdcec7SMiao Xie 
64116cdcec7SMiao Xie static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
64216cdcec7SMiao Xie 						struct btrfs_delayed_node *node)
64316cdcec7SMiao Xie {
64416cdcec7SMiao Xie 	struct btrfs_block_rsv *rsv;
64516cdcec7SMiao Xie 
64616cdcec7SMiao Xie 	if (!node->bytes_reserved)
64716cdcec7SMiao Xie 		return;
64816cdcec7SMiao Xie 
649*6d668ddaSJosef Bacik 	rsv = &root->fs_info->delayed_block_rsv;
65016cdcec7SMiao Xie 	btrfs_block_rsv_release(root, rsv,
65116cdcec7SMiao Xie 				node->bytes_reserved);
65216cdcec7SMiao Xie 	node->bytes_reserved = 0;
65316cdcec7SMiao Xie }
65416cdcec7SMiao Xie 
65516cdcec7SMiao Xie /*
65616cdcec7SMiao Xie  * This helper will insert some continuous items into the same leaf according
65716cdcec7SMiao Xie  * to the free space of the leaf.
65816cdcec7SMiao Xie  */
/*
 * Batch-insert the run of contiguous delayed items that starts at @item into
 * the leaf that path->nodes[0] already points to.  The run ends as soon as
 * the next item would no longer fit into the leaf's free space or is not
 * contiguous with its predecessor.
 *
 * Returns 0 on success (including the "nothing fits" case), -ENOMEM on
 * allocation failure, or the error from setup_items_for_insert().
 */
65916cdcec7SMiao Xie static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
66016cdcec7SMiao Xie 				struct btrfs_root *root,
66116cdcec7SMiao Xie 				struct btrfs_path *path,
66216cdcec7SMiao Xie 				struct btrfs_delayed_item *item)
66316cdcec7SMiao Xie {
66416cdcec7SMiao Xie 	struct btrfs_delayed_item *curr, *next;
66516cdcec7SMiao Xie 	int free_space;
66616cdcec7SMiao Xie 	int total_data_size = 0, total_size = 0;
66716cdcec7SMiao Xie 	struct extent_buffer *leaf;
66816cdcec7SMiao Xie 	char *data_ptr;
66916cdcec7SMiao Xie 	struct btrfs_key *keys;
67016cdcec7SMiao Xie 	u32 *data_size;
67116cdcec7SMiao Xie 	struct list_head head;
67216cdcec7SMiao Xie 	int slot;
67316cdcec7SMiao Xie 	int nitems;
67416cdcec7SMiao Xie 	int i;
67516cdcec7SMiao Xie 	int ret = 0;
67616cdcec7SMiao Xie 
67716cdcec7SMiao Xie 	BUG_ON(!path->nodes[0]);
67816cdcec7SMiao Xie 
67916cdcec7SMiao Xie 	leaf = path->nodes[0];
68016cdcec7SMiao Xie 	free_space = btrfs_leaf_free_space(root, leaf);
68116cdcec7SMiao Xie 	INIT_LIST_HEAD(&head);
68216cdcec7SMiao Xie 
68316cdcec7SMiao Xie 	next = item;
68417aca1c9SChris Mason 	nitems = 0;
68516cdcec7SMiao Xie 
68616cdcec7SMiao Xie 	/*
68716cdcec7SMiao Xie 	 * count the number of the continuous items that we can insert in batch
68816cdcec7SMiao Xie 	 */
	/* each item costs its data plus one struct btrfs_item header in the leaf */
68916cdcec7SMiao Xie 	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
69016cdcec7SMiao Xie 	       free_space) {
69116cdcec7SMiao Xie 		total_data_size += next->data_len;
69216cdcec7SMiao Xie 		total_size += next->data_len + sizeof(struct btrfs_item);
69316cdcec7SMiao Xie 		list_add_tail(&next->tree_list, &head);
69416cdcec7SMiao Xie 		nitems++;
69516cdcec7SMiao Xie 
69616cdcec7SMiao Xie 		curr = next;
69716cdcec7SMiao Xie 		next = __btrfs_next_delayed_item(curr);
69816cdcec7SMiao Xie 		if (!next)
69916cdcec7SMiao Xie 			break;
70016cdcec7SMiao Xie 
70116cdcec7SMiao Xie 		if (!btrfs_is_continuous_delayed_item(curr, next))
70216cdcec7SMiao Xie 			break;
70316cdcec7SMiao Xie 	}
70416cdcec7SMiao Xie 
70516cdcec7SMiao Xie 	if (!nitems) {
70616cdcec7SMiao Xie 		ret = 0;
70716cdcec7SMiao Xie 		goto out;
70816cdcec7SMiao Xie 	}
70916cdcec7SMiao Xie 
71016cdcec7SMiao Xie 	/*
71116cdcec7SMiao Xie 	 * we need allocate some memory space, but it might cause the task
71216cdcec7SMiao Xie 	 * to sleep, so we set all locked nodes in the path to blocking locks
71316cdcec7SMiao Xie 	 * first.
71416cdcec7SMiao Xie 	 */
71516cdcec7SMiao Xie 	btrfs_set_path_blocking(path);
71616cdcec7SMiao Xie 
71716cdcec7SMiao Xie 	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
71816cdcec7SMiao Xie 	if (!keys) {
71916cdcec7SMiao Xie 		ret = -ENOMEM;
72016cdcec7SMiao Xie 		goto out;
72116cdcec7SMiao Xie 	}
72216cdcec7SMiao Xie 
72316cdcec7SMiao Xie 	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
72416cdcec7SMiao Xie 	if (!data_size) {
72516cdcec7SMiao Xie 		ret = -ENOMEM;
72616cdcec7SMiao Xie 		goto error;
72716cdcec7SMiao Xie 	}
72816cdcec7SMiao Xie 
72916cdcec7SMiao Xie 	/* get keys of all the delayed items */
73016cdcec7SMiao Xie 	i = 0;
73116cdcec7SMiao Xie 	list_for_each_entry(next, &head, tree_list) {
73216cdcec7SMiao Xie 		keys[i] = next->key;
73316cdcec7SMiao Xie 		data_size[i] = next->data_len;
73416cdcec7SMiao Xie 		i++;
73516cdcec7SMiao Xie 	}
73616cdcec7SMiao Xie 
73716cdcec7SMiao Xie 	/* reset all the locked nodes in the patch to spinning locks. */
738bd681513SChris Mason 	btrfs_clear_path_blocking(path, NULL, 0);
73916cdcec7SMiao Xie 
74016cdcec7SMiao Xie 	/* insert the keys of the items */
74116cdcec7SMiao Xie 	ret = setup_items_for_insert(trans, root, path, keys, data_size,
74216cdcec7SMiao Xie 				     total_data_size, total_size, nitems);
74316cdcec7SMiao Xie 	if (ret)
74416cdcec7SMiao Xie 		goto error;
74516cdcec7SMiao Xie 
74616cdcec7SMiao Xie 	/* insert the dir index items */
74716cdcec7SMiao Xie 	slot = path->slots[0];
74816cdcec7SMiao Xie 	list_for_each_entry_safe(curr, next, &head, tree_list) {
74916cdcec7SMiao Xie 		data_ptr = btrfs_item_ptr(leaf, slot, char);
75016cdcec7SMiao Xie 		write_extent_buffer(leaf, &curr->data,
75116cdcec7SMiao Xie 				    (unsigned long)data_ptr,
75216cdcec7SMiao Xie 				    curr->data_len);
75316cdcec7SMiao Xie 		slot++;
75416cdcec7SMiao Xie 
		/* the item now lives in the leaf; drop its delayed reservation */
75516cdcec7SMiao Xie 		btrfs_delayed_item_release_metadata(root, curr);
75616cdcec7SMiao Xie 
75716cdcec7SMiao Xie 		list_del(&curr->tree_list);
75816cdcec7SMiao Xie 		btrfs_release_delayed_item(curr);
75916cdcec7SMiao Xie 	}
76016cdcec7SMiao Xie 
/* the success path falls through: the temporary arrays are always freed */
76116cdcec7SMiao Xie error:
76216cdcec7SMiao Xie 	kfree(data_size);
76316cdcec7SMiao Xie 	kfree(keys);
76416cdcec7SMiao Xie out:
76516cdcec7SMiao Xie 	return ret;
76616cdcec7SMiao Xie }
76716cdcec7SMiao Xie 
76816cdcec7SMiao Xie /*
76916cdcec7SMiao Xie  * This helper can just do simple insertion that needn't extend item for new
77016cdcec7SMiao Xie  * data, such as directory name index insertion, inode insertion.
77116cdcec7SMiao Xie  */
/*
 * Insert a single delayed item at its key and copy its payload into the
 * leaf.  -EEXIST from btrfs_insert_empty_item() is tolerated: presumably
 * the path is then left at the already-existing item, whose data is simply
 * overwritten below (TODO confirm against btrfs_insert_empty_item()).
 * Any other error is returned unchanged.  On success the item's delayed
 * metadata reservation is released and 0 is returned.
 *
 * (Dropped the local "item" variable: it was assigned via btrfs_item_nr()
 * but never read.)
 */
77216cdcec7SMiao Xie static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
77316cdcec7SMiao Xie 				     struct btrfs_root *root,
77416cdcec7SMiao Xie 				     struct btrfs_path *path,
77516cdcec7SMiao Xie 				     struct btrfs_delayed_item *delayed_item)
77616cdcec7SMiao Xie {
77716cdcec7SMiao Xie 	struct extent_buffer *leaf;
77916cdcec7SMiao Xie 	char *ptr;
78016cdcec7SMiao Xie 	int ret;
78116cdcec7SMiao Xie 
78216cdcec7SMiao Xie 	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
78316cdcec7SMiao Xie 				      delayed_item->data_len);
78416cdcec7SMiao Xie 	if (ret < 0 && ret != -EEXIST)
78516cdcec7SMiao Xie 		return ret;
78616cdcec7SMiao Xie 
78716cdcec7SMiao Xie 	leaf = path->nodes[0];
78816cdcec7SMiao Xie 
79016cdcec7SMiao Xie 	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
79116cdcec7SMiao Xie 
79216cdcec7SMiao Xie 	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
79316cdcec7SMiao Xie 			    delayed_item->data_len);
79416cdcec7SMiao Xie 	btrfs_mark_buffer_dirty(leaf);
79516cdcec7SMiao Xie 
	/* the item is on disk now; release its delayed reservation */
79616cdcec7SMiao Xie 	btrfs_delayed_item_release_metadata(root, delayed_item);
79716cdcec7SMiao Xie 	return 0;
79816cdcec7SMiao Xie }
79916cdcec7SMiao Xie 
80016cdcec7SMiao Xie /*
80116cdcec7SMiao Xie  * we insert an item first, then if there are some continuous items, we try
80216cdcec7SMiao Xie  * to insert those items into the same leaf.
80316cdcec7SMiao Xie  */
/*
 * Drain @node's insertion tree.  Each pass takes node->mutex, pops the
 * first pending item, inserts it, and batch-inserts any contiguous
 * followers into the same leaf; the path and mutex are dropped between
 * passes.  Returns 0 when the tree is empty, or the first error from
 * btrfs_insert_delayed_item().
 *
 * NOTE(review): the return value of btrfs_batch_insert_items() is ignored,
 * so an error inside the batch is silently dropped — confirm intentional.
 */
80416cdcec7SMiao Xie static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
80516cdcec7SMiao Xie 				      struct btrfs_path *path,
80616cdcec7SMiao Xie 				      struct btrfs_root *root,
80716cdcec7SMiao Xie 				      struct btrfs_delayed_node *node)
80816cdcec7SMiao Xie {
80916cdcec7SMiao Xie 	struct btrfs_delayed_item *curr, *prev;
81016cdcec7SMiao Xie 	int ret = 0;
81116cdcec7SMiao Xie 
81216cdcec7SMiao Xie do_again:
81316cdcec7SMiao Xie 	mutex_lock(&node->mutex);
81416cdcec7SMiao Xie 	curr = __btrfs_first_delayed_insertion_item(node);
81516cdcec7SMiao Xie 	if (!curr)
81616cdcec7SMiao Xie 		goto insert_end;
81716cdcec7SMiao Xie 
81816cdcec7SMiao Xie 	ret = btrfs_insert_delayed_item(trans, root, path, curr);
81916cdcec7SMiao Xie 	if (ret < 0) {
820945d8962SChris Mason 		btrfs_release_path(path);
82116cdcec7SMiao Xie 		goto insert_end;
82216cdcec7SMiao Xie 	}
82316cdcec7SMiao Xie 
82416cdcec7SMiao Xie 	prev = curr;
82516cdcec7SMiao Xie 	curr = __btrfs_next_delayed_item(prev);
82616cdcec7SMiao Xie 	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
82716cdcec7SMiao Xie 		/* insert the continuous items into the same leaf */
82816cdcec7SMiao Xie 		path->slots[0]++;
82916cdcec7SMiao Xie 		btrfs_batch_insert_items(trans, root, path, curr);
83016cdcec7SMiao Xie 	}
83116cdcec7SMiao Xie 	btrfs_release_delayed_item(prev);
83216cdcec7SMiao Xie 	btrfs_mark_buffer_dirty(path->nodes[0]);
83316cdcec7SMiao Xie 
	/* drop path and lock, then look for more pending insertions */
834945d8962SChris Mason 	btrfs_release_path(path);
83516cdcec7SMiao Xie 	mutex_unlock(&node->mutex);
83616cdcec7SMiao Xie 	goto do_again;
83716cdcec7SMiao Xie 
83816cdcec7SMiao Xie insert_end:
83916cdcec7SMiao Xie 	mutex_unlock(&node->mutex);
84016cdcec7SMiao Xie 	return ret;
84116cdcec7SMiao Xie }
84216cdcec7SMiao Xie 
/*
 * Delete, with a single btrfs_del_items() call, the run of leaf items
 * (starting at path->slots[0]) whose keys match the contiguous delayed
 * deletion items beginning with @item.
 *
 * Returns 0 on success or when nothing matches, -ENOENT when the slot is
 * already past the last item of the leaf, or the error from
 * btrfs_del_items().
 */
84316cdcec7SMiao Xie static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
84416cdcec7SMiao Xie 				    struct btrfs_root *root,
84516cdcec7SMiao Xie 				    struct btrfs_path *path,
84616cdcec7SMiao Xie 				    struct btrfs_delayed_item *item)
84716cdcec7SMiao Xie {
84816cdcec7SMiao Xie 	struct btrfs_delayed_item *curr, *next;
84916cdcec7SMiao Xie 	struct extent_buffer *leaf;
85016cdcec7SMiao Xie 	struct btrfs_key key;
85116cdcec7SMiao Xie 	struct list_head head;
85216cdcec7SMiao Xie 	int nitems, i, last_item;
85316cdcec7SMiao Xie 	int ret = 0;
85416cdcec7SMiao Xie 
85516cdcec7SMiao Xie 	BUG_ON(!path->nodes[0]);
85616cdcec7SMiao Xie 
85716cdcec7SMiao Xie 	leaf = path->nodes[0];
85816cdcec7SMiao Xie 
85916cdcec7SMiao Xie 	i = path->slots[0];
86016cdcec7SMiao Xie 	last_item = btrfs_header_nritems(leaf) - 1;
86116cdcec7SMiao Xie 	if (i > last_item)
86216cdcec7SMiao Xie 		return -ENOENT;	/* FIXME: Is errno suitable? */
86316cdcec7SMiao Xie 
86416cdcec7SMiao Xie 	next = item;
86516cdcec7SMiao Xie 	INIT_LIST_HEAD(&head);
86616cdcec7SMiao Xie 	btrfs_item_key_to_cpu(leaf, &key, i);
86716cdcec7SMiao Xie 	nitems = 0;
86816cdcec7SMiao Xie 	/*
86916cdcec7SMiao Xie 	 * count the number of the dir index items that we can delete in batch
86916cdcec7SMiao Xie (cont)	 */
87116cdcec7SMiao Xie 	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
87216cdcec7SMiao Xie 		list_add_tail(&next->tree_list, &head);
87316cdcec7SMiao Xie 		nitems++;
87416cdcec7SMiao Xie 
87516cdcec7SMiao Xie 		curr = next;
87616cdcec7SMiao Xie 		next = __btrfs_next_delayed_item(curr);
87716cdcec7SMiao Xie 		if (!next)
87816cdcec7SMiao Xie 			break;
87916cdcec7SMiao Xie 
88016cdcec7SMiao Xie 		if (!btrfs_is_continuous_delayed_item(curr, next))
88116cdcec7SMiao Xie 			break;
88216cdcec7SMiao Xie 
88316cdcec7SMiao Xie 		i++;
88416cdcec7SMiao Xie 		if (i > last_item)
88516cdcec7SMiao Xie 			break;
88616cdcec7SMiao Xie 		btrfs_item_key_to_cpu(leaf, &key, i);
88716cdcec7SMiao Xie 	}
88816cdcec7SMiao Xie 
88916cdcec7SMiao Xie 	if (!nitems)
89016cdcec7SMiao Xie 		return 0;
89116cdcec7SMiao Xie 
89216cdcec7SMiao Xie 	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
89316cdcec7SMiao Xie 	if (ret)
89416cdcec7SMiao Xie 		goto out;
89516cdcec7SMiao Xie 
	/* the on-disk items are gone; release reservations and delayed items */
89616cdcec7SMiao Xie 	list_for_each_entry_safe(curr, next, &head, tree_list) {
89716cdcec7SMiao Xie 		btrfs_delayed_item_release_metadata(root, curr);
89816cdcec7SMiao Xie 		list_del(&curr->tree_list);
89916cdcec7SMiao Xie 		btrfs_release_delayed_item(curr);
90016cdcec7SMiao Xie 	}
90116cdcec7SMiao Xie 
90216cdcec7SMiao Xie out:
90316cdcec7SMiao Xie 	return ret;
90416cdcec7SMiao Xie }
90516cdcec7SMiao Xie 
/*
 * Drain @node's deletion tree: look up each pending deletion key in the
 * tree and batch-delete the matching leaf items.  A delayed item whose key
 * no longer exists on disk is simply dropped.  Returns 0 when the tree is
 * empty, or the error from btrfs_search_slot().
 *
 * NOTE(review): the return value of btrfs_batch_delete_items() is ignored
 * here — confirm intentional.
 */
90616cdcec7SMiao Xie static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
90716cdcec7SMiao Xie 				      struct btrfs_path *path,
90816cdcec7SMiao Xie 				      struct btrfs_root *root,
90916cdcec7SMiao Xie 				      struct btrfs_delayed_node *node)
91016cdcec7SMiao Xie {
91116cdcec7SMiao Xie 	struct btrfs_delayed_item *curr, *prev;
91216cdcec7SMiao Xie 	int ret = 0;
91316cdcec7SMiao Xie 
91416cdcec7SMiao Xie do_again:
91516cdcec7SMiao Xie 	mutex_lock(&node->mutex);
91616cdcec7SMiao Xie 	curr = __btrfs_first_delayed_deletion_item(node);
91716cdcec7SMiao Xie 	if (!curr)
91816cdcec7SMiao Xie 		goto delete_fail;
91916cdcec7SMiao Xie 
92016cdcec7SMiao Xie 	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
92116cdcec7SMiao Xie 	if (ret < 0)
92216cdcec7SMiao Xie 		goto delete_fail;
92316cdcec7SMiao Xie 	else if (ret > 0) {
92416cdcec7SMiao Xie 		/*
92516cdcec7SMiao Xie 		 * can't find the item which the node points to, so this node
92616cdcec7SMiao Xie 		 * is invalid, just drop it.
92716cdcec7SMiao Xie 		 */
92816cdcec7SMiao Xie 		prev = curr;
92916cdcec7SMiao Xie 		curr = __btrfs_next_delayed_item(prev);
93016cdcec7SMiao Xie 		btrfs_release_delayed_item(prev);
93116cdcec7SMiao Xie 		ret = 0;
932945d8962SChris Mason 		btrfs_release_path(path);
93316cdcec7SMiao Xie 		if (curr)
93416cdcec7SMiao Xie 			goto do_again;
93516cdcec7SMiao Xie 		else
93616cdcec7SMiao Xie 			goto delete_fail;
93716cdcec7SMiao Xie 	}
93816cdcec7SMiao Xie 
93916cdcec7SMiao Xie 	btrfs_batch_delete_items(trans, root, path, curr);
	/* drop path and lock between passes, then look for more deletions */
940945d8962SChris Mason 	btrfs_release_path(path);
94116cdcec7SMiao Xie 	mutex_unlock(&node->mutex);
94216cdcec7SMiao Xie 	goto do_again;
94316cdcec7SMiao Xie 
/* reached both on error and on normal completion (empty tree) */
94416cdcec7SMiao Xie delete_fail:
945945d8962SChris Mason 	btrfs_release_path(path);
94616cdcec7SMiao Xie 	mutex_unlock(&node->mutex);
94716cdcec7SMiao Xie 	return ret;
94816cdcec7SMiao Xie }
94916cdcec7SMiao Xie 
/*
 * Clear the node's dirty-inode state and drop its item accounting; wake
 * any waiter once the global delayed-item count falls below the
 * background threshold.  Caller is expected to hold node->mutex (all
 * callers in this file do) — TODO confirm this is a hard requirement.
 */
95016cdcec7SMiao Xie static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
95116cdcec7SMiao Xie {
95216cdcec7SMiao Xie 	struct btrfs_delayed_root *delayed_root;
95316cdcec7SMiao Xie 
95416cdcec7SMiao Xie 	if (delayed_node && delayed_node->inode_dirty) {
95516cdcec7SMiao Xie 		BUG_ON(!delayed_node->root);
95616cdcec7SMiao Xie 		delayed_node->inode_dirty = 0;
95716cdcec7SMiao Xie 		delayed_node->count--;
95816cdcec7SMiao Xie 
95916cdcec7SMiao Xie 		delayed_root = delayed_node->root->fs_info->delayed_root;
96016cdcec7SMiao Xie 		atomic_dec(&delayed_root->items);
96116cdcec7SMiao Xie 		if (atomic_read(&delayed_root->items) <
96216cdcec7SMiao Xie 		    BTRFS_DELAYED_BACKGROUND &&
96316cdcec7SMiao Xie 		    waitqueue_active(&delayed_root->wait))
96416cdcec7SMiao Xie 			wake_up(&delayed_root->wait);
96516cdcec7SMiao Xie 	}
96616cdcec7SMiao Xie }
96716cdcec7SMiao Xie 
/*
 * Write the cached inode_item of @node back into the inode tree.
 *
 * Returns 0 if the node is not inode-dirty or on success, -ENOENT when
 * the inode item cannot be found, or the error from btrfs_lookup_inode().
 * On success the delayed inode reservation is released and the node's
 * dirty state cleared.
 */
96816cdcec7SMiao Xie static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
96916cdcec7SMiao Xie 				      struct btrfs_root *root,
97016cdcec7SMiao Xie 				      struct btrfs_path *path,
97116cdcec7SMiao Xie 				      struct btrfs_delayed_node *node)
97216cdcec7SMiao Xie {
97316cdcec7SMiao Xie 	struct btrfs_key key;
97416cdcec7SMiao Xie 	struct btrfs_inode_item *inode_item;
97516cdcec7SMiao Xie 	struct extent_buffer *leaf;
97616cdcec7SMiao Xie 	int ret;
97716cdcec7SMiao Xie 
97816cdcec7SMiao Xie 	mutex_lock(&node->mutex);
97916cdcec7SMiao Xie 	if (!node->inode_dirty) {
98016cdcec7SMiao Xie 		mutex_unlock(&node->mutex);
98116cdcec7SMiao Xie 		return 0;
98216cdcec7SMiao Xie 	}
98316cdcec7SMiao Xie 
98416cdcec7SMiao Xie 	key.objectid = node->inode_id;
98516cdcec7SMiao Xie 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
98616cdcec7SMiao Xie 	key.offset = 0;
98716cdcec7SMiao Xie 	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
98816cdcec7SMiao Xie 	if (ret > 0) {
989945d8962SChris Mason 		btrfs_release_path(path);
99016cdcec7SMiao Xie 		mutex_unlock(&node->mutex);
99116cdcec7SMiao Xie 		return -ENOENT;
99216cdcec7SMiao Xie 	} else if (ret < 0) {
99316cdcec7SMiao Xie 		mutex_unlock(&node->mutex);
99416cdcec7SMiao Xie 		return ret;
99516cdcec7SMiao Xie 	}
99616cdcec7SMiao Xie 
99716cdcec7SMiao Xie 	btrfs_unlock_up_safe(path, 1);
99816cdcec7SMiao Xie 	leaf = path->nodes[0];
99916cdcec7SMiao Xie 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
100016cdcec7SMiao Xie 				    struct btrfs_inode_item);
	/* copy the whole cached inode item over the on-disk one */
100116cdcec7SMiao Xie 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
100216cdcec7SMiao Xie 			    sizeof(struct btrfs_inode_item));
100316cdcec7SMiao Xie 	btrfs_mark_buffer_dirty(leaf);
1004945d8962SChris Mason 	btrfs_release_path(path);
100516cdcec7SMiao Xie 
100616cdcec7SMiao Xie 	btrfs_delayed_inode_release_metadata(root, node);
100716cdcec7SMiao Xie 	btrfs_release_delayed_inode(node);
100816cdcec7SMiao Xie 	mutex_unlock(&node->mutex);
100916cdcec7SMiao Xie 
101016cdcec7SMiao Xie 	return 0;
101116cdcec7SMiao Xie }
101216cdcec7SMiao Xie 
101316cdcec7SMiao Xie /* Called when committing the transaction. */
/*
 * Flush every queued delayed node: insertions first, then deletions, then
 * the dirty inode item.  trans->block_rsv is temporarily switched to the
 * dedicated delayed_block_rsv and restored before returning.  Stops and
 * returns the first error encountered.
 */
101416cdcec7SMiao Xie int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
101516cdcec7SMiao Xie 			    struct btrfs_root *root)
101616cdcec7SMiao Xie {
101716cdcec7SMiao Xie 	struct btrfs_delayed_root *delayed_root;
101816cdcec7SMiao Xie 	struct btrfs_delayed_node *curr_node, *prev_node;
101916cdcec7SMiao Xie 	struct btrfs_path *path;
102019fd2949SMiao Xie 	struct btrfs_block_rsv *block_rsv;
102116cdcec7SMiao Xie 	int ret = 0;
102216cdcec7SMiao Xie 
102316cdcec7SMiao Xie 	path = btrfs_alloc_path();
102416cdcec7SMiao Xie 	if (!path)
102516cdcec7SMiao Xie 		return -ENOMEM;
102616cdcec7SMiao Xie 	path->leave_spinning = 1;
102716cdcec7SMiao Xie 
	/* charge all delayed-item work to the delayed block reservation */
102819fd2949SMiao Xie 	block_rsv = trans->block_rsv;
1029*6d668ddaSJosef Bacik 	trans->block_rsv = &root->fs_info->delayed_block_rsv;
103019fd2949SMiao Xie 
103116cdcec7SMiao Xie 	delayed_root = btrfs_get_delayed_root(root);
103216cdcec7SMiao Xie 
103316cdcec7SMiao Xie 	curr_node = btrfs_first_delayed_node(delayed_root);
103416cdcec7SMiao Xie 	while (curr_node) {
		/* each delayed node may belong to a different root */
103516cdcec7SMiao Xie 		root = curr_node->root;
103616cdcec7SMiao Xie 		ret = btrfs_insert_delayed_items(trans, path, root,
103716cdcec7SMiao Xie 						 curr_node);
103816cdcec7SMiao Xie 		if (!ret)
103916cdcec7SMiao Xie 			ret = btrfs_delete_delayed_items(trans, path, root,
104016cdcec7SMiao Xie 							 curr_node);
104116cdcec7SMiao Xie 		if (!ret)
104216cdcec7SMiao Xie 			ret = btrfs_update_delayed_inode(trans, root, path,
104316cdcec7SMiao Xie 							 curr_node);
104416cdcec7SMiao Xie 		if (ret) {
104516cdcec7SMiao Xie 			btrfs_release_delayed_node(curr_node);
104616cdcec7SMiao Xie 			break;
104716cdcec7SMiao Xie 		}
104816cdcec7SMiao Xie 
104916cdcec7SMiao Xie 		prev_node = curr_node;
105016cdcec7SMiao Xie 		curr_node = btrfs_next_delayed_node(curr_node);
105116cdcec7SMiao Xie 		btrfs_release_delayed_node(prev_node);
105216cdcec7SMiao Xie 	}
105316cdcec7SMiao Xie 
105416cdcec7SMiao Xie 	btrfs_free_path(path);
	/* always restore the caller's reservation */
105519fd2949SMiao Xie 	trans->block_rsv = block_rsv;
105616cdcec7SMiao Xie 	return ret;
105716cdcec7SMiao Xie }
105816cdcec7SMiao Xie 
/*
 * Flush a single delayed node (insertions, deletions, then the inode item)
 * using a private path, with trans->block_rsv switched to the
 * delayed_block_rsv for the duration and restored on exit.
 */
105916cdcec7SMiao Xie static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
106016cdcec7SMiao Xie 					      struct btrfs_delayed_node *node)
106116cdcec7SMiao Xie {
106216cdcec7SMiao Xie 	struct btrfs_path *path;
106319fd2949SMiao Xie 	struct btrfs_block_rsv *block_rsv;
106416cdcec7SMiao Xie 	int ret;
106516cdcec7SMiao Xie 
106616cdcec7SMiao Xie 	path = btrfs_alloc_path();
106716cdcec7SMiao Xie 	if (!path)
106816cdcec7SMiao Xie 		return -ENOMEM;
106916cdcec7SMiao Xie 	path->leave_spinning = 1;
107016cdcec7SMiao Xie 
107119fd2949SMiao Xie 	block_rsv = trans->block_rsv;
1072*6d668ddaSJosef Bacik 	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
107319fd2949SMiao Xie 
107416cdcec7SMiao Xie 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
107516cdcec7SMiao Xie 	if (!ret)
107616cdcec7SMiao Xie 		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
107716cdcec7SMiao Xie 	if (!ret)
107816cdcec7SMiao Xie 		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
107916cdcec7SMiao Xie 	btrfs_free_path(path);
108016cdcec7SMiao Xie 
108119fd2949SMiao Xie 	trans->block_rsv = block_rsv;
108216cdcec7SMiao Xie 	return ret;
108316cdcec7SMiao Xie }
108416cdcec7SMiao Xie 
/*
 * Commit the delayed items of one inode.  Returns 0 immediately when the
 * inode has no delayed node or the node has nothing pending (count == 0);
 * otherwise flushes it via __btrfs_commit_inode_delayed_items().
 */
108516cdcec7SMiao Xie int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
108616cdcec7SMiao Xie 				     struct inode *inode)
108716cdcec7SMiao Xie {
108816cdcec7SMiao Xie 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
108916cdcec7SMiao Xie 	int ret;
109016cdcec7SMiao Xie 
109116cdcec7SMiao Xie 	if (!delayed_node)
109216cdcec7SMiao Xie 		return 0;
109316cdcec7SMiao Xie 
109416cdcec7SMiao Xie 	mutex_lock(&delayed_node->mutex);
109516cdcec7SMiao Xie 	if (!delayed_node->count) {
109616cdcec7SMiao Xie 		mutex_unlock(&delayed_node->mutex);
109716cdcec7SMiao Xie 		btrfs_release_delayed_node(delayed_node);
109816cdcec7SMiao Xie 		return 0;
109916cdcec7SMiao Xie 	}
110016cdcec7SMiao Xie 	mutex_unlock(&delayed_node->mutex);
110116cdcec7SMiao Xie 
110216cdcec7SMiao Xie 	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
110316cdcec7SMiao Xie 	btrfs_release_delayed_node(delayed_node);
110416cdcec7SMiao Xie 	return ret;
110516cdcec7SMiao Xie }
110616cdcec7SMiao Xie 
/*
 * Detach the delayed node from the in-memory inode and drop the inode's
 * reference to it.  No-op if the inode never had a delayed node.
 */
110716cdcec7SMiao Xie void btrfs_remove_delayed_node(struct inode *inode)
110816cdcec7SMiao Xie {
110916cdcec7SMiao Xie 	struct btrfs_delayed_node *delayed_node;
111016cdcec7SMiao Xie 
111116cdcec7SMiao Xie 	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
111216cdcec7SMiao Xie 	if (!delayed_node)
111316cdcec7SMiao Xie 		return;
111416cdcec7SMiao Xie 
111516cdcec7SMiao Xie 	BTRFS_I(inode)->delayed_node = NULL;
111616cdcec7SMiao Xie 	btrfs_release_delayed_node(delayed_node);
111716cdcec7SMiao Xie }
111816cdcec7SMiao Xie 
/* Work item handed to the delayed-workers queue to flush one delayed node. */
111916cdcec7SMiao Xie struct btrfs_async_delayed_node {
112016cdcec7SMiao Xie 	struct btrfs_root *root;
112116cdcec7SMiao Xie 	struct btrfs_delayed_node *delayed_node;
112216cdcec7SMiao Xie 	struct btrfs_work work;
112316cdcec7SMiao Xie };
112416cdcec7SMiao Xie 
/*
 * Worker callback: flush one delayed node (insert/delete its items and
 * update the inode item) inside a joined transaction, then either requeue
 * itself if new items arrived meanwhile, or dequeue the node and free the
 * work item.
 *
 * NOTE(review): if btrfs_alloc_path() fails, delayed_node is still NULL at
 * "out" and is passed to btrfs_release_prepared_delayed_node() — confirm
 * that helper tolerates NULL.
 */
112516cdcec7SMiao Xie static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
112616cdcec7SMiao Xie {
112716cdcec7SMiao Xie 	struct btrfs_async_delayed_node *async_node;
112816cdcec7SMiao Xie 	struct btrfs_trans_handle *trans;
112916cdcec7SMiao Xie 	struct btrfs_path *path;
113016cdcec7SMiao Xie 	struct btrfs_delayed_node *delayed_node = NULL;
113116cdcec7SMiao Xie 	struct btrfs_root *root;
113219fd2949SMiao Xie 	struct btrfs_block_rsv *block_rsv;
113316cdcec7SMiao Xie 	unsigned long nr = 0;
113416cdcec7SMiao Xie 	int need_requeue = 0;
113516cdcec7SMiao Xie 	int ret;
113616cdcec7SMiao Xie 
113716cdcec7SMiao Xie 	async_node = container_of(work, struct btrfs_async_delayed_node, work);
113816cdcec7SMiao Xie 
113916cdcec7SMiao Xie 	path = btrfs_alloc_path();
114016cdcec7SMiao Xie 	if (!path)
114116cdcec7SMiao Xie 		goto out;
114216cdcec7SMiao Xie 	path->leave_spinning = 1;
114316cdcec7SMiao Xie 
114416cdcec7SMiao Xie 	delayed_node = async_node->delayed_node;
114516cdcec7SMiao Xie 	root = delayed_node->root;
114616cdcec7SMiao Xie 
1147ff5714ccSChris Mason 	trans = btrfs_join_transaction(root);
114816cdcec7SMiao Xie 	if (IS_ERR(trans))
114916cdcec7SMiao Xie 		goto free_path;
115016cdcec7SMiao Xie 
	/* charge the flush to the delayed reservation; restored below */
115119fd2949SMiao Xie 	block_rsv = trans->block_rsv;
1152*6d668ddaSJosef Bacik 	trans->block_rsv = &root->fs_info->delayed_block_rsv;
115319fd2949SMiao Xie 
115416cdcec7SMiao Xie 	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
115516cdcec7SMiao Xie 	if (!ret)
115616cdcec7SMiao Xie 		ret = btrfs_delete_delayed_items(trans, path, root,
115716cdcec7SMiao Xie 						 delayed_node);
115816cdcec7SMiao Xie 
115916cdcec7SMiao Xie 	if (!ret)
116016cdcec7SMiao Xie 		btrfs_update_delayed_inode(trans, root, path, delayed_node);
116116cdcec7SMiao Xie 
116216cdcec7SMiao Xie 	/*
116316cdcec7SMiao Xie 	 * Maybe new delayed items have been inserted, so we need requeue
116416cdcec7SMiao Xie 	 * the work. Besides that, we must dequeue the empty delayed nodes
116516cdcec7SMiao Xie 	 * to avoid the race between delayed items balance and the worker.
116616cdcec7SMiao Xie 	 * The race like this:
116716cdcec7SMiao Xie 	 * 	Task1				Worker thread
116816cdcec7SMiao Xie 	 * 					count == 0, needn't requeue
116916cdcec7SMiao Xie 	 * 					  also needn't insert the
117016cdcec7SMiao Xie 	 * 					  delayed node into prepare
117116cdcec7SMiao Xie 	 * 					  list again.
117216cdcec7SMiao Xie 	 * 	add lots of delayed items
117316cdcec7SMiao Xie 	 * 	queue the delayed node
117416cdcec7SMiao Xie 	 * 	  already in the list,
117516cdcec7SMiao Xie 	 * 	  and not in the prepare
117616cdcec7SMiao Xie 	 * 	  list, it means the delayed
117716cdcec7SMiao Xie 	 * 	  node is being dealt with
117816cdcec7SMiao Xie 	 * 	  by the worker.
117916cdcec7SMiao Xie 	 * 	do delayed items balance
118016cdcec7SMiao Xie 	 * 	  the delayed node is being
118116cdcec7SMiao Xie 	 * 	  dealt with by the worker
118216cdcec7SMiao Xie 	 * 	  now, just wait.
118316cdcec7SMiao Xie 	 * 	  				the worker goto idle.
118416cdcec7SMiao Xie 	 * Task1 will sleep until the transaction is commited.
118516cdcec7SMiao Xie 	 */
118616cdcec7SMiao Xie 	mutex_lock(&delayed_node->mutex);
118716cdcec7SMiao Xie 	if (delayed_node->count)
118816cdcec7SMiao Xie 		need_requeue = 1;
118916cdcec7SMiao Xie 	else
119016cdcec7SMiao Xie 		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
119116cdcec7SMiao Xie 					   delayed_node);
119216cdcec7SMiao Xie 	mutex_unlock(&delayed_node->mutex);
119316cdcec7SMiao Xie 
	/* remember block usage before the handle is ended */
119416cdcec7SMiao Xie 	nr = trans->blocks_used;
119516cdcec7SMiao Xie 
119619fd2949SMiao Xie 	trans->block_rsv = block_rsv;
119716cdcec7SMiao Xie 	btrfs_end_transaction_dmeta(trans, root);
119816cdcec7SMiao Xie 	__btrfs_btree_balance_dirty(root, nr);
119916cdcec7SMiao Xie free_path:
120016cdcec7SMiao Xie 	btrfs_free_path(path);
120116cdcec7SMiao Xie out:
120216cdcec7SMiao Xie 	if (need_requeue)
120316cdcec7SMiao Xie 		btrfs_requeue_work(&async_node->work);
120416cdcec7SMiao Xie 	else {
120516cdcec7SMiao Xie 		btrfs_release_prepared_delayed_node(delayed_node);
120616cdcec7SMiao Xie 		kfree(async_node);
120716cdcec7SMiao Xie 	}
120816cdcec7SMiao Xie }
120916cdcec7SMiao Xie 
/*
 * Pull prepared delayed nodes off @delayed_root and queue an async work
 * item for each.  Queues at most 4 nodes unless @all is set, in which
 * case it keeps going until none remain.  Returns 0, or -ENOMEM if a
 * work item cannot be allocated (the node's reference is dropped first).
 */
121016cdcec7SMiao Xie static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
121116cdcec7SMiao Xie 				     struct btrfs_root *root, int all)
121216cdcec7SMiao Xie {
121316cdcec7SMiao Xie 	struct btrfs_async_delayed_node *async_node;
121416cdcec7SMiao Xie 	struct btrfs_delayed_node *curr;
121516cdcec7SMiao Xie 	int count = 0;
121616cdcec7SMiao Xie 
121716cdcec7SMiao Xie again:
121816cdcec7SMiao Xie 	curr = btrfs_first_prepared_delayed_node(delayed_root);
121916cdcec7SMiao Xie 	if (!curr)
122016cdcec7SMiao Xie 		return 0;
122116cdcec7SMiao Xie 
122216cdcec7SMiao Xie 	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
122316cdcec7SMiao Xie 	if (!async_node) {
122416cdcec7SMiao Xie 		btrfs_release_prepared_delayed_node(curr);
122516cdcec7SMiao Xie 		return -ENOMEM;
122616cdcec7SMiao Xie 	}
122716cdcec7SMiao Xie 
122816cdcec7SMiao Xie 	async_node->root = root;
122916cdcec7SMiao Xie 	async_node->delayed_node = curr;
123016cdcec7SMiao Xie 
123116cdcec7SMiao Xie 	async_node->work.func = btrfs_async_run_delayed_node_done;
123216cdcec7SMiao Xie 	async_node->work.flags = 0;
123316cdcec7SMiao Xie 
123416cdcec7SMiao Xie 	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
123516cdcec7SMiao Xie 	count++;
123616cdcec7SMiao Xie 
123716cdcec7SMiao Xie 	if (all || count < 4)
123816cdcec7SMiao Xie 		goto again;
123916cdcec7SMiao Xie 
124016cdcec7SMiao Xie 	return 0;
124116cdcec7SMiao Xie }
124216cdcec7SMiao Xie 
/* Debug aid: warn if any delayed node is still queued when the root is
 * expected to be fully drained. */
1243e999376fSChris Mason void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1244e999376fSChris Mason {
1245e999376fSChris Mason 	struct btrfs_delayed_root *delayed_root;
1246e999376fSChris Mason 	delayed_root = btrfs_get_delayed_root(root);
1247e999376fSChris Mason 	WARN_ON(btrfs_first_delayed_node(delayed_root));
1248e999376fSChris Mason }
1249e999376fSChris Mason 
/*
 * Throttle delayed-item growth.  Below BTRFS_DELAYED_BACKGROUND: do
 * nothing.  At or above BTRFS_DELAYED_WRITEBACK: flush everything
 * asynchronously and wait (up to HZ jiffies, interruptible) for the count
 * to drop below the background threshold.  In between: kick a limited
 * async flush without waiting.
 */
125016cdcec7SMiao Xie void btrfs_balance_delayed_items(struct btrfs_root *root)
125116cdcec7SMiao Xie {
125216cdcec7SMiao Xie 	struct btrfs_delayed_root *delayed_root;
125316cdcec7SMiao Xie 
125416cdcec7SMiao Xie 	delayed_root = btrfs_get_delayed_root(root);
125516cdcec7SMiao Xie 
125616cdcec7SMiao Xie 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
125716cdcec7SMiao Xie 		return;
125816cdcec7SMiao Xie 
125916cdcec7SMiao Xie 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
126016cdcec7SMiao Xie 		int ret;
126116cdcec7SMiao Xie 		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
126216cdcec7SMiao Xie 		if (ret)
126316cdcec7SMiao Xie 			return;
126416cdcec7SMiao Xie 
126516cdcec7SMiao Xie 		wait_event_interruptible_timeout(
126616cdcec7SMiao Xie 				delayed_root->wait,
126716cdcec7SMiao Xie 				(atomic_read(&delayed_root->items) <
126816cdcec7SMiao Xie 				 BTRFS_DELAYED_BACKGROUND),
126916cdcec7SMiao Xie 				HZ);
127016cdcec7SMiao Xie 		return;
127116cdcec7SMiao Xie 	}
127216cdcec7SMiao Xie 
127316cdcec7SMiao Xie 	btrfs_wq_run_delayed_node(delayed_root, root, 0);
127416cdcec7SMiao Xie }
127516cdcec7SMiao Xie 
/*
 * Queue a directory index insertion as a delayed item on @dir's delayed
 * node instead of touching the tree immediately.  The btrfs_dir_item
 * payload (location, transid, name, type) is built directly in the
 * delayed item's data area.  Metadata space was reserved at transaction
 * start, so a reservation failure here is treated as a bug; so is a
 * duplicate index key in the insertion tree.
 */
127616cdcec7SMiao Xie int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
127716cdcec7SMiao Xie 				   struct btrfs_root *root, const char *name,
127816cdcec7SMiao Xie 				   int name_len, struct inode *dir,
127916cdcec7SMiao Xie 				   struct btrfs_disk_key *disk_key, u8 type,
128016cdcec7SMiao Xie 				   u64 index)
128116cdcec7SMiao Xie {
128216cdcec7SMiao Xie 	struct btrfs_delayed_node *delayed_node;
128316cdcec7SMiao Xie 	struct btrfs_delayed_item *delayed_item;
128416cdcec7SMiao Xie 	struct btrfs_dir_item *dir_item;
128516cdcec7SMiao Xie 	int ret;
128616cdcec7SMiao Xie 
128716cdcec7SMiao Xie 	delayed_node = btrfs_get_or_create_delayed_node(dir);
128816cdcec7SMiao Xie 	if (IS_ERR(delayed_node))
128916cdcec7SMiao Xie 		return PTR_ERR(delayed_node);
129016cdcec7SMiao Xie 
	/* item data holds the dir_item header plus the name right after it */
129116cdcec7SMiao Xie 	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
129216cdcec7SMiao Xie 	if (!delayed_item) {
129316cdcec7SMiao Xie 		ret = -ENOMEM;
129416cdcec7SMiao Xie 		goto release_node;
129516cdcec7SMiao Xie 	}
129616cdcec7SMiao Xie 
129716cdcec7SMiao Xie 	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
129816cdcec7SMiao Xie 	/*
129916cdcec7SMiao Xie 	 * we have reserved enough space when we start a new transaction,
130016cdcec7SMiao Xie 	 * so reserving metadata failure is impossible
130116cdcec7SMiao Xie 	 */
130216cdcec7SMiao Xie 	BUG_ON(ret);
130316cdcec7SMiao Xie 
13040d0ca30fSChris Mason 	delayed_item->key.objectid = btrfs_ino(dir);
130516cdcec7SMiao Xie 	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
130616cdcec7SMiao Xie 	delayed_item->key.offset = index;
130716cdcec7SMiao Xie 
130816cdcec7SMiao Xie 	dir_item = (struct btrfs_dir_item *)delayed_item->data;
130916cdcec7SMiao Xie 	dir_item->location = *disk_key;
131016cdcec7SMiao Xie 	dir_item->transid = cpu_to_le64(trans->transid);
131116cdcec7SMiao Xie 	dir_item->data_len = 0;
131216cdcec7SMiao Xie 	dir_item->name_len = cpu_to_le16(name_len);
131316cdcec7SMiao Xie 	dir_item->type = type;
131416cdcec7SMiao Xie 	memcpy((char *)(dir_item + 1), name, name_len);
131516cdcec7SMiao Xie 
131616cdcec7SMiao Xie 	mutex_lock(&delayed_node->mutex);
131716cdcec7SMiao Xie 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
131816cdcec7SMiao Xie 	if (unlikely(ret)) {
131916cdcec7SMiao Xie 		printk(KERN_ERR "err add delayed dir index item(name: %s) into "
132016cdcec7SMiao Xie 				"the insertion tree of the delayed node"
132116cdcec7SMiao Xie 				"(root id: %llu, inode id: %llu, errno: %d)\n",
132216cdcec7SMiao Xie 				name,
132316cdcec7SMiao Xie 				(unsigned long long)delayed_node->root->objectid,
132416cdcec7SMiao Xie 				(unsigned long long)delayed_node->inode_id,
132516cdcec7SMiao Xie 				ret);
132616cdcec7SMiao Xie 		BUG();
132716cdcec7SMiao Xie 	}
132816cdcec7SMiao Xie 	mutex_unlock(&delayed_node->mutex);
132916cdcec7SMiao Xie 
133016cdcec7SMiao Xie release_node:
133116cdcec7SMiao Xie 	btrfs_release_delayed_node(delayed_node);
133216cdcec7SMiao Xie 	return ret;
133316cdcec7SMiao Xie }
133416cdcec7SMiao Xie 
/*
 * Cancel a not-yet-committed delayed insertion matching @key.  Returns 0
 * if one was found and released (its reservation dropped), or 1 if no
 * pending insertion exists — the caller must then queue a real deletion.
 */
133516cdcec7SMiao Xie static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
133616cdcec7SMiao Xie 					       struct btrfs_delayed_node *node,
133716cdcec7SMiao Xie 					       struct btrfs_key *key)
133816cdcec7SMiao Xie {
133916cdcec7SMiao Xie 	struct btrfs_delayed_item *item;
134016cdcec7SMiao Xie 
134116cdcec7SMiao Xie 	mutex_lock(&node->mutex);
134216cdcec7SMiao Xie 	item = __btrfs_lookup_delayed_insertion_item(node, key);
134316cdcec7SMiao Xie 	if (!item) {
134416cdcec7SMiao Xie 		mutex_unlock(&node->mutex);
134516cdcec7SMiao Xie 		return 1;
134616cdcec7SMiao Xie 	}
134716cdcec7SMiao Xie 
134816cdcec7SMiao Xie 	btrfs_delayed_item_release_metadata(root, item);
134916cdcec7SMiao Xie 	btrfs_release_delayed_item(item);
135016cdcec7SMiao Xie 	mutex_unlock(&node->mutex);
135116cdcec7SMiao Xie 	return 0;
135216cdcec7SMiao Xie }
135316cdcec7SMiao Xie 
/*
 * Delete a directory index entry via the delayed machinery: first try to
 * cancel a pending delayed insertion with the same key; if none exists,
 * queue a delayed deletion item instead.  As with insertion, metadata
 * reservation failure is considered impossible and BUGs, as is a
 * duplicate key in the deletion tree.
 */
135416cdcec7SMiao Xie int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
135516cdcec7SMiao Xie 				   struct btrfs_root *root, struct inode *dir,
135616cdcec7SMiao Xie 				   u64 index)
135716cdcec7SMiao Xie {
135816cdcec7SMiao Xie 	struct btrfs_delayed_node *node;
135916cdcec7SMiao Xie 	struct btrfs_delayed_item *item;
136016cdcec7SMiao Xie 	struct btrfs_key item_key;
136116cdcec7SMiao Xie 	int ret;
136216cdcec7SMiao Xie 
136316cdcec7SMiao Xie 	node = btrfs_get_or_create_delayed_node(dir);
136416cdcec7SMiao Xie 	if (IS_ERR(node))
136516cdcec7SMiao Xie 		return PTR_ERR(node);
136616cdcec7SMiao Xie 
13670d0ca30fSChris Mason 	item_key.objectid = btrfs_ino(dir);
136816cdcec7SMiao Xie 	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
136916cdcec7SMiao Xie 	item_key.offset = index;
137016cdcec7SMiao Xie 
	/* a pending insertion with this key cancels out — nothing to delete */
137116cdcec7SMiao Xie 	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
137216cdcec7SMiao Xie 	if (!ret)
137316cdcec7SMiao Xie 		goto end;
137416cdcec7SMiao Xie 
	/* deletion items carry no payload, only the key */
137516cdcec7SMiao Xie 	item = btrfs_alloc_delayed_item(0);
137616cdcec7SMiao Xie 	if (!item) {
137716cdcec7SMiao Xie 		ret = -ENOMEM;
137816cdcec7SMiao Xie 		goto end;
137916cdcec7SMiao Xie 	}
138016cdcec7SMiao Xie 
138116cdcec7SMiao Xie 	item->key = item_key;
138216cdcec7SMiao Xie 
138316cdcec7SMiao Xie 	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
138416cdcec7SMiao Xie 	/*
138516cdcec7SMiao Xie 	 * we have reserved enough space when we start a new transaction,
138616cdcec7SMiao Xie 	 * so reserving metadata failure is impossible.
138716cdcec7SMiao Xie 	 */
138816cdcec7SMiao Xie 	BUG_ON(ret);
138916cdcec7SMiao Xie 
139016cdcec7SMiao Xie 	mutex_lock(&node->mutex);
139116cdcec7SMiao Xie 	ret = __btrfs_add_delayed_deletion_item(node, item);
139216cdcec7SMiao Xie 	if (unlikely(ret)) {
139316cdcec7SMiao Xie 		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
139416cdcec7SMiao Xie 				"into the deletion tree of the delayed node"
139516cdcec7SMiao Xie 				"(root id: %llu, inode id: %llu, errno: %d)\n",
139616cdcec7SMiao Xie 				(unsigned long long)index,
139716cdcec7SMiao Xie 				(unsigned long long)node->root->objectid,
139816cdcec7SMiao Xie 				(unsigned long long)node->inode_id,
139916cdcec7SMiao Xie 				ret);
140016cdcec7SMiao Xie 		BUG();
140116cdcec7SMiao Xie 	}
140216cdcec7SMiao Xie 	mutex_unlock(&node->mutex);
140316cdcec7SMiao Xie end:
140416cdcec7SMiao Xie 	btrfs_release_delayed_node(node);
140516cdcec7SMiao Xie 	return ret;
140616cdcec7SMiao Xie }
140716cdcec7SMiao Xie 
140816cdcec7SMiao Xie int btrfs_inode_delayed_dir_index_count(struct inode *inode)
140916cdcec7SMiao Xie {
14102f7e33d4SMiao Xie 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
141116cdcec7SMiao Xie 
141216cdcec7SMiao Xie 	if (!delayed_node)
141316cdcec7SMiao Xie 		return -ENOENT;
141416cdcec7SMiao Xie 
141516cdcec7SMiao Xie 	/*
141616cdcec7SMiao Xie 	 * Since we have held i_mutex of this directory, it is impossible that
141716cdcec7SMiao Xie 	 * a new directory index is added into the delayed node and index_cnt
141816cdcec7SMiao Xie 	 * is updated now. So we needn't lock the delayed node.
141916cdcec7SMiao Xie 	 */
14202f7e33d4SMiao Xie 	if (!delayed_node->index_cnt) {
14212f7e33d4SMiao Xie 		btrfs_release_delayed_node(delayed_node);
142216cdcec7SMiao Xie 		return -EINVAL;
14232f7e33d4SMiao Xie 	}
142416cdcec7SMiao Xie 
142516cdcec7SMiao Xie 	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
14262f7e33d4SMiao Xie 	btrfs_release_delayed_node(delayed_node);
14272f7e33d4SMiao Xie 	return 0;
142816cdcec7SMiao Xie }
142916cdcec7SMiao Xie 
143016cdcec7SMiao Xie void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
143116cdcec7SMiao Xie 			     struct list_head *del_list)
143216cdcec7SMiao Xie {
143316cdcec7SMiao Xie 	struct btrfs_delayed_node *delayed_node;
143416cdcec7SMiao Xie 	struct btrfs_delayed_item *item;
143516cdcec7SMiao Xie 
143616cdcec7SMiao Xie 	delayed_node = btrfs_get_delayed_node(inode);
143716cdcec7SMiao Xie 	if (!delayed_node)
143816cdcec7SMiao Xie 		return;
143916cdcec7SMiao Xie 
144016cdcec7SMiao Xie 	mutex_lock(&delayed_node->mutex);
144116cdcec7SMiao Xie 	item = __btrfs_first_delayed_insertion_item(delayed_node);
144216cdcec7SMiao Xie 	while (item) {
144316cdcec7SMiao Xie 		atomic_inc(&item->refs);
144416cdcec7SMiao Xie 		list_add_tail(&item->readdir_list, ins_list);
144516cdcec7SMiao Xie 		item = __btrfs_next_delayed_item(item);
144616cdcec7SMiao Xie 	}
144716cdcec7SMiao Xie 
144816cdcec7SMiao Xie 	item = __btrfs_first_delayed_deletion_item(delayed_node);
144916cdcec7SMiao Xie 	while (item) {
145016cdcec7SMiao Xie 		atomic_inc(&item->refs);
145116cdcec7SMiao Xie 		list_add_tail(&item->readdir_list, del_list);
145216cdcec7SMiao Xie 		item = __btrfs_next_delayed_item(item);
145316cdcec7SMiao Xie 	}
145416cdcec7SMiao Xie 	mutex_unlock(&delayed_node->mutex);
145516cdcec7SMiao Xie 	/*
145616cdcec7SMiao Xie 	 * This delayed node is still cached in the btrfs inode, so refs
145716cdcec7SMiao Xie 	 * must be > 1 now, and we needn't check it is going to be freed
145816cdcec7SMiao Xie 	 * or not.
145916cdcec7SMiao Xie 	 *
146016cdcec7SMiao Xie 	 * Besides that, this function is used to read dir, we do not
146116cdcec7SMiao Xie 	 * insert/delete delayed items in this period. So we also needn't
146216cdcec7SMiao Xie 	 * requeue or dequeue this delayed node.
146316cdcec7SMiao Xie 	 */
146416cdcec7SMiao Xie 	atomic_dec(&delayed_node->refs);
146516cdcec7SMiao Xie }
146616cdcec7SMiao Xie 
146716cdcec7SMiao Xie void btrfs_put_delayed_items(struct list_head *ins_list,
146816cdcec7SMiao Xie 			     struct list_head *del_list)
146916cdcec7SMiao Xie {
147016cdcec7SMiao Xie 	struct btrfs_delayed_item *curr, *next;
147116cdcec7SMiao Xie 
147216cdcec7SMiao Xie 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
147316cdcec7SMiao Xie 		list_del(&curr->readdir_list);
147416cdcec7SMiao Xie 		if (atomic_dec_and_test(&curr->refs))
147516cdcec7SMiao Xie 			kfree(curr);
147616cdcec7SMiao Xie 	}
147716cdcec7SMiao Xie 
147816cdcec7SMiao Xie 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
147916cdcec7SMiao Xie 		list_del(&curr->readdir_list);
148016cdcec7SMiao Xie 		if (atomic_dec_and_test(&curr->refs))
148116cdcec7SMiao Xie 			kfree(curr);
148216cdcec7SMiao Xie 	}
148316cdcec7SMiao Xie }
148416cdcec7SMiao Xie 
148516cdcec7SMiao Xie int btrfs_should_delete_dir_index(struct list_head *del_list,
148616cdcec7SMiao Xie 				  u64 index)
148716cdcec7SMiao Xie {
148816cdcec7SMiao Xie 	struct btrfs_delayed_item *curr, *next;
148916cdcec7SMiao Xie 	int ret;
149016cdcec7SMiao Xie 
149116cdcec7SMiao Xie 	if (list_empty(del_list))
149216cdcec7SMiao Xie 		return 0;
149316cdcec7SMiao Xie 
149416cdcec7SMiao Xie 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
149516cdcec7SMiao Xie 		if (curr->key.offset > index)
149616cdcec7SMiao Xie 			break;
149716cdcec7SMiao Xie 
149816cdcec7SMiao Xie 		list_del(&curr->readdir_list);
149916cdcec7SMiao Xie 		ret = (curr->key.offset == index);
150016cdcec7SMiao Xie 
150116cdcec7SMiao Xie 		if (atomic_dec_and_test(&curr->refs))
150216cdcec7SMiao Xie 			kfree(curr);
150316cdcec7SMiao Xie 
150416cdcec7SMiao Xie 		if (ret)
150516cdcec7SMiao Xie 			return 1;
150616cdcec7SMiao Xie 		else
150716cdcec7SMiao Xie 			continue;
150816cdcec7SMiao Xie 	}
150916cdcec7SMiao Xie 	return 0;
151016cdcec7SMiao Xie }
151116cdcec7SMiao Xie 
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 * Emit directory entries for the pending delayed insertions on
 * @ins_list (snapshotted by btrfs_get_delayed_items()) through
 * @filldir, consuming the list and dropping each item's readdir
 * reference as it goes.
 *
 * Returns 1 when @filldir signalled the caller's buffer is full,
 * 0 when the whole list has been processed.
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		/* Skip entries before the current readdir position. */
		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		/* Item payload: a btrfs_dir_item followed by the name. */
		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		/* Done with this item whether or not filldir took it. */
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}
156516cdcec7SMiao Xie 
/*
 * Accessors for the in-memory (stack) copies of struct btrfs_inode_item
 * and struct btrfs_timespec cached in the delayed node, as opposed to
 * the extent-buffer based accessors used for on-disk items.
 */
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
158616cdcec7SMiao Xie 
158716cdcec7SMiao Xie static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
158816cdcec7SMiao Xie 				  struct btrfs_inode_item *inode_item,
158916cdcec7SMiao Xie 				  struct inode *inode)
159016cdcec7SMiao Xie {
159116cdcec7SMiao Xie 	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
159216cdcec7SMiao Xie 	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
159316cdcec7SMiao Xie 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
159416cdcec7SMiao Xie 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
159516cdcec7SMiao Xie 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
159616cdcec7SMiao Xie 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
159716cdcec7SMiao Xie 	btrfs_set_stack_inode_generation(inode_item,
159816cdcec7SMiao Xie 					 BTRFS_I(inode)->generation);
159916cdcec7SMiao Xie 	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
160016cdcec7SMiao Xie 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
160116cdcec7SMiao Xie 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
160216cdcec7SMiao Xie 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1603ff5714ccSChris Mason 	btrfs_set_stack_inode_block_group(inode_item, 0);
160416cdcec7SMiao Xie 
160516cdcec7SMiao Xie 	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
160616cdcec7SMiao Xie 				     inode->i_atime.tv_sec);
160716cdcec7SMiao Xie 	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
160816cdcec7SMiao Xie 				      inode->i_atime.tv_nsec);
160916cdcec7SMiao Xie 
161016cdcec7SMiao Xie 	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
161116cdcec7SMiao Xie 				     inode->i_mtime.tv_sec);
161216cdcec7SMiao Xie 	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
161316cdcec7SMiao Xie 				      inode->i_mtime.tv_nsec);
161416cdcec7SMiao Xie 
161516cdcec7SMiao Xie 	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
161616cdcec7SMiao Xie 				     inode->i_ctime.tv_sec);
161716cdcec7SMiao Xie 	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
161816cdcec7SMiao Xie 				      inode->i_ctime.tv_nsec);
161916cdcec7SMiao Xie }
162016cdcec7SMiao Xie 
16212f7e33d4SMiao Xie int btrfs_fill_inode(struct inode *inode, u32 *rdev)
16222f7e33d4SMiao Xie {
16232f7e33d4SMiao Xie 	struct btrfs_delayed_node *delayed_node;
16242f7e33d4SMiao Xie 	struct btrfs_inode_item *inode_item;
16252f7e33d4SMiao Xie 	struct btrfs_timespec *tspec;
16262f7e33d4SMiao Xie 
16272f7e33d4SMiao Xie 	delayed_node = btrfs_get_delayed_node(inode);
16282f7e33d4SMiao Xie 	if (!delayed_node)
16292f7e33d4SMiao Xie 		return -ENOENT;
16302f7e33d4SMiao Xie 
16312f7e33d4SMiao Xie 	mutex_lock(&delayed_node->mutex);
16322f7e33d4SMiao Xie 	if (!delayed_node->inode_dirty) {
16332f7e33d4SMiao Xie 		mutex_unlock(&delayed_node->mutex);
16342f7e33d4SMiao Xie 		btrfs_release_delayed_node(delayed_node);
16352f7e33d4SMiao Xie 		return -ENOENT;
16362f7e33d4SMiao Xie 	}
16372f7e33d4SMiao Xie 
16382f7e33d4SMiao Xie 	inode_item = &delayed_node->inode_item;
16392f7e33d4SMiao Xie 
16402f7e33d4SMiao Xie 	inode->i_uid = btrfs_stack_inode_uid(inode_item);
16412f7e33d4SMiao Xie 	inode->i_gid = btrfs_stack_inode_gid(inode_item);
16422f7e33d4SMiao Xie 	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
16432f7e33d4SMiao Xie 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
16442f7e33d4SMiao Xie 	inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
16452f7e33d4SMiao Xie 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
16462f7e33d4SMiao Xie 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
16472f7e33d4SMiao Xie 	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
16482f7e33d4SMiao Xie 	inode->i_rdev = 0;
16492f7e33d4SMiao Xie 	*rdev = btrfs_stack_inode_rdev(inode_item);
16502f7e33d4SMiao Xie 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
16512f7e33d4SMiao Xie 
16522f7e33d4SMiao Xie 	tspec = btrfs_inode_atime(inode_item);
16532f7e33d4SMiao Xie 	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
16542f7e33d4SMiao Xie 	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
16552f7e33d4SMiao Xie 
16562f7e33d4SMiao Xie 	tspec = btrfs_inode_mtime(inode_item);
16572f7e33d4SMiao Xie 	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
16582f7e33d4SMiao Xie 	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
16592f7e33d4SMiao Xie 
16602f7e33d4SMiao Xie 	tspec = btrfs_inode_ctime(inode_item);
16612f7e33d4SMiao Xie 	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
16622f7e33d4SMiao Xie 	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
16632f7e33d4SMiao Xie 
16642f7e33d4SMiao Xie 	inode->i_generation = BTRFS_I(inode)->generation;
16652f7e33d4SMiao Xie 	BTRFS_I(inode)->index_cnt = (u64)-1;
16662f7e33d4SMiao Xie 
16672f7e33d4SMiao Xie 	mutex_unlock(&delayed_node->mutex);
16682f7e33d4SMiao Xie 	btrfs_release_delayed_node(delayed_node);
16692f7e33d4SMiao Xie 	return 0;
16702f7e33d4SMiao Xie }
16712f7e33d4SMiao Xie 
/*
 * Record the current state of @inode in its delayed node so the inode
 * item can be written back lazily instead of updating the fs tree for
 * every change.
 *
 * Returns 0 on success or a negative errno when the delayed node could
 * not be obtained.
 */
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		/* Already queued: just refresh the cached inode item. */
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	/*
	 * we must reserve enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	/* First time the inode goes dirty in this node: account it. */
	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
170416cdcec7SMiao Xie 
170516cdcec7SMiao Xie static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
170616cdcec7SMiao Xie {
170716cdcec7SMiao Xie 	struct btrfs_root *root = delayed_node->root;
170816cdcec7SMiao Xie 	struct btrfs_delayed_item *curr_item, *prev_item;
170916cdcec7SMiao Xie 
171016cdcec7SMiao Xie 	mutex_lock(&delayed_node->mutex);
171116cdcec7SMiao Xie 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
171216cdcec7SMiao Xie 	while (curr_item) {
171316cdcec7SMiao Xie 		btrfs_delayed_item_release_metadata(root, curr_item);
171416cdcec7SMiao Xie 		prev_item = curr_item;
171516cdcec7SMiao Xie 		curr_item = __btrfs_next_delayed_item(prev_item);
171616cdcec7SMiao Xie 		btrfs_release_delayed_item(prev_item);
171716cdcec7SMiao Xie 	}
171816cdcec7SMiao Xie 
171916cdcec7SMiao Xie 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
172016cdcec7SMiao Xie 	while (curr_item) {
172116cdcec7SMiao Xie 		btrfs_delayed_item_release_metadata(root, curr_item);
172216cdcec7SMiao Xie 		prev_item = curr_item;
172316cdcec7SMiao Xie 		curr_item = __btrfs_next_delayed_item(prev_item);
172416cdcec7SMiao Xie 		btrfs_release_delayed_item(prev_item);
172516cdcec7SMiao Xie 	}
172616cdcec7SMiao Xie 
172716cdcec7SMiao Xie 	if (delayed_node->inode_dirty) {
172816cdcec7SMiao Xie 		btrfs_delayed_inode_release_metadata(root, delayed_node);
172916cdcec7SMiao Xie 		btrfs_release_delayed_inode(delayed_node);
173016cdcec7SMiao Xie 	}
173116cdcec7SMiao Xie 	mutex_unlock(&delayed_node->mutex);
173216cdcec7SMiao Xie }
173316cdcec7SMiao Xie 
/*
 * Discard all delayed work queued for @inode, if it has a delayed node.
 */
void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *node = btrfs_get_delayed_node(inode);

	/* Nothing to do if the inode never got a delayed node. */
	if (!node)
		return;

	__btrfs_kill_delayed_node(node);
	btrfs_release_delayed_node(node);
}
174516cdcec7SMiao Xie 
174616cdcec7SMiao Xie void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
174716cdcec7SMiao Xie {
174816cdcec7SMiao Xie 	u64 inode_id = 0;
174916cdcec7SMiao Xie 	struct btrfs_delayed_node *delayed_nodes[8];
175016cdcec7SMiao Xie 	int i, n;
175116cdcec7SMiao Xie 
175216cdcec7SMiao Xie 	while (1) {
175316cdcec7SMiao Xie 		spin_lock(&root->inode_lock);
175416cdcec7SMiao Xie 		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
175516cdcec7SMiao Xie 					   (void **)delayed_nodes, inode_id,
175616cdcec7SMiao Xie 					   ARRAY_SIZE(delayed_nodes));
175716cdcec7SMiao Xie 		if (!n) {
175816cdcec7SMiao Xie 			spin_unlock(&root->inode_lock);
175916cdcec7SMiao Xie 			break;
176016cdcec7SMiao Xie 		}
176116cdcec7SMiao Xie 
176216cdcec7SMiao Xie 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
176316cdcec7SMiao Xie 
176416cdcec7SMiao Xie 		for (i = 0; i < n; i++)
176516cdcec7SMiao Xie 			atomic_inc(&delayed_nodes[i]->refs);
176616cdcec7SMiao Xie 		spin_unlock(&root->inode_lock);
176716cdcec7SMiao Xie 
176816cdcec7SMiao Xie 		for (i = 0; i < n; i++) {
176916cdcec7SMiao Xie 			__btrfs_kill_delayed_node(delayed_nodes[i]);
177016cdcec7SMiao Xie 			btrfs_release_delayed_node(delayed_nodes[i]);
177116cdcec7SMiao Xie 		}
177216cdcec7SMiao Xie 	}
177316cdcec7SMiao Xie }
1774