xref: /openbmc/linux/fs/btrfs/delayed-inode.c (revision cd5d5810)
1 /*
2  * Copyright (C) 2011 Fujitsu.  All rights reserved.
3  * Written by Miao Xie <miaox@cn.fujitsu.com>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public
7  * License v2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public
15  * License along with this program; if not, write to the
16  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17  * Boston, MA 02111-1307, USA.
18  */
19 
20 #include <linux/slab.h>
21 #include "delayed-inode.h"
22 #include "disk-io.h"
23 #include "transaction.h"
24 #include "ctree.h"
25 
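/*
 * Tuning knobs for the delayed-item machinery (see
 * btrfs_balance_delayed_items() and finish_one_item() below): once the
 * number of pending delayed items reaches BTRFS_DELAYED_BACKGROUND an
 * async flush is kicked off; once it reaches BTRFS_DELAYED_WRITEBACK the
 * producing task itself blocks, and waiters are woken up roughly every
 * BTRFS_DELAYED_BATCH completed items.
 */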
26 #define BTRFS_DELAYED_WRITEBACK		512
27 #define BTRFS_DELAYED_BACKGROUND	128
28 #define BTRFS_DELAYED_BATCH		16
29 
30 static struct kmem_cache *delayed_node_cache;
31 
32 int __init btrfs_delayed_inode_init(void)
33 {
34 	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
35 					sizeof(struct btrfs_delayed_node),
36 					0,
37 					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
38 					NULL);
39 	if (!delayed_node_cache)
40 		return -ENOMEM;
41 	return 0;
42 }
43 
44 void btrfs_delayed_inode_exit(void)
45 {
46 	if (delayed_node_cache)
47 		kmem_cache_destroy(delayed_node_cache);
48 }
49 
50 static inline void btrfs_init_delayed_node(
51 				struct btrfs_delayed_node *delayed_node,
52 				struct btrfs_root *root, u64 inode_id)
53 {
54 	delayed_node->root = root;
55 	delayed_node->inode_id = inode_id;
56 	atomic_set(&delayed_node->refs, 0);
57 	delayed_node->count = 0;
58 	delayed_node->in_list = 0;
59 	delayed_node->inode_dirty = 0;
60 	delayed_node->ins_root = RB_ROOT;
61 	delayed_node->del_root = RB_ROOT;
62 	mutex_init(&delayed_node->mutex);
63 	delayed_node->index_cnt = 0;
64 	INIT_LIST_HEAD(&delayed_node->n_list);
65 	INIT_LIST_HEAD(&delayed_node->p_list);
66 	delayed_node->bytes_reserved = 0;
67 	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
68 }
69 
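/*
 * Two delayed items are "continuous" if they are dir index items of the
 * same directory with adjacent index offsets; such runs can be batched
 * into a single leaf operation on insert and on delete.
 */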
70 static inline int btrfs_is_continuous_delayed_item(
71 					struct btrfs_delayed_item *item1,
72 					struct btrfs_delayed_item *item2)
73 {
74 	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
75 	    item1->key.objectid == item2->key.objectid &&
76 	    item1->key.type == item2->key.type &&
77 	    item1->key.offset + 1 == item2->key.offset)
78 		return 1;
79 	return 0;
80 }
81 
82 static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
83 							struct btrfs_root *root)
84 {
85 	return root->fs_info->delayed_root;
86 }
87 
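/*
 * Look up the delayed node for @inode, first via the pointer cached in the
 * btrfs inode (lockless fast path) and then via the root's radix tree.
 * A reference is taken on the node that is returned; the first radix tree
 * hit additionally installs the node into btrfs_inode->delayed_node and
 * takes the "cached in the inode" reference.
 */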
88 static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
89 {
90 	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
91 	struct btrfs_root *root = btrfs_inode->root;
92 	u64 ino = btrfs_ino(inode);
93 	struct btrfs_delayed_node *node;
94 
95 	node = ACCESS_ONCE(btrfs_inode->delayed_node);
96 	if (node) {
97 		atomic_inc(&node->refs);
98 		return node;
99 	}
100 
101 	spin_lock(&root->inode_lock);
102 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
103 	if (node) {
104 		if (btrfs_inode->delayed_node) {
105 			atomic_inc(&node->refs);	/* can be accessed */
106 			BUG_ON(btrfs_inode->delayed_node != node);
107 			spin_unlock(&root->inode_lock);
108 			return node;
109 		}
110 		btrfs_inode->delayed_node = node;
111 		atomic_inc(&node->refs);	/* can be accessed */
112 		atomic_inc(&node->refs);	/* cached in the inode */
113 		spin_unlock(&root->inode_lock);
114 		return node;
115 	}
116 	spin_unlock(&root->inode_lock);
117 
118 	return NULL;
119 }
120 
121 /* Will return either the node or PTR_ERR(-ENOMEM) */
122 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
123 							struct inode *inode)
124 {
125 	struct btrfs_delayed_node *node;
126 	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
127 	struct btrfs_root *root = btrfs_inode->root;
128 	u64 ino = btrfs_ino(inode);
129 	int ret;
130 
131 again:
132 	node = btrfs_get_delayed_node(inode);
133 	if (node)
134 		return node;
135 
136 	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
137 	if (!node)
138 		return ERR_PTR(-ENOMEM);
139 	btrfs_init_delayed_node(node, root, ino);
140 
141 	atomic_inc(&node->refs);	/* cached in the btrfs inode */
142 	atomic_inc(&node->refs);	/* can be accessed */
143 
144 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
145 	if (ret) {
146 		kmem_cache_free(delayed_node_cache, node);
147 		return ERR_PTR(ret);
148 	}
149 
150 	spin_lock(&root->inode_lock);
151 	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
152 	if (ret == -EEXIST) {
153 		kmem_cache_free(delayed_node_cache, node);
154 		spin_unlock(&root->inode_lock);
155 		radix_tree_preload_end();
156 		goto again;
157 	}
158 	btrfs_inode->delayed_node = node;
159 	spin_unlock(&root->inode_lock);
160 	radix_tree_preload_end();
161 
162 	return node;
163 }
164 
165 /*
166  * Must be called while holding delayed_node->mutex.
167  *
168  * If mod = 1, add this node into the prepared list.
169  */
170 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
171 				     struct btrfs_delayed_node *node,
172 				     int mod)
173 {
174 	spin_lock(&root->lock);
175 	if (node->in_list) {
176 		if (!list_empty(&node->p_list))
177 			list_move_tail(&node->p_list, &root->prepare_list);
178 		else if (mod)
179 			list_add_tail(&node->p_list, &root->prepare_list);
180 	} else {
181 		list_add_tail(&node->n_list, &root->node_list);
182 		list_add_tail(&node->p_list, &root->prepare_list);
183 		atomic_inc(&node->refs);	/* inserted into list */
184 		root->nodes++;
185 		node->in_list = 1;
186 	}
187 	spin_unlock(&root->lock);
188 }
189 
190 /* Must be called while holding delayed_node->mutex */
191 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
192 				       struct btrfs_delayed_node *node)
193 {
194 	spin_lock(&root->lock);
195 	if (node->in_list) {
196 		root->nodes--;
197 		atomic_dec(&node->refs);	/* not in the list */
198 		list_del_init(&node->n_list);
199 		if (!list_empty(&node->p_list))
200 			list_del_init(&node->p_list);
201 		node->in_list = 0;
202 	}
203 	spin_unlock(&root->lock);
204 }
205 
206 static struct btrfs_delayed_node *btrfs_first_delayed_node(
207 			struct btrfs_delayed_root *delayed_root)
208 {
209 	struct list_head *p;
210 	struct btrfs_delayed_node *node = NULL;
211 
212 	spin_lock(&delayed_root->lock);
213 	if (list_empty(&delayed_root->node_list))
214 		goto out;
215 
216 	p = delayed_root->node_list.next;
217 	node = list_entry(p, struct btrfs_delayed_node, n_list);
218 	atomic_inc(&node->refs);
219 out:
220 	spin_unlock(&delayed_root->lock);
221 
222 	return node;
223 }
224 
225 static struct btrfs_delayed_node *btrfs_next_delayed_node(
226 						struct btrfs_delayed_node *node)
227 {
228 	struct btrfs_delayed_root *delayed_root;
229 	struct list_head *p;
230 	struct btrfs_delayed_node *next = NULL;
231 
232 	delayed_root = node->root->fs_info->delayed_root;
233 	spin_lock(&delayed_root->lock);
234 	if (!node->in_list) {	/* not in the list */
235 		if (list_empty(&delayed_root->node_list))
236 			goto out;
237 		p = delayed_root->node_list.next;
238 	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
239 		goto out;
240 	else
241 		p = node->n_list.next;
242 
243 	next = list_entry(p, struct btrfs_delayed_node, n_list);
244 	atomic_inc(&next->refs);
245 out:
246 	spin_unlock(&delayed_root->lock);
247 
248 	return next;
249 }
250 
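/*
 * Drop a reference on @delayed_node.  While the node still has queued
 * items it is (re)queued on the work lists; otherwise it is dequeued.
 * When the last reference goes away the node is deleted from the root's
 * radix tree and freed; the recheck of refs under inode_lock closes the
 * race with a concurrent btrfs_get_delayed_node() finding the node in
 * the tree.
 */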
251 static void __btrfs_release_delayed_node(
252 				struct btrfs_delayed_node *delayed_node,
253 				int mod)
254 {
255 	struct btrfs_delayed_root *delayed_root;
256 
257 	if (!delayed_node)
258 		return;
259 
260 	delayed_root = delayed_node->root->fs_info->delayed_root;
261 
262 	mutex_lock(&delayed_node->mutex);
263 	if (delayed_node->count)
264 		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
265 	else
266 		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
267 	mutex_unlock(&delayed_node->mutex);
268 
269 	if (atomic_dec_and_test(&delayed_node->refs)) {
270 		struct btrfs_root *root = delayed_node->root;
271 		spin_lock(&root->inode_lock);
272 		if (atomic_read(&delayed_node->refs) == 0) {
273 			radix_tree_delete(&root->delayed_nodes_tree,
274 					  delayed_node->inode_id);
275 			kmem_cache_free(delayed_node_cache, delayed_node);
276 		}
277 		spin_unlock(&root->inode_lock);
278 	}
279 }
280 
281 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
282 {
283 	__btrfs_release_delayed_node(node, 0);
284 }
285 
286 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
287 					struct btrfs_delayed_root *delayed_root)
288 {
289 	struct list_head *p;
290 	struct btrfs_delayed_node *node = NULL;
291 
292 	spin_lock(&delayed_root->lock);
293 	if (list_empty(&delayed_root->prepare_list))
294 		goto out;
295 
296 	p = delayed_root->prepare_list.next;
297 	list_del_init(p);
298 	node = list_entry(p, struct btrfs_delayed_node, p_list);
299 	atomic_inc(&node->refs);
300 out:
301 	spin_unlock(&delayed_root->lock);
302 
303 	return node;
304 }
305 
306 static inline void btrfs_release_prepared_delayed_node(
307 					struct btrfs_delayed_node *node)
308 {
309 	__btrfs_release_delayed_node(node, 1);
310 }
311 
312 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
313 {
314 	struct btrfs_delayed_item *item;
315 	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
316 	if (item) {
317 		item->data_len = data_len;
318 		item->ins_or_del = 0;
319 		item->bytes_reserved = 0;
320 		item->delayed_node = NULL;
321 		atomic_set(&item->refs, 1);
322 	}
323 	return item;
324 }
325 
326 /*
327  * __btrfs_lookup_delayed_item - look up the delayed item by key
328  * @root:	  the rb-root (ins_root or del_root) of the delayed node to search
329  * @key:	  the key to look up
330  * @prev:	  used to store the prev item if the right item isn't found
331  * @next:	  used to store the next item if the right item isn't found
332  *
333  * Note: if we don't find the right item, we will return the prev item and
334  * the next item.
335  */
336 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
337 				struct rb_root *root,
338 				struct btrfs_key *key,
339 				struct btrfs_delayed_item **prev,
340 				struct btrfs_delayed_item **next)
341 {
342 	struct rb_node *node, *prev_node = NULL;
343 	struct btrfs_delayed_item *delayed_item = NULL;
344 	int ret = 0;
345 
346 	node = root->rb_node;
347 
348 	while (node) {
349 		delayed_item = rb_entry(node, struct btrfs_delayed_item,
350 					rb_node);
351 		prev_node = node;
352 		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
353 		if (ret < 0)
354 			node = node->rb_right;
355 		else if (ret > 0)
356 			node = node->rb_left;
357 		else
358 			return delayed_item;
359 	}
360 
361 	if (prev) {
362 		if (!prev_node)
363 			*prev = NULL;
364 		else if (ret < 0)
365 			*prev = delayed_item;
366 		else if ((node = rb_prev(prev_node)) != NULL) {
367 			*prev = rb_entry(node, struct btrfs_delayed_item,
368 					 rb_node);
369 		} else
370 			*prev = NULL;
371 	}
372 
373 	if (next) {
374 		if (!prev_node)
375 			*next = NULL;
376 		else if (ret > 0)
377 			*next = delayed_item;
378 		else if ((node = rb_next(prev_node)) != NULL) {
379 			*next = rb_entry(node, struct btrfs_delayed_item,
380 					 rb_node);
381 		} else
382 			*next = NULL;
383 	}
384 	return NULL;
385 }
386 
387 static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
388 					struct btrfs_delayed_node *delayed_node,
389 					struct btrfs_key *key)
390 {
391 	struct btrfs_delayed_item *item;
392 
393 	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
394 					   NULL, NULL);
395 	return item;
396 }
397 
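/*
 * Link @ins into the insertion or deletion rb-tree of @delayed_node
 * (depending on @action), keyed by its btrfs key.  Returns -EEXIST if an
 * item with the same key is already queued.  For queued dir index
 * insertions, index_cnt is also pushed past the new index so the next
 * allocated index won't reuse it.
 */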
398 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
399 				    struct btrfs_delayed_item *ins,
400 				    int action)
401 {
402 	struct rb_node **p, *node;
403 	struct rb_node *parent_node = NULL;
404 	struct rb_root *root;
405 	struct btrfs_delayed_item *item;
406 	int cmp;
407 
408 	if (action == BTRFS_DELAYED_INSERTION_ITEM)
409 		root = &delayed_node->ins_root;
410 	else if (action == BTRFS_DELAYED_DELETION_ITEM)
411 		root = &delayed_node->del_root;
412 	else
413 		BUG();
414 	p = &root->rb_node;
415 	node = &ins->rb_node;
416 
417 	while (*p) {
418 		parent_node = *p;
419 		item = rb_entry(parent_node, struct btrfs_delayed_item,
420 				 rb_node);
421 
422 		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
423 		if (cmp < 0)
424 			p = &(*p)->rb_right;
425 		else if (cmp > 0)
426 			p = &(*p)->rb_left;
427 		else
428 			return -EEXIST;
429 	}
430 
431 	rb_link_node(node, parent_node, p);
432 	rb_insert_color(node, root);
433 	ins->delayed_node = delayed_node;
434 	ins->ins_or_del = action;
435 
436 	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
437 	    action == BTRFS_DELAYED_INSERTION_ITEM &&
438 	    ins->key.offset >= delayed_node->index_cnt)
439 			delayed_node->index_cnt = ins->key.offset + 1;
440 
441 	delayed_node->count++;
442 	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
443 	return 0;
444 }
445 
446 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
447 					      struct btrfs_delayed_item *item)
448 {
449 	return __btrfs_add_delayed_item(node, item,
450 					BTRFS_DELAYED_INSERTION_ITEM);
451 }
452 
453 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
454 					     struct btrfs_delayed_item *item)
455 {
456 	return __btrfs_add_delayed_item(node, item,
457 					BTRFS_DELAYED_DELETION_ITEM);
458 }
459 
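/*
 * Account one finished delayed item and wake up anyone throttled in
 * btrfs_balance_delayed_items(): either the backlog just dropped below
 * BTRFS_DELAYED_BACKGROUND, or another BTRFS_DELAYED_BATCH items have
 * completed since the waiter sampled items_seq.
 */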
460 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
461 {
462 	int seq = atomic_inc_return(&delayed_root->items_seq);
463 	if ((atomic_dec_return(&delayed_root->items) <
464 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
465 	    waitqueue_active(&delayed_root->wait))
466 		wake_up(&delayed_root->wait);
467 }
468 
469 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
470 {
471 	struct rb_root *root;
472 	struct btrfs_delayed_root *delayed_root;
473 
474 	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
475 
476 	BUG_ON(!delayed_root);
477 	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
478 	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
479 
480 	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
481 		root = &delayed_item->delayed_node->ins_root;
482 	else
483 		root = &delayed_item->delayed_node->del_root;
484 
485 	rb_erase(&delayed_item->rb_node, root);
486 	delayed_item->delayed_node->count--;
487 
488 	finish_one_item(delayed_root);
489 }
490 
491 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
492 {
493 	if (item) {
494 		__btrfs_remove_delayed_item(item);
495 		if (atomic_dec_and_test(&item->refs))
496 			kfree(item);
497 	}
498 }
499 
500 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
501 					struct btrfs_delayed_node *delayed_node)
502 {
503 	struct rb_node *p;
504 	struct btrfs_delayed_item *item = NULL;
505 
506 	p = rb_first(&delayed_node->ins_root);
507 	if (p)
508 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
509 
510 	return item;
511 }
512 
513 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
514 					struct btrfs_delayed_node *delayed_node)
515 {
516 	struct rb_node *p;
517 	struct btrfs_delayed_item *item = NULL;
518 
519 	p = rb_first(&delayed_node->del_root);
520 	if (p)
521 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
522 
523 	return item;
524 }
525 
526 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
527 						struct btrfs_delayed_item *item)
528 {
529 	struct rb_node *p;
530 	struct btrfs_delayed_item *next = NULL;
531 
532 	p = rb_next(&item->rb_node);
533 	if (p)
534 		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
535 
536 	return next;
537 }
538 
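/*
 * Reserve the metadata space for one future tree operation by moving it
 * from the transaction's reservation into the global delayed_block_rsv,
 * so the space is still set aside when the delayed item is actually
 * applied to the tree, possibly long after this transaction handle is
 * gone.
 */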
539 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
540 					       struct btrfs_root *root,
541 					       struct btrfs_delayed_item *item)
542 {
543 	struct btrfs_block_rsv *src_rsv;
544 	struct btrfs_block_rsv *dst_rsv;
545 	u64 num_bytes;
546 	int ret;
547 
548 	if (!trans->bytes_reserved)
549 		return 0;
550 
551 	src_rsv = trans->block_rsv;
552 	dst_rsv = &root->fs_info->delayed_block_rsv;
553 
554 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
555 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
556 	if (!ret) {
557 		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
558 					      item->key.objectid,
559 					      num_bytes, 1);
560 		item->bytes_reserved = num_bytes;
561 	}
562 
563 	return ret;
564 }
565 
566 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
567 						struct btrfs_delayed_item *item)
568 {
569 	struct btrfs_block_rsv *rsv;
570 
571 	if (!item->bytes_reserved)
572 		return;
573 
574 	rsv = &root->fs_info->delayed_block_rsv;
575 	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
576 				      item->key.objectid, item->bytes_reserved,
577 				      0);
578 	btrfs_block_rsv_release(root, rsv,
579 				item->bytes_reserved);
580 }
581 
582 static int btrfs_delayed_inode_reserve_metadata(
583 					struct btrfs_trans_handle *trans,
584 					struct btrfs_root *root,
585 					struct inode *inode,
586 					struct btrfs_delayed_node *node)
587 {
588 	struct btrfs_block_rsv *src_rsv;
589 	struct btrfs_block_rsv *dst_rsv;
590 	u64 num_bytes;
591 	int ret;
592 	bool release = false;
593 
594 	src_rsv = trans->block_rsv;
595 	dst_rsv = &root->fs_info->delayed_block_rsv;
596 
597 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
598 
599 	/*
600 	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
601 	 * which, for speed, doesn't reserve space.  This is a problem since we
602 	 * still need to reserve space for this update, so try to reserve the
603 	 * space here.
604 	 *
605 	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
606 	 * we're already accounted for.
607 	 */
608 	if (!src_rsv || (!trans->bytes_reserved &&
609 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
610 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
611 					  BTRFS_RESERVE_NO_FLUSH);
612 		/*
613 		 * Since we're under a transaction reserve_metadata_bytes could
614 		 * try to commit the transaction which will make it return
615 		 * EAGAIN to make us stop the transaction we have, so return
616 		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
617 		 */
618 		if (ret == -EAGAIN)
619 			ret = -ENOSPC;
620 		if (!ret) {
621 			node->bytes_reserved = num_bytes;
622 			trace_btrfs_space_reservation(root->fs_info,
623 						      "delayed_inode",
624 						      btrfs_ino(inode),
625 						      num_bytes, 1);
626 		}
627 		return ret;
628 	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
629 		spin_lock(&BTRFS_I(inode)->lock);
630 		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
631 				       &BTRFS_I(inode)->runtime_flags)) {
632 			spin_unlock(&BTRFS_I(inode)->lock);
633 			release = true;
634 			goto migrate;
635 		}
636 		spin_unlock(&BTRFS_I(inode)->lock);
637 
638 		/* Ok we didn't have space pre-reserved.  This shouldn't happen
639 		 * too often but it can happen if we do delalloc to an existing
640 		 * inode which gets dirtied because of the time update, and then
641 		 * isn't touched again until after the transaction commits and
642 		 * then we try to write out the data.  First try to be nice and
643 		 * reserve something strictly for us.  If not, be a pain and try
644 		 * to steal from the delalloc block rsv.
645 		 */
646 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
647 					  BTRFS_RESERVE_NO_FLUSH);
648 		if (!ret)
649 			goto out;
650 
651 		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
652 		if (!ret)
653 			goto out;
654 
655 		/*
656 		 * Ok this is a problem, let's just steal from the global rsv
657 		 * since this really shouldn't happen that often.
658 		 */
659 		WARN_ON(1);
660 		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
661 					      dst_rsv, num_bytes);
662 		goto out;
663 	}
664 
665 migrate:
666 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
667 
668 out:
669 	/*
670 	 * Migrate only takes a reservation; it doesn't touch the size of the
671 	 * block_rsv.  This is to simplify things for people who don't normally
672 	 * have things migrated from their block rsv.  If they go to release
673 	 * their reservation, that will decrease the size as well, so if migrate
674 	 * reduced the size we'd end up with a negative size.  But for the
675 	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
676 	 * while we could in fact do this reserve/migrate dance several times
677 	 * between the time we did the original reservation and the time we'd
678 	 * clean it up.  So to take care of this, release the space for the meta
679 	 * reservation here.  I think it may be time for a documentation page on
680 	 * how block rsvs work.
681 	 */
682 	if (!ret) {
683 		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
684 					      btrfs_ino(inode), num_bytes, 1);
685 		node->bytes_reserved = num_bytes;
686 	}
687 
688 	if (release) {
689 		trace_btrfs_space_reservation(root->fs_info, "delalloc",
690 					      btrfs_ino(inode), num_bytes, 0);
691 		btrfs_block_rsv_release(root, src_rsv, num_bytes);
692 	}
693 
694 	return ret;
695 }
696 
697 static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
698 						struct btrfs_delayed_node *node)
699 {
700 	struct btrfs_block_rsv *rsv;
701 
702 	if (!node->bytes_reserved)
703 		return;
704 
705 	rsv = &root->fs_info->delayed_block_rsv;
706 	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
707 				      node->inode_id, node->bytes_reserved, 0);
708 	btrfs_block_rsv_release(root, rsv,
709 				node->bytes_reserved);
710 	node->bytes_reserved = 0;
711 }
712 
713 /*
714  * This helper will insert some continuous items into the same leaf according
715  * to the free space of the leaf.
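 *
 * Note that the caller has already inserted the first item of the run and
 * advanced path->slots[0] past it; @item is the next queued delayed item,
 * and everything batched here lands in the slots that follow.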
716  */
717 static int btrfs_batch_insert_items(struct btrfs_root *root,
718 				    struct btrfs_path *path,
719 				    struct btrfs_delayed_item *item)
720 {
721 	struct btrfs_delayed_item *curr, *next;
722 	int free_space;
723 	int total_data_size = 0, total_size = 0;
724 	struct extent_buffer *leaf;
725 	char *data_ptr;
726 	struct btrfs_key *keys;
727 	u32 *data_size;
728 	struct list_head head;
729 	int slot;
730 	int nitems;
731 	int i;
732 	int ret = 0;
733 
734 	BUG_ON(!path->nodes[0]);
735 
736 	leaf = path->nodes[0];
737 	free_space = btrfs_leaf_free_space(root, leaf);
738 	INIT_LIST_HEAD(&head);
739 
740 	next = item;
741 	nitems = 0;
742 
743 	/*
744 	 * count the number of continuous items that we can insert in one batch
745 	 */
746 	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
747 	       free_space) {
748 		total_data_size += next->data_len;
749 		total_size += next->data_len + sizeof(struct btrfs_item);
750 		list_add_tail(&next->tree_list, &head);
751 		nitems++;
752 
753 		curr = next;
754 		next = __btrfs_next_delayed_item(curr);
755 		if (!next)
756 			break;
757 
758 		if (!btrfs_is_continuous_delayed_item(curr, next))
759 			break;
760 	}
761 
762 	if (!nitems) {
763 		ret = 0;
764 		goto out;
765 	}
766 
767 	/*
768 	 * we need to allocate some memory, but that might cause the task
769 	 * to sleep, so we set all the locked nodes in the path to blocking
770 	 * locks first.
771 	 */
772 	btrfs_set_path_blocking(path);
773 
774 	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
775 	if (!keys) {
776 		ret = -ENOMEM;
777 		goto out;
778 	}
779 
780 	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
781 	if (!data_size) {
782 		ret = -ENOMEM;
783 		goto error;
784 	}
785 
786 	/* get keys of all the delayed items */
787 	i = 0;
788 	list_for_each_entry(next, &head, tree_list) {
789 		keys[i] = next->key;
790 		data_size[i] = next->data_len;
791 		i++;
792 	}
793 
794 	/* reset all the locked nodes in the path to spinning locks. */
795 	btrfs_clear_path_blocking(path, NULL, 0);
796 
797 	/* insert the keys of the items */
798 	setup_items_for_insert(root, path, keys, data_size,
799 			       total_data_size, total_size, nitems);
800 
801 	/* insert the dir index items */
802 	slot = path->slots[0];
803 	list_for_each_entry_safe(curr, next, &head, tree_list) {
804 		data_ptr = btrfs_item_ptr(leaf, slot, char);
805 		write_extent_buffer(leaf, &curr->data,
806 				    (unsigned long)data_ptr,
807 				    curr->data_len);
808 		slot++;
809 
810 		btrfs_delayed_item_release_metadata(root, curr);
811 
812 		list_del(&curr->tree_list);
813 		btrfs_release_delayed_item(curr);
814 	}
815 
816 error:
817 	kfree(data_size);
818 	kfree(keys);
819 out:
820 	return ret;
821 }
822 
823 /*
824  * This helper handles simple insertions that don't need to extend the item
825  * for new data, such as directory name index insertion and inode insertion.
826  */
827 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
828 				     struct btrfs_root *root,
829 				     struct btrfs_path *path,
830 				     struct btrfs_delayed_item *delayed_item)
831 {
832 	struct extent_buffer *leaf;
833 	char *ptr;
834 	int ret;
835 
836 	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
837 				      delayed_item->data_len);
838 	if (ret < 0 && ret != -EEXIST)
839 		return ret;
840 
841 	leaf = path->nodes[0];
842 
843 	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
844 
845 	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
846 			    delayed_item->data_len);
847 	btrfs_mark_buffer_dirty(leaf);
848 
849 	btrfs_delayed_item_release_metadata(root, delayed_item);
850 	return 0;
851 }
852 
853 /*
854  * We insert one item first, then if there are continuous items, we try
855  * to insert those items into the same leaf.
856  */
857 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
858 				      struct btrfs_path *path,
859 				      struct btrfs_root *root,
860 				      struct btrfs_delayed_node *node)
861 {
862 	struct btrfs_delayed_item *curr, *prev;
863 	int ret = 0;
864 
865 do_again:
866 	mutex_lock(&node->mutex);
867 	curr = __btrfs_first_delayed_insertion_item(node);
868 	if (!curr)
869 		goto insert_end;
870 
871 	ret = btrfs_insert_delayed_item(trans, root, path, curr);
872 	if (ret < 0) {
873 		btrfs_release_path(path);
874 		goto insert_end;
875 	}
876 
877 	prev = curr;
878 	curr = __btrfs_next_delayed_item(prev);
879 	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
880 		/* insert the continuous items into the same leaf */
881 		path->slots[0]++;
882 		btrfs_batch_insert_items(root, path, curr);
883 	}
884 	btrfs_release_delayed_item(prev);
885 	btrfs_mark_buffer_dirty(path->nodes[0]);
886 
887 	btrfs_release_path(path);
888 	mutex_unlock(&node->mutex);
889 	goto do_again;
890 
891 insert_end:
892 	mutex_unlock(&node->mutex);
893 	return ret;
894 }
895 
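/*
 * Delete a run of adjacent dir index items starting at the slot @path
 * currently points to.  Leaf slots are matched against the queued
 * deletion items one by one, and all matches are removed with a single
 * btrfs_del_items() call.
 */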
896 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
897 				    struct btrfs_root *root,
898 				    struct btrfs_path *path,
899 				    struct btrfs_delayed_item *item)
900 {
901 	struct btrfs_delayed_item *curr, *next;
902 	struct extent_buffer *leaf;
903 	struct btrfs_key key;
904 	struct list_head head;
905 	int nitems, i, last_item;
906 	int ret = 0;
907 
908 	BUG_ON(!path->nodes[0]);
909 
910 	leaf = path->nodes[0];
911 
912 	i = path->slots[0];
913 	last_item = btrfs_header_nritems(leaf) - 1;
914 	if (i > last_item)
915 		return -ENOENT;	/* FIXME: Is errno suitable? */
916 
917 	next = item;
918 	INIT_LIST_HEAD(&head);
919 	btrfs_item_key_to_cpu(leaf, &key, i);
920 	nitems = 0;
921 	/*
922 	 * count the number of dir index items that we can delete in one batch
923 	 */
924 	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
925 		list_add_tail(&next->tree_list, &head);
926 		nitems++;
927 
928 		curr = next;
929 		next = __btrfs_next_delayed_item(curr);
930 		if (!next)
931 			break;
932 
933 		if (!btrfs_is_continuous_delayed_item(curr, next))
934 			break;
935 
936 		i++;
937 		if (i > last_item)
938 			break;
939 		btrfs_item_key_to_cpu(leaf, &key, i);
940 	}
941 
942 	if (!nitems)
943 		return 0;
944 
945 	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
946 	if (ret)
947 		goto out;
948 
949 	list_for_each_entry_safe(curr, next, &head, tree_list) {
950 		btrfs_delayed_item_release_metadata(root, curr);
951 		list_del(&curr->tree_list);
952 		btrfs_release_delayed_item(curr);
953 	}
954 
955 out:
956 	return ret;
957 }
958 
959 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
960 				      struct btrfs_path *path,
961 				      struct btrfs_root *root,
962 				      struct btrfs_delayed_node *node)
963 {
964 	struct btrfs_delayed_item *curr, *prev;
965 	int ret = 0;
966 
967 do_again:
968 	mutex_lock(&node->mutex);
969 	curr = __btrfs_first_delayed_deletion_item(node);
970 	if (!curr)
971 		goto delete_fail;
972 
973 	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
974 	if (ret < 0)
975 		goto delete_fail;
976 	else if (ret > 0) {
977 		/*
978 		 * can't find the item this delayed item points to, so this delayed
979 		 * item is invalid; just drop it.
980 		 */
981 		prev = curr;
982 		curr = __btrfs_next_delayed_item(prev);
983 		btrfs_release_delayed_item(prev);
984 		ret = 0;
985 		btrfs_release_path(path);
986 		if (curr) {
987 			mutex_unlock(&node->mutex);
988 			goto do_again;
989 		} else
990 			goto delete_fail;
991 	}
992 
993 	btrfs_batch_delete_items(trans, root, path, curr);
994 	btrfs_release_path(path);
995 	mutex_unlock(&node->mutex);
996 	goto do_again;
997 
998 delete_fail:
999 	btrfs_release_path(path);
1000 	mutex_unlock(&node->mutex);
1001 	return ret;
1002 }
1003 
1004 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
1005 {
1006 	struct btrfs_delayed_root *delayed_root;
1007 
1008 	if (delayed_node && delayed_node->inode_dirty) {
1009 		BUG_ON(!delayed_node->root);
1010 		delayed_node->inode_dirty = 0;
1011 		delayed_node->count--;
1012 
1013 		delayed_root = delayed_node->root->fs_info->delayed_root;
1014 		finish_one_item(delayed_root);
1015 	}
1016 }
1017 
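/*
 * Copy the inode item cached in @node into the inode item in the fs tree.
 * The caller must hold node->mutex with the node's inode_dirty flag set;
 * on success the metadata reservation is released and the dirty state is
 * cleared.
 */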
1018 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1019 					struct btrfs_root *root,
1020 					struct btrfs_path *path,
1021 					struct btrfs_delayed_node *node)
1022 {
1023 	struct btrfs_key key;
1024 	struct btrfs_inode_item *inode_item;
1025 	struct extent_buffer *leaf;
1026 	int ret;
1027 
1028 	key.objectid = node->inode_id;
1029 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1030 	key.offset = 0;
1031 
1032 	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
1033 	if (ret > 0) {
1034 		btrfs_release_path(path);
1035 		return -ENOENT;
1036 	} else if (ret < 0) {
1037 		return ret;
1038 	}
1039 
1040 	btrfs_unlock_up_safe(path, 1);
1041 	leaf = path->nodes[0];
1042 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1043 				    struct btrfs_inode_item);
1044 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1045 			    sizeof(struct btrfs_inode_item));
1046 	btrfs_mark_buffer_dirty(leaf);
1047 	btrfs_release_path(path);
1048 
1049 	btrfs_delayed_inode_release_metadata(root, node);
1050 	btrfs_release_delayed_inode(node);
1051 
1052 	return 0;
1053 }
1054 
1055 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1056 					     struct btrfs_root *root,
1057 					     struct btrfs_path *path,
1058 					     struct btrfs_delayed_node *node)
1059 {
1060 	int ret;
1061 
1062 	mutex_lock(&node->mutex);
1063 	if (!node->inode_dirty) {
1064 		mutex_unlock(&node->mutex);
1065 		return 0;
1066 	}
1067 
1068 	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1069 	mutex_unlock(&node->mutex);
1070 	return ret;
1071 }
1072 
1073 static inline int
1074 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1075 				   struct btrfs_path *path,
1076 				   struct btrfs_delayed_node *node)
1077 {
1078 	int ret;
1079 
1080 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1081 	if (ret)
1082 		return ret;
1083 
1084 	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1085 	if (ret)
1086 		return ret;
1087 
1088 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1089 	return ret;
1090 }
1091 
1092 /*
1093  * Called when committing the transaction.
1094  * Returns 0 on success.
1095  * Returns < 0 on error; in that case the transaction is aborted and any
1096  * outstanding delayed items are cleaned up.
1097  */
1098 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1099 				     struct btrfs_root *root, int nr)
1100 {
1101 	struct btrfs_delayed_root *delayed_root;
1102 	struct btrfs_delayed_node *curr_node, *prev_node;
1103 	struct btrfs_path *path;
1104 	struct btrfs_block_rsv *block_rsv;
1105 	int ret = 0;
1106 	bool count = (nr > 0);
1107 
1108 	if (trans->aborted)
1109 		return -EIO;
1110 
1111 	path = btrfs_alloc_path();
1112 	if (!path)
1113 		return -ENOMEM;
1114 	path->leave_spinning = 1;
1115 
1116 	block_rsv = trans->block_rsv;
1117 	trans->block_rsv = &root->fs_info->delayed_block_rsv;
1118 
1119 	delayed_root = btrfs_get_delayed_root(root);
1120 
1121 	curr_node = btrfs_first_delayed_node(delayed_root);
1122 	while (curr_node && (!count || (count && nr--))) {
1123 		ret = __btrfs_commit_inode_delayed_items(trans, path,
1124 							 curr_node);
1125 		if (ret) {
1126 			btrfs_release_delayed_node(curr_node);
1127 			curr_node = NULL;
1128 			btrfs_abort_transaction(trans, root, ret);
1129 			break;
1130 		}
1131 
1132 		prev_node = curr_node;
1133 		curr_node = btrfs_next_delayed_node(curr_node);
1134 		btrfs_release_delayed_node(prev_node);
1135 	}
1136 
1137 	if (curr_node)
1138 		btrfs_release_delayed_node(curr_node);
1139 	btrfs_free_path(path);
1140 	trans->block_rsv = block_rsv;
1141 
1142 	return ret;
1143 }
1144 
1145 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1146 			    struct btrfs_root *root)
1147 {
1148 	return __btrfs_run_delayed_items(trans, root, -1);
1149 }
1150 
1151 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
1152 			       struct btrfs_root *root, int nr)
1153 {
1154 	return __btrfs_run_delayed_items(trans, root, nr);
1155 }
1156 
1157 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1158 				     struct inode *inode)
1159 {
1160 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1161 	struct btrfs_path *path;
1162 	struct btrfs_block_rsv *block_rsv;
1163 	int ret;
1164 
1165 	if (!delayed_node)
1166 		return 0;
1167 
1168 	mutex_lock(&delayed_node->mutex);
1169 	if (!delayed_node->count) {
1170 		mutex_unlock(&delayed_node->mutex);
1171 		btrfs_release_delayed_node(delayed_node);
1172 		return 0;
1173 	}
1174 	mutex_unlock(&delayed_node->mutex);
1175 
1176 	path = btrfs_alloc_path();
1177 	if (!path)
1178 		return -ENOMEM;
1179 	path->leave_spinning = 1;
1180 
1181 	block_rsv = trans->block_rsv;
1182 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1183 
1184 	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1185 
1186 	btrfs_release_delayed_node(delayed_node);
1187 	btrfs_free_path(path);
1188 	trans->block_rsv = block_rsv;
1189 
1190 	return ret;
1191 }
1192 
1193 int btrfs_commit_inode_delayed_inode(struct inode *inode)
1194 {
1195 	struct btrfs_trans_handle *trans;
1196 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1197 	struct btrfs_path *path;
1198 	struct btrfs_block_rsv *block_rsv;
1199 	int ret;
1200 
1201 	if (!delayed_node)
1202 		return 0;
1203 
1204 	mutex_lock(&delayed_node->mutex);
1205 	if (!delayed_node->inode_dirty) {
1206 		mutex_unlock(&delayed_node->mutex);
1207 		btrfs_release_delayed_node(delayed_node);
1208 		return 0;
1209 	}
1210 	mutex_unlock(&delayed_node->mutex);
1211 
1212 	trans = btrfs_join_transaction(delayed_node->root);
1213 	if (IS_ERR(trans)) {
1214 		ret = PTR_ERR(trans);
1215 		goto out;
1216 	}
1217 
1218 	path = btrfs_alloc_path();
1219 	if (!path) {
1220 		ret = -ENOMEM;
1221 		goto trans_out;
1222 	}
1223 	path->leave_spinning = 1;
1224 
1225 	block_rsv = trans->block_rsv;
1226 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1227 
1228 	mutex_lock(&delayed_node->mutex);
1229 	if (delayed_node->inode_dirty)
1230 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1231 						   path, delayed_node);
1232 	else
1233 		ret = 0;
1234 	mutex_unlock(&delayed_node->mutex);
1235 
1236 	btrfs_free_path(path);
1237 	trans->block_rsv = block_rsv;
1238 trans_out:
1239 	btrfs_end_transaction(trans, delayed_node->root);
1240 	btrfs_btree_balance_dirty(delayed_node->root);
1241 out:
1242 	btrfs_release_delayed_node(delayed_node);
1243 
1244 	return ret;
1245 }
1246 
1247 void btrfs_remove_delayed_node(struct inode *inode)
1248 {
1249 	struct btrfs_delayed_node *delayed_node;
1250 
1251 	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1252 	if (!delayed_node)
1253 		return;
1254 
1255 	BTRFS_I(inode)->delayed_node = NULL;
1256 	btrfs_release_delayed_node(delayed_node);
1257 }
1258 
1259 struct btrfs_async_delayed_work {
1260 	struct btrfs_delayed_root *delayed_root;
1261 	int nr;
1262 	struct btrfs_work work;
1263 };
1264 
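/*
 * Worker callback: repeatedly take a node off the prepared list and flush
 * its delayed items inside a joined transaction.  async_work->nr bounds
 * the number of nodes handled; nr == 0 means keep going until the backlog
 * drops below half of BTRFS_DELAYED_BACKGROUND.
 */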
1265 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1266 {
1267 	struct btrfs_async_delayed_work *async_work;
1268 	struct btrfs_delayed_root *delayed_root;
1269 	struct btrfs_trans_handle *trans;
1270 	struct btrfs_path *path;
1271 	struct btrfs_delayed_node *delayed_node = NULL;
1272 	struct btrfs_root *root;
1273 	struct btrfs_block_rsv *block_rsv;
1274 	int total_done = 0;
1275 
1276 	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1277 	delayed_root = async_work->delayed_root;
1278 
1279 	path = btrfs_alloc_path();
1280 	if (!path)
1281 		goto out;
1282 
1283 again:
1284 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
1285 		goto free_path;
1286 
1287 	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1288 	if (!delayed_node)
1289 		goto free_path;
1290 
1291 	path->leave_spinning = 1;
1292 	root = delayed_node->root;
1293 
1294 	trans = btrfs_join_transaction(root);
1295 	if (IS_ERR(trans))
1296 		goto release_path;
1297 
1298 	block_rsv = trans->block_rsv;
1299 	trans->block_rsv = &root->fs_info->delayed_block_rsv;
1300 
1301 	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1302 	/*
1303 	 * Maybe new delayed items have been inserted, so we need to requeue
1304 	 * the work. Besides that, we must dequeue the empty delayed nodes
1305 	 * to avoid the race between delayed items balance and the worker.
1306 	 * The race like this:
1307 	 * 	Task1				Worker thread
1308 	 * 					count == 0, needn't requeue
1309 	 * 					  also needn't insert the
1310 	 * 					  delayed node into prepare
1311 	 * 					  list again.
1312 	 * 	add lots of delayed items
1313 	 * 	queue the delayed node
1314 	 * 	  already in the list,
1315 	 * 	  and not in the prepare
1316 	 * 	  list, it means the delayed
1317 	 * 	  node is being dealt with
1318 	 * 	  by the worker.
1319 	 * 	do delayed items balance
1320 	 * 	  the delayed node is being
1321 	 * 	  dealt with by the worker
1322 	 * 	  now, just wait.
1323 	 * 	  				the worker goto idle.
1324 	 * Task1 will sleep until the transaction is committed.
1325 	 */
1326 	mutex_lock(&delayed_node->mutex);
1327 	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
1328 	mutex_unlock(&delayed_node->mutex);
1329 
1330 	trans->block_rsv = block_rsv;
1331 	btrfs_end_transaction_dmeta(trans, root);
1332 	btrfs_btree_balance_dirty_nodelay(root);
1333 
1334 release_path:
1335 	btrfs_release_path(path);
1336 	total_done++;
1337 
1338 	btrfs_release_prepared_delayed_node(delayed_node);
1339 	if (async_work->nr == 0 || total_done < async_work->nr)
1340 		goto again;
1341 
1342 free_path:
1343 	btrfs_free_path(path);
1344 out:
1345 	wake_up(&delayed_root->wait);
1346 	kfree(async_work);
1347 }
1348 
1350 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1351 				     struct btrfs_root *root, int nr)
1352 {
1353 	struct btrfs_async_delayed_work *async_work;
1354 
1355 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1356 		return 0;
1357 
1358 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1359 	if (!async_work)
1360 		return -ENOMEM;
1361 
1362 	async_work->delayed_root = delayed_root;
1363 	async_work->work.func = btrfs_async_run_delayed_root;
1364 	async_work->work.flags = 0;
1365 	async_work->nr = nr;
1366 
1367 	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_work->work);
1368 	return 0;
1369 }
1370 
1371 void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1372 {
1373 	struct btrfs_delayed_root *delayed_root;
1374 	delayed_root = btrfs_get_delayed_root(root);
1375 	WARN_ON(btrfs_first_delayed_node(delayed_root));
1376 }
1377 
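/*
 * Return 1 once items_seq has advanced at least @count completions past
 * the @seq value the waiter sampled; the val < seq half of the test
 * handles the counter wrapping around.
 */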
1378 static int refs_newer(struct btrfs_delayed_root *delayed_root,
1379 		      int seq, int count)
1380 {
1381 	int val = atomic_read(&delayed_root->items_seq);
1382 
1383 	if (val < seq || val >= seq + count)
1384 		return 1;
1385 	return 0;
1386 }
1387 
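/*
 * Throttle producers of delayed items.  Below BTRFS_DELAYED_BACKGROUND
 * nothing happens; above it an async flush is kicked off; and once the
 * backlog has grown to BTRFS_DELAYED_WRITEBACK the caller blocks until a
 * batch has been flushed or the backlog shrinks back below the background
 * threshold.
 */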
1388 void btrfs_balance_delayed_items(struct btrfs_root *root)
1389 {
1390 	struct btrfs_delayed_root *delayed_root;
1391 	int seq;
1392 
1393 	delayed_root = btrfs_get_delayed_root(root);
1394 
1395 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1396 		return;
1397 
1398 	seq = atomic_read(&delayed_root->items_seq);
1399 
1400 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1401 		int ret;
1402 		DEFINE_WAIT(__wait);
1403 
1404 		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
1405 		if (ret)
1406 			return;
1407 
1408 		while (1) {
1409 			prepare_to_wait(&delayed_root->wait, &__wait,
1410 					TASK_INTERRUPTIBLE);
1411 
1412 			if (refs_newer(delayed_root, seq,
1413 				       BTRFS_DELAYED_BATCH) ||
1414 			    atomic_read(&delayed_root->items) <
1415 			    BTRFS_DELAYED_BACKGROUND) {
1416 				break;
1417 			}
1418 			if (!signal_pending(current))
1419 				schedule();
1420 			else
1421 				break;
1422 		}
1423 		finish_wait(&delayed_root->wait, &__wait);
1424 	}
1425 
1426 	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
1427 }
1428 
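/*
 * Queue the dir index item for a new directory entry in the delayed node
 * instead of inserting it into the directory's tree right away; readdir
 * and the eventual batch insert pick it up from there.
 */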
1429 /* Will return 0 or -ENOMEM */
1430 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1431 				   struct btrfs_root *root, const char *name,
1432 				   int name_len, struct inode *dir,
1433 				   struct btrfs_disk_key *disk_key, u8 type,
1434 				   u64 index)
1435 {
1436 	struct btrfs_delayed_node *delayed_node;
1437 	struct btrfs_delayed_item *delayed_item;
1438 	struct btrfs_dir_item *dir_item;
1439 	int ret;
1440 
1441 	delayed_node = btrfs_get_or_create_delayed_node(dir);
1442 	if (IS_ERR(delayed_node))
1443 		return PTR_ERR(delayed_node);
1444 
1445 	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1446 	if (!delayed_item) {
1447 		ret = -ENOMEM;
1448 		goto release_node;
1449 	}
1450 
1451 	delayed_item->key.objectid = btrfs_ino(dir);
1452 	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
1453 	delayed_item->key.offset = index;
1454 
1455 	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1456 	dir_item->location = *disk_key;
1457 	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1458 	btrfs_set_stack_dir_data_len(dir_item, 0);
1459 	btrfs_set_stack_dir_name_len(dir_item, name_len);
1460 	btrfs_set_stack_dir_type(dir_item, type);
1461 	memcpy((char *)(dir_item + 1), name, name_len);
1462 
1463 	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
1464 	/*
1465 	 * we reserved enough space when we started the transaction, so a
1466 	 * metadata reservation failure here is impossible
1467 	 */
1468 	BUG_ON(ret);
1469 
1470 
1471 	mutex_lock(&delayed_node->mutex);
1472 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1473 	if (unlikely(ret)) {
1474 		printk(KERN_ERR "err add delayed dir index item(name: %.*s) "
1475 				"into the insertion tree of the delayed node"
1476 				"(root id: %llu, inode id: %llu, errno: %d)\n",
1477 				name_len, name, delayed_node->root->objectid,
1478 				delayed_node->inode_id, ret);
1479 		BUG();
1480 	}
1481 	mutex_unlock(&delayed_node->mutex);
1482 
1483 release_node:
1484 	btrfs_release_delayed_node(delayed_node);
1485 	return ret;
1486 }
1487 
1488 static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
1489 					       struct btrfs_delayed_node *node,
1490 					       struct btrfs_key *key)
1491 {
1492 	struct btrfs_delayed_item *item;
1493 
1494 	mutex_lock(&node->mutex);
1495 	item = __btrfs_lookup_delayed_insertion_item(node, key);
1496 	if (!item) {
1497 		mutex_unlock(&node->mutex);
1498 		return 1;
1499 	}
1500 
1501 	btrfs_delayed_item_release_metadata(root, item);
1502 	btrfs_release_delayed_item(item);
1503 	mutex_unlock(&node->mutex);
1504 	return 0;
1505 }
1506 
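/*
 * Queue the deletion of a dir index item.  If the matching insertion is
 * itself still queued in the delayed node the two simply cancel out;
 * otherwise a deletion item is recorded for the later batch delete.
 */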
1507 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1508 				   struct btrfs_root *root, struct inode *dir,
1509 				   u64 index)
1510 {
1511 	struct btrfs_delayed_node *node;
1512 	struct btrfs_delayed_item *item;
1513 	struct btrfs_key item_key;
1514 	int ret;
1515 
1516 	node = btrfs_get_or_create_delayed_node(dir);
1517 	if (IS_ERR(node))
1518 		return PTR_ERR(node);
1519 
1520 	item_key.objectid = btrfs_ino(dir);
1521 	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
1522 	item_key.offset = index;
1523 
1524 	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
1525 	if (!ret)
1526 		goto end;
1527 
1528 	item = btrfs_alloc_delayed_item(0);
1529 	if (!item) {
1530 		ret = -ENOMEM;
1531 		goto end;
1532 	}
1533 
1534 	item->key = item_key;
1535 
1536 	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
1537 	/*
1538 	 * we reserved enough space when we started the transaction, so a
1539 	 * metadata reservation failure here is impossible.
1540 	 */
1541 	BUG_ON(ret);
1542 
1543 	mutex_lock(&node->mutex);
1544 	ret = __btrfs_add_delayed_deletion_item(node, item);
1545 	if (unlikely(ret)) {
1546 		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
1547 				"into the deletion tree of the delayed node"
1548 				"(root id: %llu, inode id: %llu, errno: %d)\n",
1549 				index, node->root->objectid, node->inode_id,
1550 				ret);
1551 		BUG();
1552 	}
1553 	mutex_unlock(&node->mutex);
1554 end:
1555 	btrfs_release_delayed_node(node);
1556 	return ret;
1557 }
1558 
1559 int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1560 {
1561 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1562 
1563 	if (!delayed_node)
1564 		return -ENOENT;
1565 
1566 	/*
1567 	 * Since we hold the i_mutex of this directory, no new directory index
1568 	 * can be added to the delayed node and index_cnt cannot be updated
1569 	 * while we are here, so we needn't lock the delayed node.
1570 	 */
1571 	if (!delayed_node->index_cnt) {
1572 		btrfs_release_delayed_node(delayed_node);
1573 		return -EINVAL;
1574 	}
1575 
1576 	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1577 	btrfs_release_delayed_node(delayed_node);
1578 	return 0;
1579 }
1580 
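/*
 * Grab references to all currently queued insertion and deletion items of
 * the directory's delayed node and link them onto the caller's ins_list
 * and del_list for readdir to consume; btrfs_put_delayed_items() drops
 * them again.
 */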
1581 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
1582 			     struct list_head *del_list)
1583 {
1584 	struct btrfs_delayed_node *delayed_node;
1585 	struct btrfs_delayed_item *item;
1586 
1587 	delayed_node = btrfs_get_delayed_node(inode);
1588 	if (!delayed_node)
1589 		return;
1590 
1591 	mutex_lock(&delayed_node->mutex);
1592 	item = __btrfs_first_delayed_insertion_item(delayed_node);
1593 	while (item) {
1594 		atomic_inc(&item->refs);
1595 		list_add_tail(&item->readdir_list, ins_list);
1596 		item = __btrfs_next_delayed_item(item);
1597 	}
1598 
1599 	item = __btrfs_first_delayed_deletion_item(delayed_node);
1600 	while (item) {
1601 		atomic_inc(&item->refs);
1602 		list_add_tail(&item->readdir_list, del_list);
1603 		item = __btrfs_next_delayed_item(item);
1604 	}
1605 	mutex_unlock(&delayed_node->mutex);
1606 	/*
1607 	 * This delayed node is still cached in the btrfs inode, so refs
1608 	 * must be > 1 now, and we needn't check whether it is going to be
1609 	 * freed or not.
1610 	 *
1611 	 * Besides that, this function is used to read the directory, and we
1612 	 * do not insert/delete delayed items during this period, so we also
1613 	 * needn't requeue or dequeue this delayed node.
1614 	 */
1615 	atomic_dec(&delayed_node->refs);
1616 }
1617 
1618 void btrfs_put_delayed_items(struct list_head *ins_list,
1619 			     struct list_head *del_list)
1620 {
1621 	struct btrfs_delayed_item *curr, *next;
1622 
1623 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1624 		list_del(&curr->readdir_list);
1625 		if (atomic_dec_and_test(&curr->refs))
1626 			kfree(curr);
1627 	}
1628 
1629 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1630 		list_del(&curr->readdir_list);
1631 		if (atomic_dec_and_test(&curr->refs))
1632 			kfree(curr);
1633 	}
1634 }
1635 
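/*
 * Readdir helper: report whether the entry at @index is covered by a
 * queued delayed deletion item.  @del_list is sorted by index, so entries
 * that fall behind the current position are dropped as we walk.
 */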
1636 int btrfs_should_delete_dir_index(struct list_head *del_list,
1637 				  u64 index)
1638 {
1639 	struct btrfs_delayed_item *curr, *next;
1640 	int ret;
1641 
1642 	if (list_empty(del_list))
1643 		return 0;
1644 
1645 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1646 		if (curr->key.offset > index)
1647 			break;
1648 
1649 		list_del(&curr->readdir_list);
1650 		ret = (curr->key.offset == index);
1651 
1652 		if (atomic_dec_and_test(&curr->refs))
1653 			kfree(curr);
1654 
1655 		if (ret)
1656 			return 1;
1657 		else
1658 			continue;
1659 	}
1660 	return 0;
1661 }
1662 
1663 /*
1664  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1665  */
1667 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1668 				    struct list_head *ins_list)
1669 {
1670 	struct btrfs_dir_item *di;
1671 	struct btrfs_delayed_item *curr, *next;
1672 	struct btrfs_key location;
1673 	char *name;
1674 	int name_len;
1675 	int over = 0;
1676 	unsigned char d_type;
1677 
1678 	if (list_empty(ins_list))
1679 		return 0;
1680 
1681 	/*
1682 	 * Changing the data of the delayed item is impossible, so we
1683 	 * needn't lock them.  And since we hold the i_mutex of the
1684 	 * directory, nobody can delete any directory index now.
1685 	 */
1686 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1687 		list_del(&curr->readdir_list);
1688 
1689 		if (curr->key.offset < ctx->pos) {
1690 			if (atomic_dec_and_test(&curr->refs))
1691 				kfree(curr);
1692 			continue;
1693 		}
1694 
1695 		ctx->pos = curr->key.offset;
1696 
1697 		di = (struct btrfs_dir_item *)curr->data;
1698 		name = (char *)(di + 1);
1699 		name_len = btrfs_stack_dir_name_len(di);
1700 
1701 		d_type = btrfs_filetype_table[di->type];
1702 		btrfs_disk_key_to_cpu(&location, &di->location);
1703 
1704 		over = !dir_emit(ctx, name, name_len,
1705 			       location.objectid, d_type);
1706 
1707 		if (atomic_dec_and_test(&curr->refs))
1708 			kfree(curr);
1709 
1710 		if (over)
1711 			return 1;
1712 	}
1713 	return 0;
1714 }
1715 
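/*
 * Mirror the in-memory inode state into the stack inode item cached in
 * the delayed node; __btrfs_update_delayed_inode() later copies that
 * verbatim into the fs tree.
 */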
1716 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1717 				  struct btrfs_inode_item *inode_item,
1718 				  struct inode *inode)
1719 {
1720 	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1721 	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1722 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1723 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1724 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1725 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1726 	btrfs_set_stack_inode_generation(inode_item,
1727 					 BTRFS_I(inode)->generation);
1728 	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1729 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1730 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1731 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1732 	btrfs_set_stack_inode_block_group(inode_item, 0);
1733 
1734 	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1735 				     inode->i_atime.tv_sec);
1736 	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
1737 				      inode->i_atime.tv_nsec);
1738 
1739 	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
1740 				     inode->i_mtime.tv_sec);
1741 	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
1742 				      inode->i_mtime.tv_nsec);
1743 
1744 	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
1745 				     inode->i_ctime.tv_sec);
1746 	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
1747 				      inode->i_ctime.tv_nsec);
1748 }
1749 
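/*
 * Fill in a VFS inode from the inode item cached in its delayed node, if
 * a dirty one exists, instead of reading the possibly stale item from the
 * fs tree.  Returns -ENOENT when nothing usable is cached.
 */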
1750 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1751 {
1752 	struct btrfs_delayed_node *delayed_node;
1753 	struct btrfs_inode_item *inode_item;
1754 	struct btrfs_timespec *tspec;
1755 
1756 	delayed_node = btrfs_get_delayed_node(inode);
1757 	if (!delayed_node)
1758 		return -ENOENT;
1759 
1760 	mutex_lock(&delayed_node->mutex);
1761 	if (!delayed_node->inode_dirty) {
1762 		mutex_unlock(&delayed_node->mutex);
1763 		btrfs_release_delayed_node(delayed_node);
1764 		return -ENOENT;
1765 	}
1766 
1767 	inode_item = &delayed_node->inode_item;
1768 
1769 	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1770 	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1771 	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1772 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1773 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1774 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1775 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1776 	inode->i_version = btrfs_stack_inode_sequence(inode_item);
1777 	inode->i_rdev = 0;
1778 	*rdev = btrfs_stack_inode_rdev(inode_item);
1779 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1780 
1781 	tspec = btrfs_inode_atime(inode_item);
1782 	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
1783 	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1784 
1785 	tspec = btrfs_inode_mtime(inode_item);
1786 	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
1787 	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1788 
1789 	tspec = btrfs_inode_ctime(inode_item);
1790 	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
1791 	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1792 
1793 	inode->i_generation = BTRFS_I(inode)->generation;
1794 	BTRFS_I(inode)->index_cnt = (u64)-1;
1795 
1796 	mutex_unlock(&delayed_node->mutex);
1797 	btrfs_release_delayed_node(delayed_node);
1798 	return 0;
1799 }
1800 
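/*
 * Record an inode update in the delayed node rather than writing the
 * inode item back immediately.  The first update for a node reserves the
 * metadata space and marks it dirty; subsequent updates just refresh the
 * cached copy.
 */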
1801 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1802 			       struct btrfs_root *root, struct inode *inode)
1803 {
1804 	struct btrfs_delayed_node *delayed_node;
1805 	int ret = 0;
1806 
1807 	delayed_node = btrfs_get_or_create_delayed_node(inode);
1808 	if (IS_ERR(delayed_node))
1809 		return PTR_ERR(delayed_node);
1810 
1811 	mutex_lock(&delayed_node->mutex);
1812 	if (delayed_node->inode_dirty) {
1813 		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1814 		goto release_node;
1815 	}
1816 
1817 	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1818 						   delayed_node);
1819 	if (ret)
1820 		goto release_node;
1821 
1822 	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1823 	delayed_node->inode_dirty = 1;
1824 	delayed_node->count++;
1825 	atomic_inc(&root->fs_info->delayed_root->items);
1826 release_node:
1827 	mutex_unlock(&delayed_node->mutex);
1828 	btrfs_release_delayed_node(delayed_node);
1829 	return ret;
1830 }
1831 
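/*
 * Throw away everything queued on @delayed_node without applying it:
 * release the metadata reservations and drop all insertion and deletion
 * items as well as any pending inode update.
 */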
1832 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1833 {
1834 	struct btrfs_root *root = delayed_node->root;
1835 	struct btrfs_delayed_item *curr_item, *prev_item;
1836 
1837 	mutex_lock(&delayed_node->mutex);
1838 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1839 	while (curr_item) {
1840 		btrfs_delayed_item_release_metadata(root, curr_item);
1841 		prev_item = curr_item;
1842 		curr_item = __btrfs_next_delayed_item(prev_item);
1843 		btrfs_release_delayed_item(prev_item);
1844 	}
1845 
1846 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1847 	while (curr_item) {
1848 		btrfs_delayed_item_release_metadata(root, curr_item);
1849 		prev_item = curr_item;
1850 		curr_item = __btrfs_next_delayed_item(prev_item);
1851 		btrfs_release_delayed_item(prev_item);
1852 	}
1853 
1854 	if (delayed_node->inode_dirty) {
1855 		btrfs_delayed_inode_release_metadata(root, delayed_node);
1856 		btrfs_release_delayed_inode(delayed_node);
1857 	}
1858 	mutex_unlock(&delayed_node->mutex);
1859 }
1860 
1861 void btrfs_kill_delayed_inode_items(struct inode *inode)
1862 {
1863 	struct btrfs_delayed_node *delayed_node;
1864 
1865 	delayed_node = btrfs_get_delayed_node(inode);
1866 	if (!delayed_node)
1867 		return;
1868 
1869 	__btrfs_kill_delayed_node(delayed_node);
1870 	btrfs_release_delayed_node(delayed_node);
1871 }
1872 
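/*
 * Discard the delayed items of every inode of @root, typically when the
 * root itself is going away (e.g. snapshot or subvolume deletion).  The
 * radix tree is walked in gang-lookup batches and each node is pinned
 * with a reference before being killed.
 */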
1873 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1874 {
1875 	u64 inode_id = 0;
1876 	struct btrfs_delayed_node *delayed_nodes[8];
1877 	int i, n;
1878 
1879 	while (1) {
1880 		spin_lock(&root->inode_lock);
1881 		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1882 					   (void **)delayed_nodes, inode_id,
1883 					   ARRAY_SIZE(delayed_nodes));
1884 		if (!n) {
1885 			spin_unlock(&root->inode_lock);
1886 			break;
1887 		}
1888 
1889 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
1890 
1891 		for (i = 0; i < n; i++)
1892 			atomic_inc(&delayed_nodes[i]->refs);
1893 		spin_unlock(&root->inode_lock);
1894 
1895 		for (i = 0; i < n; i++) {
1896 			__btrfs_kill_delayed_node(delayed_nodes[i]);
1897 			btrfs_release_delayed_node(delayed_nodes[i]);
1898 		}
1899 	}
1900 }
1901 
1902 void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1903 {
1904 	struct btrfs_delayed_root *delayed_root;
1905 	struct btrfs_delayed_node *curr_node, *prev_node;
1906 
1907 	delayed_root = btrfs_get_delayed_root(root);
1908 
1909 	curr_node = btrfs_first_delayed_node(delayed_root);
1910 	while (curr_node) {
1911 		__btrfs_kill_delayed_node(curr_node);
1912 
1913 		prev_node = curr_node;
1914 		curr_node = btrfs_next_delayed_node(curr_node);
1915 		btrfs_release_delayed_node(prev_node);
1916 	}
1917 }
1918 
1919