/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100
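/*
 * Flush-control thresholds for btrfs_balance_delayed_items() below: once
 * the total number of delayed items reaches BTRFS_DELAYED_BACKGROUND,
 * async flush work is kicked off; from BTRFS_DELAYED_WRITEBACK on, the
 * caller additionally waits (with a timeout) for the count to drop back
 * below the background level.
 */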

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
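
/*
 * Illustrative caller-side sketch (not lifted from a real caller): every
 * reference returned here must eventually be dropped again with
 * btrfs_release_delayed_node():
 *
 *	struct btrfs_delayed_node *node = btrfs_get_delayed_node(inode);
 *
 *	if (node) {
 *		mutex_lock(&node->mutex);
 *		... inspect or modify the delayed items ...
 *		mutex_unlock(&node->mutex);
 *		btrfs_release_delayed_node(node);
 *	}
 */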

static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call this while holding delayed_node->mutex.
 *
 * If mod = 1, also add this node to the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call this while holding delayed_node->mutex. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:	the rb-root of the delayed node to search in
 * @key:	the key to look up
 * @prev:	used to store the previous item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if the exact item isn't found, NULL is returned and the items on
 * either side of where it would be are stored in @prev and @next.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
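
/*
 * Sketch of how the @prev/@next out-parameters are meant to be used
 * (this mirrors the __btrfs_search_delayed_*_item() helpers below): when
 * the exact key is absent, a caller can fall back to the first item whose
 * key is greater than @key:
 *
 *	struct btrfs_delayed_item *item, *next;
 *
 *	item = __btrfs_lookup_delayed_item(&node->ins_root, key, NULL, &next);
 *	if (!item)
 *		item = next;
 */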

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				 rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;
	atomic_dec(&delayed_root->items);
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

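/*
 * Resolve @root_id to its btrfs_root, reusing @root when the ids already
 * match.  The lookup key uses offset (u64)-1, which selects the most
 * recent version of the root item.
 */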
static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
	 * which does not reserve space, for the sake of speed.  This is a
	 * problem since we still need to reserve space for this update, so try
	 * to reserve the space here.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
	    src_rsv != &root->fs_info->delalloc_block_rsv)) {
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (BTRFS_I(inode)->delalloc_meta_reserved) {
			BTRFS_I(inode)->delalloc_meta_reserved = 0;
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * Ok, we didn't have space pre-reserved.  This shouldn't happen
		 * too often, but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If that fails, be a pain
		 * and try to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok, this is a problem; let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced the size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * and we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and the time we'd
	 * clean it up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}
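
/*
 * A condensed restatement of the reservation paths above (no additional
 * behaviour, just a summary):
 *
 *   1) no src rsv, or a transaction without reserved bytes that isn't
 *      backed by the delalloc rsv: take a fresh reservation with
 *      btrfs_block_rsv_add_noflush(), mapping -EAGAIN to -ENOSPC for
 *      btrfs_dirty_inode();
 *   2) src is the delalloc rsv and a delalloc meta reservation exists:
 *      migrate that reservation over and release it from the source;
 *   3) otherwise: try a fresh reservation, then migration from the
 *      source, and as a last resort steal from the global rsv.
 */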

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	ret = setup_items_for_insert(trans, root, path, keys, data_size,
				     total_data_size, total_size, nitems);
	if (ret)
		goto error;

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
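
/*
 * Worked example for the batching loop above (the numbers are purely
 * illustrative): each batched item consumes data_len +
 * sizeof(struct btrfs_item) bytes of leaf space, so with free_space =
 * 1000 and dir index items whose data_len is 40, at most
 * 1000 / (40 + sizeof(struct btrfs_item)) consecutive items fit into
 * this leaf within a single setup_items_for_insert() call.
 */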

/*
 * This helper handles the simple insertions that don't need to extend an
 * existing item for the new data, such as directory name index insertion
 * and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert one item first, and then, if there are continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * we can't find the item that this delayed item points to,
		 * so this delayed item is invalid; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr)
			goto do_again;
		else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		atomic_dec(&delayed_root->items);
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}

static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
		mutex_unlock(&node->mutex);
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
	mutex_unlock(&node->mutex);

	return 0;
}
/*
 * Called when committing the transaction: flush the delayed items of every
 * delayed node, one node at a time.
 */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		root = curr_node->root;
		ret = btrfs_insert_delayed_items(trans, path, root,
						 curr_node);
		if (!ret)
			ret = btrfs_delete_delayed_items(trans, path, root,
							 curr_node);
		if (!ret)
			ret = btrfs_update_delayed_inode(trans, root, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
	return ret;
}

static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_node *node)
{
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	btrfs_free_path(path);

	trans->block_rsv = block_rsv;
	return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	unsigned long nr = 0;
	int need_requeue = 0;
	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, root,
						 delayed_node);

	if (!ret)
		btrfs_update_delayed_inode(trans, root, path, delayed_node);

	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race looks like this:
	 * 	Task1				Worker thread
	 * 					count == 0, needn't requeue
	 * 					  also needn't insert the
	 * 					  delayed node into prepare
	 * 					  list again.
	 * 	add lots of delayed items
	 * 	queue the delayed node
	 * 	  already in the list,
	 * 	  and not in the prepare
	 * 	  list, it means the delayed
	 * 	  node is being dealt with
	 * 	  by the worker.
	 * 	do delayed items balance
	 * 	  the delayed node is being
	 * 	  dealt with by the worker
	 * 	  now, just wait.
	 * 	  				the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	nr = trans->blocks_used;

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	__btrfs_btree_balance_dirty(root, nr);
free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}

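/*
 * Hand prepared delayed nodes over to the delayed_workers queue.  If @all
 * is set, every prepared node is dispatched; otherwise we stop after four
 * to bound the work done on the balance path.
 */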
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int all)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_delayed_node *curr;
	int count = 0;

again:
	curr = btrfs_first_prepared_delayed_node(delayed_root);
	if (!curr)
		return 0;

	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
	if (!async_node) {
		btrfs_release_prepared_delayed_node(curr);
		return -ENOMEM;
	}

	async_node->root = root;
	async_node->delayed_node = curr;

	async_node->work.func = btrfs_async_run_delayed_node_done;
	async_node->work.flags = 0;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
	count++;

	if (all || count < 4)
		goto again;

	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
		if (ret)
			return;

		wait_event_interruptible_timeout(
				delayed_root->wait,
				(atomic_read(&delayed_root->items) <
				 BTRFS_DELAYED_BACKGROUND),
				HZ);
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we reserved enough space when we started the transaction, so
	 * a metadata reservation failure is impossible here.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %s) into "
				"the insertion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we reserved enough space when we started the transaction, so
	 * a metadata reservation failure is impossible here.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, no new directory index
	 * can be added to the delayed node and index_cnt cannot be updated
	 * while we are here, so we don't need to lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

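/*
 * The readdir helpers below are meant to be used together.  A caller-side
 * sketch (illustrative, the real caller lives in the readdir path):
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	btrfs_get_delayed_items(inode, &ins_list, &del_list);
 *	... for each on-disk index, skip it when
 *	    btrfs_should_delete_dir_index(&del_list, index) says so ...
 *	btrfs_readdir_delayed_dir_index(filp, dirent, filldir, &ins_list);
 *	btrfs_put_delayed_items(&ins_list, &del_list);
 */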
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the dir, and we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed items is impossible, so we don't
	 * need to lock them. And since we hold the i_mutex of the directory,
	 * nobody can delete any directory index now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

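/*
 * BTRFS_SETGET_STACK_FUNCS generates endian-converting accessor pairs
 * (btrfs_stack_inode_*() / btrfs_set_stack_inode_*()) for items held in
 * plain memory rather than in an extent buffer.
 */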
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	inode->i_uid = btrfs_stack_inode_uid(inode_item);
	inode->i_gid = btrfs_stack_inode_gid(inode_item);
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

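/*
 * Record an inode update in the delayed node instead of touching the
 * btree right away: the in-memory copy of the inode item is filled in
 * here and written back later by btrfs_update_delayed_inode().
 */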
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

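/*
 * Drop every delayed node of @root.  The radix tree is walked in batches
 * of ARRAY_SIZE(delayed_nodes); inode_id remembers where the next gang
 * lookup has to resume.
 */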
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}