1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
5 */
6
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
9 #include "ctree.h"
10 #include "fs.h"
11 #include "messages.h"
12 #include "misc.h"
13 #include "delayed-inode.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "qgroup.h"
17 #include "locking.h"
18 #include "inode-item.h"
19 #include "space-info.h"
20 #include "accessors.h"
21 #include "file-item.h"
22
23 #define BTRFS_DELAYED_WRITEBACK 512
24 #define BTRFS_DELAYED_BACKGROUND 128
25 #define BTRFS_DELAYED_BATCH 16
26
27 static struct kmem_cache *delayed_node_cache;
28
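/* Create the slab cache used to allocate struct btrfs_delayed_node objects. */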
29 int __init btrfs_delayed_inode_init(void)
30 {
31 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
32 sizeof(struct btrfs_delayed_node),
33 0,
34 SLAB_MEM_SPREAD,
35 NULL);
36 if (!delayed_node_cache)
37 return -ENOMEM;
38 return 0;
39 }
40
41 void __cold btrfs_delayed_inode_exit(void)
42 {
43 kmem_cache_destroy(delayed_node_cache);
44 }
45
46 static inline void btrfs_init_delayed_node(
47 struct btrfs_delayed_node *delayed_node,
48 struct btrfs_root *root, u64 inode_id)
49 {
50 delayed_node->root = root;
51 delayed_node->inode_id = inode_id;
52 refcount_set(&delayed_node->refs, 0);
53 delayed_node->ins_root = RB_ROOT_CACHED;
54 delayed_node->del_root = RB_ROOT_CACHED;
55 mutex_init(&delayed_node->mutex);
56 INIT_LIST_HEAD(&delayed_node->n_list);
57 INIT_LIST_HEAD(&delayed_node->p_list);
58 }
59
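/*
 * Get the delayed node of an inode and take a reference on it: first try the
 * pointer cached in the btrfs inode, then fall back to the per-root radix
 * tree. Returns NULL if the inode has no delayed node or if the node found
 * is already being freed. The caller must drop the reference with
 * btrfs_release_delayed_node().
 */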
60 static struct btrfs_delayed_node *btrfs_get_delayed_node(
61 struct btrfs_inode *btrfs_inode)
62 {
63 struct btrfs_root *root = btrfs_inode->root;
64 u64 ino = btrfs_ino(btrfs_inode);
65 struct btrfs_delayed_node *node;
66
67 node = READ_ONCE(btrfs_inode->delayed_node);
68 if (node) {
69 refcount_inc(&node->refs);
70 return node;
71 }
72
73 spin_lock(&root->inode_lock);
74 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
75
76 if (node) {
77 if (btrfs_inode->delayed_node) {
78 refcount_inc(&node->refs); /* can be accessed */
79 BUG_ON(btrfs_inode->delayed_node != node);
80 spin_unlock(&root->inode_lock);
81 return node;
82 }
83
84 /*
85 * It's possible that we're racing into the middle of removing
86 * this node from the radix tree. In this case, the refcount
87 * was zero and it should never go back to one. Just return
88 * NULL like it was never in the radix at all; our release
89 * function is in the process of removing it.
90 *
91 * Some implementations of refcount_inc refuse to bump the
92 * refcount once it has hit zero. If we don't do this dance
93 * here, refcount_inc() may decide to just WARN_ONCE() instead
94 * of actually bumping the refcount.
95 *
96 * If this node is properly in the radix, we want to bump the
97 * refcount twice, once for the inode and once for this get
98 * operation.
99 */
100 if (refcount_inc_not_zero(&node->refs)) {
101 refcount_inc(&node->refs);
102 btrfs_inode->delayed_node = node;
103 } else {
104 node = NULL;
105 }
106
107 spin_unlock(&root->inode_lock);
108 return node;
109 }
110 spin_unlock(&root->inode_lock);
111
112 return NULL;
113 }
114
115 /* Will return either the node or PTR_ERR(-ENOMEM) */
116 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
117 struct btrfs_inode *btrfs_inode)
118 {
119 struct btrfs_delayed_node *node;
120 struct btrfs_root *root = btrfs_inode->root;
121 u64 ino = btrfs_ino(btrfs_inode);
122 int ret;
123
124 again:
125 node = btrfs_get_delayed_node(btrfs_inode);
126 if (node)
127 return node;
128
129 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
130 if (!node)
131 return ERR_PTR(-ENOMEM);
132 btrfs_init_delayed_node(node, root, ino);
133
134 /* cached in the btrfs inode and can be accessed */
135 refcount_set(&node->refs, 2);
136
137 ret = radix_tree_preload(GFP_NOFS);
138 if (ret) {
139 kmem_cache_free(delayed_node_cache, node);
140 return ERR_PTR(ret);
141 }
142
143 spin_lock(&root->inode_lock);
144 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
145 if (ret == -EEXIST) {
146 spin_unlock(&root->inode_lock);
147 kmem_cache_free(delayed_node_cache, node);
148 radix_tree_preload_end();
149 goto again;
150 }
151 btrfs_inode->delayed_node = node;
152 spin_unlock(&root->inode_lock);
153 radix_tree_preload_end();
154
155 return node;
156 }
157
158 /*
159 * Call it when holding delayed_node->mutex
160 *
161 * If mod = 1, add this node into the prepared list.
162 */
163 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
164 struct btrfs_delayed_node *node,
165 int mod)
166 {
167 spin_lock(&root->lock);
168 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
169 if (!list_empty(&node->p_list))
170 list_move_tail(&node->p_list, &root->prepare_list);
171 else if (mod)
172 list_add_tail(&node->p_list, &root->prepare_list);
173 } else {
174 list_add_tail(&node->n_list, &root->node_list);
175 list_add_tail(&node->p_list, &root->prepare_list);
176 refcount_inc(&node->refs); /* inserted into list */
177 root->nodes++;
178 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
179 }
180 spin_unlock(&root->lock);
181 }
182
183 /* Call it when holding delayed_node->mutex */
184 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
185 struct btrfs_delayed_node *node)
186 {
187 spin_lock(&root->lock);
188 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
189 root->nodes--;
190 refcount_dec(&node->refs); /* not in the list */
191 list_del_init(&node->n_list);
192 if (!list_empty(&node->p_list))
193 list_del_init(&node->p_list);
194 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
195 }
196 spin_unlock(&root->lock);
197 }
198
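/*
 * Take a reference on the first delayed node in the root's node_list, or
 * return NULL if the list is empty.
 */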
199 static struct btrfs_delayed_node *btrfs_first_delayed_node(
200 struct btrfs_delayed_root *delayed_root)
201 {
202 struct list_head *p;
203 struct btrfs_delayed_node *node = NULL;
204
205 spin_lock(&delayed_root->lock);
206 if (list_empty(&delayed_root->node_list))
207 goto out;
208
209 p = delayed_root->node_list.next;
210 node = list_entry(p, struct btrfs_delayed_node, n_list);
211 refcount_inc(&node->refs);
212 out:
213 spin_unlock(&delayed_root->lock);
214
215 return node;
216 }
217
218 static struct btrfs_delayed_node *btrfs_next_delayed_node(
219 struct btrfs_delayed_node *node)
220 {
221 struct btrfs_delayed_root *delayed_root;
222 struct list_head *p;
223 struct btrfs_delayed_node *next = NULL;
224
225 delayed_root = node->root->fs_info->delayed_root;
226 spin_lock(&delayed_root->lock);
227 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
228 /* not in the list */
229 if (list_empty(&delayed_root->node_list))
230 goto out;
231 p = delayed_root->node_list.next;
232 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
233 goto out;
234 else
235 p = node->n_list.next;
236
237 next = list_entry(p, struct btrfs_delayed_node, n_list);
238 refcount_inc(&next->refs);
239 out:
240 spin_unlock(&delayed_root->lock);
241
242 return next;
243 }
244
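/*
 * Drop one reference on a delayed node. Before dropping it, requeue the node
 * on the delayed root if it still has pending items, otherwise dequeue it.
 * When the last reference goes away, remove the node from the root's radix
 * tree and free it.
 */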
245 static void __btrfs_release_delayed_node(
246 struct btrfs_delayed_node *delayed_node,
247 int mod)
248 {
249 struct btrfs_delayed_root *delayed_root;
250
251 if (!delayed_node)
252 return;
253
254 delayed_root = delayed_node->root->fs_info->delayed_root;
255
256 mutex_lock(&delayed_node->mutex);
257 if (delayed_node->count)
258 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
259 else
260 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
261 mutex_unlock(&delayed_node->mutex);
262
263 if (refcount_dec_and_test(&delayed_node->refs)) {
264 struct btrfs_root *root = delayed_node->root;
265
266 spin_lock(&root->inode_lock);
267 /*
268 * Once our refcount goes to zero, nobody is allowed to bump it
269 * back up. We can delete it now.
270 */
271 ASSERT(refcount_read(&delayed_node->refs) == 0);
272 radix_tree_delete(&root->delayed_nodes_tree,
273 delayed_node->inode_id);
274 spin_unlock(&root->inode_lock);
275 kmem_cache_free(delayed_node_cache, delayed_node);
276 }
277 }
278
279 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
280 {
281 __btrfs_release_delayed_node(node, 0);
282 }
283
284 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
285 struct btrfs_delayed_root *delayed_root)
286 {
287 struct list_head *p;
288 struct btrfs_delayed_node *node = NULL;
289
290 spin_lock(&delayed_root->lock);
291 if (list_empty(&delayed_root->prepare_list))
292 goto out;
293
294 p = delayed_root->prepare_list.next;
295 list_del_init(p);
296 node = list_entry(p, struct btrfs_delayed_node, p_list);
297 refcount_inc(&node->refs);
298 out:
299 spin_unlock(&delayed_root->lock);
300
301 return node;
302 }
303
304 static inline void btrfs_release_prepared_delayed_node(
305 struct btrfs_delayed_node *node)
306 {
307 __btrfs_release_delayed_node(node, 1);
308 }
309
310 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
311 struct btrfs_delayed_node *node,
312 enum btrfs_delayed_item_type type)
313 {
314 struct btrfs_delayed_item *item;
315
316 item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
317 if (item) {
318 item->data_len = data_len;
319 item->type = type;
320 item->bytes_reserved = 0;
321 item->delayed_node = node;
322 RB_CLEAR_NODE(&item->rb_node);
323 INIT_LIST_HEAD(&item->log_list);
324 item->logged = false;
325 refcount_set(&item->refs, 1);
326 }
327 return item;
328 }
329
330 /*
331 * __btrfs_lookup_delayed_item - look up the delayed item by key
332 * @root: the rbtree root of the delayed items (insertion or deletion tree)
333 * @index: the dir index value to lookup (offset of a dir index key)
334 *
335 * Note: if we don't find an item with a matching index, NULL is
336 * returned.
337 */
338 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
339 struct rb_root *root,
340 u64 index)
341 {
342 struct rb_node *node = root->rb_node;
343 struct btrfs_delayed_item *delayed_item = NULL;
344
345 while (node) {
346 delayed_item = rb_entry(node, struct btrfs_delayed_item,
347 rb_node);
348 if (delayed_item->index < index)
349 node = node->rb_right;
350 else if (delayed_item->index > index)
351 node = node->rb_left;
352 else
353 return delayed_item;
354 }
355
356 return NULL;
357 }
358
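/*
 * Insert a delayed item into its node's insertion or deletion rbtree, keyed
 * by the dir index. Returns -EEXIST if an item with the same index is
 * already queued.
 */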
359 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
360 struct btrfs_delayed_item *ins)
361 {
362 struct rb_node **p, *node;
363 struct rb_node *parent_node = NULL;
364 struct rb_root_cached *root;
365 struct btrfs_delayed_item *item;
366 bool leftmost = true;
367
368 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
369 root = &delayed_node->ins_root;
370 else
371 root = &delayed_node->del_root;
372
373 p = &root->rb_root.rb_node;
374 node = &ins->rb_node;
375
376 while (*p) {
377 parent_node = *p;
378 item = rb_entry(parent_node, struct btrfs_delayed_item,
379 rb_node);
380
381 if (item->index < ins->index) {
382 p = &(*p)->rb_right;
383 leftmost = false;
384 } else if (item->index > ins->index) {
385 p = &(*p)->rb_left;
386 } else {
387 return -EEXIST;
388 }
389 }
390
391 rb_link_node(node, parent_node, p);
392 rb_insert_color_cached(node, root, leftmost);
393
394 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
395 ins->index >= delayed_node->index_cnt)
396 delayed_node->index_cnt = ins->index + 1;
397
398 delayed_node->count++;
399 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
400 return 0;
401 }
402
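/*
 * Account one completed delayed item: bump the sequence counter, decrement
 * the global pending item count and wake up waiters in
 * btrfs_balance_delayed_items() once enough progress has been made.
 */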
403 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
404 {
405 int seq = atomic_inc_return(&delayed_root->items_seq);
406
407 /* atomic_dec_return implies a barrier */
408 if ((atomic_dec_return(&delayed_root->items) <
409 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
410 cond_wake_up_nomb(&delayed_root->wait);
411 }
412
413 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
414 {
415 struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
416 struct rb_root_cached *root;
417 struct btrfs_delayed_root *delayed_root;
418
419 /* Not inserted, ignore it. */
420 if (RB_EMPTY_NODE(&delayed_item->rb_node))
421 return;
422
423 /* If it's in an rbtree, then we need to have the delayed node locked. */
424 lockdep_assert_held(&delayed_node->mutex);
425
426 delayed_root = delayed_node->root->fs_info->delayed_root;
427
428 BUG_ON(!delayed_root);
429
430 if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
431 root = &delayed_node->ins_root;
432 else
433 root = &delayed_node->del_root;
434
435 rb_erase_cached(&delayed_item->rb_node, root);
436 RB_CLEAR_NODE(&delayed_item->rb_node);
437 delayed_node->count--;
438
439 finish_one_item(delayed_root);
440 }
441
442 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
443 {
444 if (item) {
445 __btrfs_remove_delayed_item(item);
446 if (refcount_dec_and_test(&item->refs))
447 kfree(item);
448 }
449 }
450
451 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
452 struct btrfs_delayed_node *delayed_node)
453 {
454 struct rb_node *p;
455 struct btrfs_delayed_item *item = NULL;
456
457 p = rb_first_cached(&delayed_node->ins_root);
458 if (p)
459 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
460
461 return item;
462 }
463
464 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
465 struct btrfs_delayed_node *delayed_node)
466 {
467 struct rb_node *p;
468 struct btrfs_delayed_item *item = NULL;
469
470 p = rb_first_cached(&delayed_node->del_root);
471 if (p)
472 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
473
474 return item;
475 }
476
477 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
478 struct btrfs_delayed_item *item)
479 {
480 struct rb_node *p;
481 struct btrfs_delayed_item *next = NULL;
482
483 p = rb_next(&item->rb_node);
484 if (p)
485 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
486
487 return next;
488 }
489
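/*
 * Reserve metadata space for a delayed item by migrating it from the
 * transaction's block reserve into the global delayed_block_rsv. Only
 * deletion items record the reservation in item->bytes_reserved; insertion
 * items are accounted per leaf instead.
 */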
490 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
491 struct btrfs_delayed_item *item)
492 {
493 struct btrfs_block_rsv *src_rsv;
494 struct btrfs_block_rsv *dst_rsv;
495 struct btrfs_fs_info *fs_info = trans->fs_info;
496 u64 num_bytes;
497 int ret;
498
499 if (!trans->bytes_reserved)
500 return 0;
501
502 src_rsv = trans->block_rsv;
503 dst_rsv = &fs_info->delayed_block_rsv;
504
505 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
506
507 /*
508 * Here we migrate the space rsv from the transaction rsv, since we
509 * have already reserved space when starting a transaction. So there
510 * is no need to reserve qgroup space here.
511 */
512 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
513 if (!ret) {
514 trace_btrfs_space_reservation(fs_info, "delayed_item",
515 item->delayed_node->inode_id,
516 num_bytes, 1);
517 /*
518 * For insertions we track reserved metadata space by accounting
519 * for the number of leaves that will be used, based on the delayed
520 * node's curr_index_batch_size and index_item_leaves fields.
521 */
522 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
523 item->bytes_reserved = num_bytes;
524 }
525
526 return ret;
527 }
528
529 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
530 struct btrfs_delayed_item *item)
531 {
532 struct btrfs_block_rsv *rsv;
533 struct btrfs_fs_info *fs_info = root->fs_info;
534
535 if (!item->bytes_reserved)
536 return;
537
538 rsv = &fs_info->delayed_block_rsv;
539 /*
540 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
541 * to release/reserve qgroup space.
542 */
543 trace_btrfs_space_reservation(fs_info, "delayed_item",
544 item->delayed_node->inode_id,
545 item->bytes_reserved, 0);
546 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
547 }
548
549 static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
550 unsigned int num_leaves)
551 {
552 struct btrfs_fs_info *fs_info = node->root->fs_info;
553 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
554
555 /* There are no space reservations during log replay, bail out. */
556 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
557 return;
558
559 trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
560 bytes, 0);
561 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
562 }
563
564 static int btrfs_delayed_inode_reserve_metadata(
565 struct btrfs_trans_handle *trans,
566 struct btrfs_root *root,
567 struct btrfs_delayed_node *node)
568 {
569 struct btrfs_fs_info *fs_info = root->fs_info;
570 struct btrfs_block_rsv *src_rsv;
571 struct btrfs_block_rsv *dst_rsv;
572 u64 num_bytes;
573 int ret;
574
575 src_rsv = trans->block_rsv;
576 dst_rsv = &fs_info->delayed_block_rsv;
577
578 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
579
580 /*
581 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
582 * which doesn't reserve space for speed. This is a problem since we
583 * still need to reserve space for this update, so try to reserve the
584 * space.
585 *
586 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
587 * we always reserve enough to update the inode item.
588 */
589 if (!src_rsv || (!trans->bytes_reserved &&
590 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
591 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
592 BTRFS_QGROUP_RSV_META_PREALLOC, true);
593 if (ret < 0)
594 return ret;
595 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
596 BTRFS_RESERVE_NO_FLUSH);
597 /* NO_FLUSH could only fail with -ENOSPC */
598 ASSERT(ret == 0 || ret == -ENOSPC);
599 if (ret)
600 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
601 } else {
602 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
603 }
604
605 if (!ret) {
606 trace_btrfs_space_reservation(fs_info, "delayed_inode",
607 node->inode_id, num_bytes, 1);
608 node->bytes_reserved = num_bytes;
609 }
610
611 return ret;
612 }
613
614 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
615 struct btrfs_delayed_node *node,
616 bool qgroup_free)
617 {
618 struct btrfs_block_rsv *rsv;
619
620 if (!node->bytes_reserved)
621 return;
622
623 rsv = &fs_info->delayed_block_rsv;
624 trace_btrfs_space_reservation(fs_info, "delayed_inode",
625 node->inode_id, node->bytes_reserved, 0);
626 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
627 if (qgroup_free)
628 btrfs_qgroup_free_meta_prealloc(node->root,
629 node->bytes_reserved);
630 else
631 btrfs_qgroup_convert_reserved_meta(node->root,
632 node->bytes_reserved);
633 node->bytes_reserved = 0;
634 }
635
636 /*
637 * Insert a single delayed item or a batch of delayed items, as many as possible
638 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
639 * in the rbtree, and if there's a gap between two consecutive dir index items,
640 * then it means at some point we had delayed dir indexes to add but they got
641 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
642 * into the subvolume tree. Dir index keys also have their offsets coming from a
643 * monotonically increasing counter, so we can't get new keys with an offset that
644 * fits within a gap between delayed dir index items.
645 */
646 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
647 struct btrfs_root *root,
648 struct btrfs_path *path,
649 struct btrfs_delayed_item *first_item)
650 {
651 struct btrfs_fs_info *fs_info = root->fs_info;
652 struct btrfs_delayed_node *node = first_item->delayed_node;
653 LIST_HEAD(item_list);
654 struct btrfs_delayed_item *curr;
655 struct btrfs_delayed_item *next;
656 const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
657 struct btrfs_item_batch batch;
658 struct btrfs_key first_key;
659 const u32 first_data_size = first_item->data_len;
660 int total_size;
661 char *ins_data = NULL;
662 int ret;
663 bool continuous_keys_only = false;
664
665 lockdep_assert_held(&node->mutex);
666
667 /*
668 * During normal operation the delayed index offset is continuously
669 * increasing, so we can batch insert all items as there will not be any
670 * overlapping keys in the tree.
671 *
672 * The exception to this is log replay, where we may have interleaved
673 * offsets in the tree, so our batch needs to be continuous keys only in
674 * order to ensure we do not end up with out of order items in our leaf.
675 */
676 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
677 continuous_keys_only = true;
678
679 /*
680 * For delayed items to insert, we track reserved metadata bytes based
681 * on the number of leaves that we will use.
682 * See btrfs_insert_delayed_dir_index() and
683 * btrfs_delayed_item_reserve_metadata().
684 */
685 ASSERT(first_item->bytes_reserved == 0);
686
687 list_add_tail(&first_item->tree_list, &item_list);
688 batch.total_data_size = first_data_size;
689 batch.nr = 1;
690 total_size = first_data_size + sizeof(struct btrfs_item);
691 curr = first_item;
692
693 while (true) {
694 int next_size;
695
696 next = __btrfs_next_delayed_item(curr);
697 if (!next)
698 break;
699
700 /*
701 * We cannot allow gaps in the key space if we're doing log
702 * replay.
703 */
704 if (continuous_keys_only && (next->index != curr->index + 1))
705 break;
706
707 ASSERT(next->bytes_reserved == 0);
708
709 next_size = next->data_len + sizeof(struct btrfs_item);
710 if (total_size + next_size > max_size)
711 break;
712
713 list_add_tail(&next->tree_list, &item_list);
714 batch.nr++;
715 total_size += next_size;
716 batch.total_data_size += next->data_len;
717 curr = next;
718 }
719
720 if (batch.nr == 1) {
721 first_key.objectid = node->inode_id;
722 first_key.type = BTRFS_DIR_INDEX_KEY;
723 first_key.offset = first_item->index;
724 batch.keys = &first_key;
725 batch.data_sizes = &first_data_size;
726 } else {
727 struct btrfs_key *ins_keys;
728 u32 *ins_sizes;
729 int i = 0;
730
731 ins_data = kmalloc(batch.nr * sizeof(u32) +
732 batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
733 if (!ins_data) {
734 ret = -ENOMEM;
735 goto out;
736 }
737 ins_sizes = (u32 *)ins_data;
738 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
739 batch.keys = ins_keys;
740 batch.data_sizes = ins_sizes;
741 list_for_each_entry(curr, &item_list, tree_list) {
742 ins_keys[i].objectid = node->inode_id;
743 ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
744 ins_keys[i].offset = curr->index;
745 ins_sizes[i] = curr->data_len;
746 i++;
747 }
748 }
749
750 ret = btrfs_insert_empty_items(trans, root, path, &batch);
751 if (ret)
752 goto out;
753
754 list_for_each_entry(curr, &item_list, tree_list) {
755 char *data_ptr;
756
757 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
758 write_extent_buffer(path->nodes[0], &curr->data,
759 (unsigned long)data_ptr, curr->data_len);
760 path->slots[0]++;
761 }
762
763 /*
764 * Now release our path before releasing the delayed items and their
765 * metadata reservations, so that we don't block other tasks for more
766 * time than needed.
767 */
768 btrfs_release_path(path);
769
770 ASSERT(node->index_item_leaves > 0);
771
772 /*
773 * For normal operations we will batch an entire leaf's worth of delayed
774 * items, so if there are more items to process we can decrement
775 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
776 *
777 * However for log replay we may not have inserted an entire leaf's
778 * worth of items, we may have not had continuous items, so decrementing
779 * here would mess up the index_item_leaves accounting. For this case
780 * only clean up the accounting when there are no items left.
781 */
782 if (next && !continuous_keys_only) {
783 /*
784 * We inserted one batch of items into a leaf and there are more
785 * items to flush in a future batch, so now release one unit of
786 * metadata space from the delayed block reserve, corresponding
787 * to the leaf we just flushed.
788 */
789 btrfs_delayed_item_release_leaves(node, 1);
790 node->index_item_leaves--;
791 } else if (!next) {
792 /*
793 * There are no more items to insert. We can have a number of
794 * reserved leaves > 1 here - this happens when many dir index
795 * items are added and then removed before they are flushed (file
796 * names with a very short life, never span a transaction). So
797 * release all remaining leaves.
798 */
799 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
800 node->index_item_leaves = 0;
801 }
802
803 list_for_each_entry_safe(curr, next, &item_list, tree_list) {
804 list_del(&curr->tree_list);
805 btrfs_release_delayed_item(curr);
806 }
807 out:
808 kfree(ins_data);
809 return ret;
810 }
811
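/*
 * Flush all pending insertion items of a delayed node into the subvolume
 * tree, one leaf-sized batch at a time, dropping the node's mutex between
 * batches.
 */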
812 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
813 struct btrfs_path *path,
814 struct btrfs_root *root,
815 struct btrfs_delayed_node *node)
816 {
817 int ret = 0;
818
819 while (ret == 0) {
820 struct btrfs_delayed_item *curr;
821
822 mutex_lock(&node->mutex);
823 curr = __btrfs_first_delayed_insertion_item(node);
824 if (!curr) {
825 mutex_unlock(&node->mutex);
826 break;
827 }
828 ret = btrfs_insert_delayed_item(trans, root, path, curr);
829 mutex_unlock(&node->mutex);
830 }
831
832 return ret;
833 }
834
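/*
 * Starting from the item the path points at, delete from the leaf as many
 * consecutive dir index items as have matching delayed deletion items, then
 * release their metadata reservations and the delayed items themselves.
 */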
835 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
836 struct btrfs_root *root,
837 struct btrfs_path *path,
838 struct btrfs_delayed_item *item)
839 {
840 const u64 ino = item->delayed_node->inode_id;
841 struct btrfs_fs_info *fs_info = root->fs_info;
842 struct btrfs_delayed_item *curr, *next;
843 struct extent_buffer *leaf = path->nodes[0];
844 LIST_HEAD(batch_list);
845 int nitems, slot, last_slot;
846 int ret;
847 u64 total_reserved_size = item->bytes_reserved;
848
849 ASSERT(leaf != NULL);
850
851 slot = path->slots[0];
852 last_slot = btrfs_header_nritems(leaf) - 1;
853 /*
854 * Our caller always gives us a path pointing to an existing item, so
855 * this can not happen.
856 */
857 ASSERT(slot <= last_slot);
858 if (WARN_ON(slot > last_slot))
859 return -ENOENT;
860
861 nitems = 1;
862 curr = item;
863 list_add_tail(&curr->tree_list, &batch_list);
864
865 /*
866 * Keep checking if the next delayed item matches the next item in the
867 * leaf - if so, we can add it to the batch of items to delete from the
868 * leaf.
869 */
870 while (slot < last_slot) {
871 struct btrfs_key key;
872
873 next = __btrfs_next_delayed_item(curr);
874 if (!next)
875 break;
876
877 slot++;
878 btrfs_item_key_to_cpu(leaf, &key, slot);
879 if (key.objectid != ino ||
880 key.type != BTRFS_DIR_INDEX_KEY ||
881 key.offset != next->index)
882 break;
883 nitems++;
884 curr = next;
885 list_add_tail(&curr->tree_list, &batch_list);
886 total_reserved_size += curr->bytes_reserved;
887 }
888
889 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
890 if (ret)
891 return ret;
892
893 /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
894 if (total_reserved_size > 0) {
895 /*
896 * Check btrfs_delayed_item_reserve_metadata() to see why we
897 * don't need to release/reserve qgroup space.
898 */
899 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
900 total_reserved_size, 0);
901 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
902 total_reserved_size, NULL);
903 }
904
905 list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
906 list_del(&curr->tree_list);
907 btrfs_release_delayed_item(curr);
908 }
909
910 return 0;
911 }
912
913 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
914 struct btrfs_path *path,
915 struct btrfs_root *root,
916 struct btrfs_delayed_node *node)
917 {
918 struct btrfs_key key;
919 int ret = 0;
920
921 key.objectid = node->inode_id;
922 key.type = BTRFS_DIR_INDEX_KEY;
923
924 while (ret == 0) {
925 struct btrfs_delayed_item *item;
926
927 mutex_lock(&node->mutex);
928 item = __btrfs_first_delayed_deletion_item(node);
929 if (!item) {
930 mutex_unlock(&node->mutex);
931 break;
932 }
933
934 key.offset = item->index;
935 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
936 if (ret > 0) {
937 /*
938 * There's no matching item in the leaf. This means we
939 * have already deleted this item in a past run of the
940 * delayed items. We ignore errors when running delayed
941 * items from an async context, through a work queue job
942 * running btrfs_async_run_delayed_root(), and don't
943 * release delayed items that failed to complete. This
944 * is because we will retry later, and at transaction
945 * commit time we always run delayed items and will
946 * then deal with errors if they fail to run again.
947 *
948 * So just release delayed items for which we can't find
949 * an item in the tree, and move to the next item.
950 */
951 btrfs_release_path(path);
952 btrfs_release_delayed_item(item);
953 ret = 0;
954 } else if (ret == 0) {
955 ret = btrfs_batch_delete_items(trans, root, path, item);
956 btrfs_release_path(path);
957 }
958
959 /*
960 * We unlock and relock on each iteration, this is to prevent
961 * blocking other tasks for too long while we are being run from
962 * the async context (work queue job). Those tasks are typically
963 * running system calls like creat/mkdir/rename/unlink/etc which
964 * need to add delayed items to this delayed node.
965 */
966 mutex_unlock(&node->mutex);
967 }
968
969 return ret;
970 }
971
972 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
973 {
974 struct btrfs_delayed_root *delayed_root;
975
976 if (delayed_node &&
977 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
978 BUG_ON(!delayed_node->root);
979 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
980 delayed_node->count--;
981
982 delayed_root = delayed_node->root->fs_info->delayed_root;
983 finish_one_item(delayed_root);
984 }
985 }
986
987 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
988 {
989
990 if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
991 struct btrfs_delayed_root *delayed_root;
992
993 ASSERT(delayed_node->root);
994 delayed_node->count--;
995
996 delayed_root = delayed_node->root->fs_info->delayed_root;
997 finish_one_item(delayed_root);
998 }
999 }
1000
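/*
 * Copy the delayed inode item into the inode item stored in the subvolume
 * tree and, if BTRFS_DELAYED_NODE_DEL_IREF is set, also delete the inode's
 * ref/extref item. Aborts the transaction on failure (except for -ENOENT).
 */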
1001 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1002 struct btrfs_root *root,
1003 struct btrfs_path *path,
1004 struct btrfs_delayed_node *node)
1005 {
1006 struct btrfs_fs_info *fs_info = root->fs_info;
1007 struct btrfs_key key;
1008 struct btrfs_inode_item *inode_item;
1009 struct extent_buffer *leaf;
1010 int mod;
1011 int ret;
1012
1013 key.objectid = node->inode_id;
1014 key.type = BTRFS_INODE_ITEM_KEY;
1015 key.offset = 0;
1016
1017 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1018 mod = -1;
1019 else
1020 mod = 1;
1021
1022 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1023 if (ret > 0)
1024 ret = -ENOENT;
1025 if (ret < 0)
1026 goto out;
1027
1028 leaf = path->nodes[0];
1029 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1030 struct btrfs_inode_item);
1031 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1032 sizeof(struct btrfs_inode_item));
1033 btrfs_mark_buffer_dirty(trans, leaf);
1034
1035 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1036 goto out;
1037
1038 path->slots[0]++;
1039 if (path->slots[0] >= btrfs_header_nritems(leaf))
1040 goto search;
1041 again:
1042 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1043 if (key.objectid != node->inode_id)
1044 goto out;
1045
1046 if (key.type != BTRFS_INODE_REF_KEY &&
1047 key.type != BTRFS_INODE_EXTREF_KEY)
1048 goto out;
1049
1050 /*
1051 * Delayed iref deletion is for an inode that has only one link,
1052 * so there is only one iref. The case where several irefs are
1053 * in the same item doesn't exist.
1054 */
1055 ret = btrfs_del_item(trans, root, path);
1056 out:
1057 btrfs_release_delayed_iref(node);
1058 btrfs_release_path(path);
1059 err_out:
1060 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1061 btrfs_release_delayed_inode(node);
1062
1063 /*
1064 * If we fail to update the delayed inode we need to abort the
1065 * transaction, because we could leave the inode with the improper
1066 * counts behind.
1067 */
1068 if (ret && ret != -ENOENT)
1069 btrfs_abort_transaction(trans, ret);
1070
1071 return ret;
1072
1073 search:
1074 btrfs_release_path(path);
1075
1076 key.type = BTRFS_INODE_EXTREF_KEY;
1077 key.offset = -1;
1078
1079 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1080 if (ret < 0)
1081 goto err_out;
1082 ASSERT(ret);
1083
1084 ret = 0;
1085 leaf = path->nodes[0];
1086 path->slots[0]--;
1087 goto again;
1088 }
1089
1090 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1091 struct btrfs_root *root,
1092 struct btrfs_path *path,
1093 struct btrfs_delayed_node *node)
1094 {
1095 int ret;
1096
1097 mutex_lock(&node->mutex);
1098 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1099 mutex_unlock(&node->mutex);
1100 return 0;
1101 }
1102
1103 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1104 mutex_unlock(&node->mutex);
1105 return ret;
1106 }
1107
1108 static inline int
1109 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1110 struct btrfs_path *path,
1111 struct btrfs_delayed_node *node)
1112 {
1113 int ret;
1114
1115 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1116 if (ret)
1117 return ret;
1118
1119 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1120 if (ret)
1121 return ret;
1122
1123 ret = btrfs_record_root_in_trans(trans, node->root);
1124 if (ret)
1125 return ret;
1126 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1127 return ret;
1128 }
1129
1130 /*
1131 * Called when committing the transaction.
1132 * Returns 0 on success.
1133 * Returns < 0 on error and returns with an aborted transaction with any
1134 * outstanding delayed items cleaned up.
1135 */
1136 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1137 {
1138 struct btrfs_fs_info *fs_info = trans->fs_info;
1139 struct btrfs_delayed_root *delayed_root;
1140 struct btrfs_delayed_node *curr_node, *prev_node;
1141 struct btrfs_path *path;
1142 struct btrfs_block_rsv *block_rsv;
1143 int ret = 0;
1144 bool count = (nr > 0);
1145
1146 if (TRANS_ABORTED(trans))
1147 return -EIO;
1148
1149 path = btrfs_alloc_path();
1150 if (!path)
1151 return -ENOMEM;
1152
1153 block_rsv = trans->block_rsv;
1154 trans->block_rsv = &fs_info->delayed_block_rsv;
1155
1156 delayed_root = fs_info->delayed_root;
1157
1158 curr_node = btrfs_first_delayed_node(delayed_root);
1159 while (curr_node && (!count || nr--)) {
1160 ret = __btrfs_commit_inode_delayed_items(trans, path,
1161 curr_node);
1162 if (ret) {
1163 btrfs_abort_transaction(trans, ret);
1164 break;
1165 }
1166
1167 prev_node = curr_node;
1168 curr_node = btrfs_next_delayed_node(curr_node);
1169 /*
1170 * See the comment below about releasing path before releasing
1171 * node. If the commit of delayed items was successful the path
1172 * should always be released, but in case of an error, it may
1173 * point to locked extent buffers (a leaf at the very least).
1174 */
1175 ASSERT(path->nodes[0] == NULL);
1176 btrfs_release_delayed_node(prev_node);
1177 }
1178
1179 /*
1180 * Release the path to avoid a potential deadlock and lockdep splat when
1181 * releasing the delayed node, as that requires taking the delayed node's
1182 * mutex. If another task starts running delayed items before we take
1183 * the mutex, it will first lock the mutex and then it may try to lock
1184 * the same btree path (leaf).
1185 */
1186 btrfs_free_path(path);
1187
1188 if (curr_node)
1189 btrfs_release_delayed_node(curr_node);
1190 trans->block_rsv = block_rsv;
1191
1192 return ret;
1193 }
1194
1195 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1196 {
1197 return __btrfs_run_delayed_items(trans, -1);
1198 }
1199
1200 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1201 {
1202 return __btrfs_run_delayed_items(trans, nr);
1203 }
1204
1205 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1206 struct btrfs_inode *inode)
1207 {
1208 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1209 struct btrfs_path *path;
1210 struct btrfs_block_rsv *block_rsv;
1211 int ret;
1212
1213 if (!delayed_node)
1214 return 0;
1215
1216 mutex_lock(&delayed_node->mutex);
1217 if (!delayed_node->count) {
1218 mutex_unlock(&delayed_node->mutex);
1219 btrfs_release_delayed_node(delayed_node);
1220 return 0;
1221 }
1222 mutex_unlock(&delayed_node->mutex);
1223
1224 path = btrfs_alloc_path();
1225 if (!path) {
1226 btrfs_release_delayed_node(delayed_node);
1227 return -ENOMEM;
1228 }
1229
1230 block_rsv = trans->block_rsv;
1231 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1232
1233 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1234
1235 btrfs_release_delayed_node(delayed_node);
1236 btrfs_free_path(path);
1237 trans->block_rsv = block_rsv;
1238
1239 return ret;
1240 }
1241
1242 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1243 {
1244 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1245 struct btrfs_trans_handle *trans;
1246 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1247 struct btrfs_path *path;
1248 struct btrfs_block_rsv *block_rsv;
1249 int ret;
1250
1251 if (!delayed_node)
1252 return 0;
1253
1254 mutex_lock(&delayed_node->mutex);
1255 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1256 mutex_unlock(&delayed_node->mutex);
1257 btrfs_release_delayed_node(delayed_node);
1258 return 0;
1259 }
1260 mutex_unlock(&delayed_node->mutex);
1261
1262 trans = btrfs_join_transaction(delayed_node->root);
1263 if (IS_ERR(trans)) {
1264 ret = PTR_ERR(trans);
1265 goto out;
1266 }
1267
1268 path = btrfs_alloc_path();
1269 if (!path) {
1270 ret = -ENOMEM;
1271 goto trans_out;
1272 }
1273
1274 block_rsv = trans->block_rsv;
1275 trans->block_rsv = &fs_info->delayed_block_rsv;
1276
1277 mutex_lock(&delayed_node->mutex);
1278 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1279 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1280 path, delayed_node);
1281 else
1282 ret = 0;
1283 mutex_unlock(&delayed_node->mutex);
1284
1285 btrfs_free_path(path);
1286 trans->block_rsv = block_rsv;
1287 trans_out:
1288 btrfs_end_transaction(trans);
1289 btrfs_btree_balance_dirty(fs_info);
1290 out:
1291 btrfs_release_delayed_node(delayed_node);
1292
1293 return ret;
1294 }
1295
1296 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1297 {
1298 struct btrfs_delayed_node *delayed_node;
1299
1300 delayed_node = READ_ONCE(inode->delayed_node);
1301 if (!delayed_node)
1302 return;
1303
1304 inode->delayed_node = NULL;
1305 btrfs_release_delayed_node(delayed_node);
1306 }
1307
1308 struct btrfs_async_delayed_work {
1309 struct btrfs_delayed_root *delayed_root;
1310 int nr;
1311 struct btrfs_work work;
1312 };
1313
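/*
 * Work queue callback: keep committing delayed nodes from the prepare list
 * until the number of pending items drops below half of
 * BTRFS_DELAYED_BACKGROUND or the requested amount of work has been done.
 */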
1314 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1315 {
1316 struct btrfs_async_delayed_work *async_work;
1317 struct btrfs_delayed_root *delayed_root;
1318 struct btrfs_trans_handle *trans;
1319 struct btrfs_path *path;
1320 struct btrfs_delayed_node *delayed_node = NULL;
1321 struct btrfs_root *root;
1322 struct btrfs_block_rsv *block_rsv;
1323 int total_done = 0;
1324
1325 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1326 delayed_root = async_work->delayed_root;
1327
1328 path = btrfs_alloc_path();
1329 if (!path)
1330 goto out;
1331
1332 do {
1333 if (atomic_read(&delayed_root->items) <
1334 BTRFS_DELAYED_BACKGROUND / 2)
1335 break;
1336
1337 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1338 if (!delayed_node)
1339 break;
1340
1341 root = delayed_node->root;
1342
1343 trans = btrfs_join_transaction(root);
1344 if (IS_ERR(trans)) {
1345 btrfs_release_path(path);
1346 btrfs_release_prepared_delayed_node(delayed_node);
1347 total_done++;
1348 continue;
1349 }
1350
1351 block_rsv = trans->block_rsv;
1352 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1353
1354 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1355
1356 trans->block_rsv = block_rsv;
1357 btrfs_end_transaction(trans);
1358 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1359
1360 btrfs_release_path(path);
1361 btrfs_release_prepared_delayed_node(delayed_node);
1362 total_done++;
1363
1364 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1365 || total_done < async_work->nr);
1366
1367 btrfs_free_path(path);
1368 out:
1369 wake_up(&delayed_root->wait);
1370 kfree(async_work);
1371 }
1372
1373
1374 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1375 struct btrfs_fs_info *fs_info, int nr)
1376 {
1377 struct btrfs_async_delayed_work *async_work;
1378
1379 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1380 if (!async_work)
1381 return -ENOMEM;
1382
1383 async_work->delayed_root = delayed_root;
1384 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1385 NULL);
1386 async_work->nr = nr;
1387
1388 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1389 return 0;
1390 }
1391
1392 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1393 {
1394 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1395 }
1396
1397 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1398 {
1399 int val = atomic_read(&delayed_root->items_seq);
1400
1401 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1402 return 1;
1403
1404 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1405 return 1;
1406
1407 return 0;
1408 }
1409
1410 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1411 {
1412 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1413
1414 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1415 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1416 return;
1417
1418 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1419 int seq;
1420 int ret;
1421
1422 seq = atomic_read(&delayed_root->items_seq);
1423
1424 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1425 if (ret)
1426 return;
1427
1428 wait_event_interruptible(delayed_root->wait,
1429 could_end_wait(delayed_root, seq));
1430 return;
1431 }
1432
1433 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1434 }
1435
1436 static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1437 {
1438 struct btrfs_fs_info *fs_info = trans->fs_info;
1439 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1440
1441 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1442 return;
1443
1444 /*
1445 * Adding the new dir index item does not require touching another
1446 * leaf, so we can release 1 unit of metadata that was previously
1447 * reserved when starting the transaction. This applies only to
1448 * the case where we had a transaction start and excludes the
1449 * transaction join case (when replaying log trees).
1450 */
1451 trace_btrfs_space_reservation(fs_info, "transaction",
1452 trans->transid, bytes, 0);
1453 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1454 ASSERT(trans->bytes_reserved >= bytes);
1455 trans->bytes_reserved -= bytes;
1456 }
1457
1458 /* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1459 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1460 const char *name, int name_len,
1461 struct btrfs_inode *dir,
1462 struct btrfs_disk_key *disk_key, u8 flags,
1463 u64 index)
1464 {
1465 struct btrfs_fs_info *fs_info = trans->fs_info;
1466 const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1467 struct btrfs_delayed_node *delayed_node;
1468 struct btrfs_delayed_item *delayed_item;
1469 struct btrfs_dir_item *dir_item;
1470 bool reserve_leaf_space;
1471 u32 data_len;
1472 int ret;
1473
1474 delayed_node = btrfs_get_or_create_delayed_node(dir);
1475 if (IS_ERR(delayed_node))
1476 return PTR_ERR(delayed_node);
1477
1478 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1479 delayed_node,
1480 BTRFS_DELAYED_INSERTION_ITEM);
1481 if (!delayed_item) {
1482 ret = -ENOMEM;
1483 goto release_node;
1484 }
1485
1486 delayed_item->index = index;
1487
1488 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1489 dir_item->location = *disk_key;
1490 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1491 btrfs_set_stack_dir_data_len(dir_item, 0);
1492 btrfs_set_stack_dir_name_len(dir_item, name_len);
1493 btrfs_set_stack_dir_flags(dir_item, flags);
1494 memcpy((char *)(dir_item + 1), name, name_len);
1495
1496 data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1497
1498 mutex_lock(&delayed_node->mutex);
1499
1500 /*
1501 * First attempt to insert the delayed item. This is to make the error
1502 * handling path simpler in case we fail (-EEXIST). There's no risk of
1503 * any other task coming in and running the delayed item before we do
1504 * the metadata space reservation below, because we are holding the
1505 * delayed node's mutex and that mutex must also be locked before the
1506 * node's delayed items can be run.
1507 */
1508 ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1509 if (unlikely(ret)) {
1510 btrfs_err(trans->fs_info,
1511 "error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1512 name_len, name, index, btrfs_root_id(delayed_node->root),
1513 delayed_node->inode_id, dir->index_cnt,
1514 delayed_node->index_cnt, ret);
1515 btrfs_release_delayed_item(delayed_item);
1516 btrfs_release_dir_index_item_space(trans);
1517 mutex_unlock(&delayed_node->mutex);
1518 goto release_node;
1519 }
1520
1521 if (delayed_node->index_item_leaves == 0 ||
1522 delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1523 delayed_node->curr_index_batch_size = data_len;
1524 reserve_leaf_space = true;
1525 } else {
1526 delayed_node->curr_index_batch_size += data_len;
1527 reserve_leaf_space = false;
1528 }
1529
1530 if (reserve_leaf_space) {
1531 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1532 /*
1533 * Space was reserved for a dir index item insertion when we
1534 * started the transaction, so getting a failure here should be
1535 * impossible.
1536 */
1537 if (WARN_ON(ret)) {
1538 btrfs_release_delayed_item(delayed_item);
1539 mutex_unlock(&delayed_node->mutex);
1540 goto release_node;
1541 }
1542
1543 delayed_node->index_item_leaves++;
1544 } else {
1545 btrfs_release_dir_index_item_space(trans);
1546 }
1547 mutex_unlock(&delayed_node->mutex);
1548
1549 release_node:
1550 btrfs_release_delayed_node(delayed_node);
1551 return ret;
1552 }
1553
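/*
 * If a not-yet-flushed insertion item exists for this dir index, drop it so
 * that the insertion and deletion cancel each other out. Returns 0 if such
 * an item was found and released, 1 otherwise.
 */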
1554 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1555 struct btrfs_delayed_node *node,
1556 u64 index)
1557 {
1558 struct btrfs_delayed_item *item;
1559
1560 mutex_lock(&node->mutex);
1561 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1562 if (!item) {
1563 mutex_unlock(&node->mutex);
1564 return 1;
1565 }
1566
1567 /*
1568 * For delayed items to insert, we track reserved metadata bytes based
1569 * on the number of leaves that we will use.
1570 * See btrfs_insert_delayed_dir_index() and
1571 * btrfs_delayed_item_reserve_metadata().
1572 */
1573 ASSERT(item->bytes_reserved == 0);
1574 ASSERT(node->index_item_leaves > 0);
1575
1576 /*
1577 * If there's only one leaf reserved, we can decrement this item from the
1578 * current batch, otherwise we can not because we don't know which leaf
1579 * it belongs to. With the current limit on delayed items, we rarely
1580 * accumulate enough dir index items to fill more than one leaf (even
1581 * when using a leaf size of 4K).
1582 */
1583 if (node->index_item_leaves == 1) {
1584 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1585
1586 ASSERT(node->curr_index_batch_size >= data_len);
1587 node->curr_index_batch_size -= data_len;
1588 }
1589
1590 btrfs_release_delayed_item(item);
1591
1592 /* If we now have no more dir index items, we can release all leaves. */
1593 if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1594 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1595 node->index_item_leaves = 0;
1596 }
1597
1598 mutex_unlock(&node->mutex);
1599 return 0;
1600 }
1601
1602 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1603 struct btrfs_inode *dir, u64 index)
1604 {
1605 struct btrfs_delayed_node *node;
1606 struct btrfs_delayed_item *item;
1607 int ret;
1608
1609 node = btrfs_get_or_create_delayed_node(dir);
1610 if (IS_ERR(node))
1611 return PTR_ERR(node);
1612
1613 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1614 if (!ret)
1615 goto end;
1616
1617 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1618 if (!item) {
1619 ret = -ENOMEM;
1620 goto end;
1621 }
1622
1623 item->index = index;
1624
1625 ret = btrfs_delayed_item_reserve_metadata(trans, item);
1626 /*
1627 * We have reserved enough space when we started a new transaction,
1628 * so a metadata reservation failure here is impossible.
1629 */
1630 if (ret < 0) {
1631 btrfs_err(trans->fs_info,
1632 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1633 btrfs_release_delayed_item(item);
1634 goto end;
1635 }
1636
1637 mutex_lock(&node->mutex);
1638 ret = __btrfs_add_delayed_item(node, item);
1639 if (unlikely(ret)) {
1640 btrfs_err(trans->fs_info,
1641 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1642 index, node->root->root_key.objectid,
1643 node->inode_id, ret);
1644 btrfs_delayed_item_release_metadata(dir->root, item);
1645 btrfs_release_delayed_item(item);
1646 }
1647 mutex_unlock(&node->mutex);
1648 end:
1649 btrfs_release_delayed_node(node);
1650 return ret;
1651 }
1652
1653 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1654 {
1655 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1656
1657 if (!delayed_node)
1658 return -ENOENT;
1659
1660 /*
1661 * Since we hold the i_mutex of this directory, it is impossible that
1662 * a new directory index is added into the delayed node and index_cnt
1663 * is updated now. So we needn't lock the delayed node.
1664 */
1665 if (!delayed_node->index_cnt) {
1666 btrfs_release_delayed_node(delayed_node);
1667 return -EINVAL;
1668 }
1669
1670 inode->index_cnt = delayed_node->index_cnt;
1671 btrfs_release_delayed_node(delayed_node);
1672 return 0;
1673 }
1674
1675 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1676 u64 last_index,
1677 struct list_head *ins_list,
1678 struct list_head *del_list)
1679 {
1680 struct btrfs_delayed_node *delayed_node;
1681 struct btrfs_delayed_item *item;
1682
1683 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1684 if (!delayed_node)
1685 return false;
1686
1687 /*
1688 * We can only do one readdir with delayed items at a time because of
1689 * item->readdir_list.
1690 */
1691 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
1692 btrfs_inode_lock(BTRFS_I(inode), 0);
1693
1694 mutex_lock(&delayed_node->mutex);
1695 item = __btrfs_first_delayed_insertion_item(delayed_node);
1696 while (item && item->index <= last_index) {
1697 refcount_inc(&item->refs);
1698 list_add_tail(&item->readdir_list, ins_list);
1699 item = __btrfs_next_delayed_item(item);
1700 }
1701
1702 item = __btrfs_first_delayed_deletion_item(delayed_node);
1703 while (item && item->index <= last_index) {
1704 refcount_inc(&item->refs);
1705 list_add_tail(&item->readdir_list, del_list);
1706 item = __btrfs_next_delayed_item(item);
1707 }
1708 mutex_unlock(&delayed_node->mutex);
1709 /*
1710 * This delayed node is still cached in the btrfs inode, so refs
1711 * must be > 1 now, and we needn't check whether it is going to be
1712 * freed or not.
1713 *
1714 * Besides that, this function is used to read the dir, and we do not
1715 * insert/delete delayed items in this period. So we also needn't
1716 * requeue or dequeue this delayed node.
1717 */
1718 refcount_dec(&delayed_node->refs);
1719
1720 return true;
1721 }
1722
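/*
 * Drop the item references taken by btrfs_readdir_get_delayed_items() and
 * downgrade the inode lock back to a read lock for the VFS.
 */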
1723 void btrfs_readdir_put_delayed_items(struct inode *inode,
1724 struct list_head *ins_list,
1725 struct list_head *del_list)
1726 {
1727 struct btrfs_delayed_item *curr, *next;
1728
1729 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1730 list_del(&curr->readdir_list);
1731 if (refcount_dec_and_test(&curr->refs))
1732 kfree(curr);
1733 }
1734
1735 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1736 list_del(&curr->readdir_list);
1737 if (refcount_dec_and_test(&curr->refs))
1738 kfree(curr);
1739 }
1740
1741 /*
1742 * The VFS is going to do up_read(), so we need to downgrade back to a
1743 * read lock.
1744 */
1745 downgrade_write(&inode->i_rwsem);
1746 }
1747
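/*
 * Return 1 if @index is covered by a delayed deletion item in @del_list,
 * meaning the corresponding directory entry must be skipped during readdir,
 * 0 otherwise. @del_list is sorted by index, so we can stop at the first
 * item beyond @index.
 */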
1748 int btrfs_should_delete_dir_index(struct list_head *del_list,
1749 u64 index)
1750 {
1751 struct btrfs_delayed_item *curr;
1752 int ret = 0;
1753
1754 list_for_each_entry(curr, del_list, readdir_list) {
1755 if (curr->index > index)
1756 break;
1757 if (curr->index == index) {
1758 ret = 1;
1759 break;
1760 }
1761 }
1762 return ret;
1763 }
1764
1765 /*
1766  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1767  * and emit the collected insertion items to the readdir context.
1768  */
1769 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1770 struct list_head *ins_list)
1771 {
1772 struct btrfs_dir_item *di;
1773 struct btrfs_delayed_item *curr, *next;
1774 struct btrfs_key location;
1775 char *name;
1776 int name_len;
1777 int over = 0;
1778 unsigned char d_type;
1779
1780 /*
1781	 * The data of a delayed item is never changed, so we don't need to
1782	 * lock them. And since we hold the i_mutex of the directory, nobody
1783	 * can delete any directory index right now.
1784 */
1785 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1786 list_del(&curr->readdir_list);
1787
1788 if (curr->index < ctx->pos) {
1789 if (refcount_dec_and_test(&curr->refs))
1790 kfree(curr);
1791 continue;
1792 }
1793
1794 ctx->pos = curr->index;
1795
1796 di = (struct btrfs_dir_item *)curr->data;
1797 name = (char *)(di + 1);
1798 name_len = btrfs_stack_dir_name_len(di);
1799
1800 d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1801 btrfs_disk_key_to_cpu(&location, &di->location);
1802
1803 over = !dir_emit(ctx, name, name_len,
1804 location.objectid, d_type);
1805
1806 if (refcount_dec_and_test(&curr->refs))
1807 kfree(curr);
1808
1809 if (over)
1810 return 1;
1811 ctx->pos++;
1812 }
1813 return 0;
1814 }
1815
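/*
 * Copy the current in-memory inode state into a stack btrfs_inode_item:
 * uid/gid, size, mode, link count, byte count, generation, i_version
 * sequence, transid, rdev, flags and all timestamps.
 */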
1816 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1817 struct btrfs_inode_item *inode_item,
1818 struct inode *inode)
1819 {
1820 u64 flags;
1821
1822 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1823 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1824 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1825 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1826 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1827 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1828 btrfs_set_stack_inode_generation(inode_item,
1829 BTRFS_I(inode)->generation);
1830 btrfs_set_stack_inode_sequence(inode_item,
1831 inode_peek_iversion(inode));
1832 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1833 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1834 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1835 BTRFS_I(inode)->ro_flags);
1836 btrfs_set_stack_inode_flags(inode_item, flags);
1837 btrfs_set_stack_inode_block_group(inode_item, 0);
1838
1839 btrfs_set_stack_timespec_sec(&inode_item->atime,
1840 inode->i_atime.tv_sec);
1841 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1842 inode->i_atime.tv_nsec);
1843
1844 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1845 inode->i_mtime.tv_sec);
1846 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1847 inode->i_mtime.tv_nsec);
1848
1849 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1850 inode_get_ctime(inode).tv_sec);
1851 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1852 inode_get_ctime(inode).tv_nsec);
1853
1854 btrfs_set_stack_timespec_sec(&inode_item->otime,
1855 BTRFS_I(inode)->i_otime.tv_sec);
1856 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1857 BTRFS_I(inode)->i_otime.tv_nsec);
1858 }
1859
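/*
 * The counterpart of fill_stack_inode_item(): if the delayed node carries a
 * dirty inode item, populate the VFS inode from it and report the device
 * number via @rdev. Returns -ENOENT when there is no delayed node or it has
 * no dirty inode item.
 */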
1860 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1861 {
1862 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1863 struct btrfs_delayed_node *delayed_node;
1864 struct btrfs_inode_item *inode_item;
1865
1866 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1867 if (!delayed_node)
1868 return -ENOENT;
1869
1870 mutex_lock(&delayed_node->mutex);
1871 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1872 mutex_unlock(&delayed_node->mutex);
1873 btrfs_release_delayed_node(delayed_node);
1874 return -ENOENT;
1875 }
1876
1877 inode_item = &delayed_node->inode_item;
1878
1879 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1880 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1881 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1882 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1883 round_up(i_size_read(inode), fs_info->sectorsize));
1884 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1885 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1886 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1887 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1888 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1889
1890 inode_set_iversion_queried(inode,
1891 btrfs_stack_inode_sequence(inode_item));
1892 inode->i_rdev = 0;
1893 *rdev = btrfs_stack_inode_rdev(inode_item);
1894 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1895 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1896
1897 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1898 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1899
1900 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1901 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1902
1903 inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
1904 btrfs_stack_timespec_nsec(&inode_item->ctime));
1905
1906 BTRFS_I(inode)->i_otime.tv_sec =
1907 btrfs_stack_timespec_sec(&inode_item->otime);
1908 BTRFS_I(inode)->i_otime.tv_nsec =
1909 btrfs_stack_timespec_nsec(&inode_item->otime);
1910
1911 inode->i_generation = BTRFS_I(inode)->generation;
1912 BTRFS_I(inode)->index_cnt = (u64)-1;
1913
1914 mutex_unlock(&delayed_node->mutex);
1915 btrfs_release_delayed_node(delayed_node);
1916 return 0;
1917 }
1918
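/*
 * Record an inode update in the delayed node instead of updating the inode
 * item in the fs tree right away. Metadata space is reserved only the first
 * time the node is marked dirty; later updates just refresh the cached
 * inode item.
 */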
1919 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1920 struct btrfs_root *root,
1921 struct btrfs_inode *inode)
1922 {
1923 struct btrfs_delayed_node *delayed_node;
1924 int ret = 0;
1925
1926 delayed_node = btrfs_get_or_create_delayed_node(inode);
1927 if (IS_ERR(delayed_node))
1928 return PTR_ERR(delayed_node);
1929
1930 mutex_lock(&delayed_node->mutex);
1931 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1932 fill_stack_inode_item(trans, &delayed_node->inode_item,
1933 &inode->vfs_inode);
1934 goto release_node;
1935 }
1936
1937 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1938 if (ret)
1939 goto release_node;
1940
1941 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1942 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1943 delayed_node->count++;
1944 atomic_inc(&root->fs_info->delayed_root->items);
1945 release_node:
1946 mutex_unlock(&delayed_node->mutex);
1947 btrfs_release_delayed_node(delayed_node);
1948 return ret;
1949 }
1950
1951 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1952 {
1953 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1954 struct btrfs_delayed_node *delayed_node;
1955
1956 /*
1957	 * We don't do delayed inode updates during log recovery because it
1958	 * leads to ENOSPC problems. This means we also can't do delayed
1959	 * inode refs.
1960 */
1961 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1962 return -EAGAIN;
1963
1964 delayed_node = btrfs_get_or_create_delayed_node(inode);
1965 if (IS_ERR(delayed_node))
1966 return PTR_ERR(delayed_node);
1967
1968 /*
1969	 * We don't reserve space for inode ref deletion because:
1970	 * - We ONLY do async inode ref deletion for inodes that have only one
1971	 *   link (i_nlink == 1), which means there is only one inode ref.
1972	 *   In most cases the inode ref and the inode item are in the same
1973	 *   leaf, and we deal with them at the same time. Since we are sure
1974	 *   we will reserve space for the inode item, it is unnecessary to
1975	 *   reserve space for the inode ref deletion as well.
1976	 * - If the inode ref and the inode item are not in the same leaf, we
1977	 *   still don't need to worry about ENOSPC, because we reserve much
1978	 *   more space for the inode update than it actually needs.
1979	 * - In the worst case we can steal some space from the global
1980	 *   reservation, but that is very rare.
1981 */
1982 mutex_lock(&delayed_node->mutex);
1983 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1984 goto release_node;
1985
1986 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1987 delayed_node->count++;
1988 atomic_inc(&fs_info->delayed_root->items);
1989 release_node:
1990 mutex_unlock(&delayed_node->mutex);
1991 btrfs_release_delayed_node(delayed_node);
1992 return 0;
1993 }
1994
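/*
 * Release every pending insertion and deletion item, any leaves reserved for
 * index insertions, the delayed iref and the dirty inode item of a delayed
 * node, dropping their metadata reservations. Used when the inode or the
 * whole root is going away.
 */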
1995 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1996 {
1997 struct btrfs_root *root = delayed_node->root;
1998 struct btrfs_fs_info *fs_info = root->fs_info;
1999 struct btrfs_delayed_item *curr_item, *prev_item;
2000
2001 mutex_lock(&delayed_node->mutex);
2002 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
2003 while (curr_item) {
2004 prev_item = curr_item;
2005 curr_item = __btrfs_next_delayed_item(prev_item);
2006 btrfs_release_delayed_item(prev_item);
2007 }
2008
2009 if (delayed_node->index_item_leaves > 0) {
2010 btrfs_delayed_item_release_leaves(delayed_node,
2011 delayed_node->index_item_leaves);
2012 delayed_node->index_item_leaves = 0;
2013 }
2014
2015 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2016 while (curr_item) {
2017 btrfs_delayed_item_release_metadata(root, curr_item);
2018 prev_item = curr_item;
2019 curr_item = __btrfs_next_delayed_item(prev_item);
2020 btrfs_release_delayed_item(prev_item);
2021 }
2022
2023 btrfs_release_delayed_iref(delayed_node);
2024
2025 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2026 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2027 btrfs_release_delayed_inode(delayed_node);
2028 }
2029 mutex_unlock(&delayed_node->mutex);
2030 }
2031
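/* Throw away all pending delayed items of a single inode. */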
2032 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2033 {
2034 struct btrfs_delayed_node *delayed_node;
2035
2036 delayed_node = btrfs_get_delayed_node(inode);
2037 if (!delayed_node)
2038 return;
2039
2040 __btrfs_kill_delayed_node(delayed_node);
2041 btrfs_release_delayed_node(delayed_node);
2042 }
2043
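/*
 * Walk the root's radix tree of delayed nodes in batches of 8 and kill each
 * node. Nodes whose refcount already dropped to zero are being torn down
 * elsewhere and are skipped.
 */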
2044 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2045 {
2046 u64 inode_id = 0;
2047 struct btrfs_delayed_node *delayed_nodes[8];
2048 int i, n;
2049
2050 while (1) {
2051 spin_lock(&root->inode_lock);
2052 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
2053 (void **)delayed_nodes, inode_id,
2054 ARRAY_SIZE(delayed_nodes));
2055 if (!n) {
2056 spin_unlock(&root->inode_lock);
2057 break;
2058 }
2059
2060 inode_id = delayed_nodes[n - 1]->inode_id + 1;
2061 for (i = 0; i < n; i++) {
2062 /*
2063 * Don't increase refs in case the node is dead and
2064 * about to be removed from the tree in the loop below
2065 */
2066 if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
2067 delayed_nodes[i] = NULL;
2068 }
2069 spin_unlock(&root->inode_lock);
2070
2071 for (i = 0; i < n; i++) {
2072 if (!delayed_nodes[i])
2073 continue;
2074 __btrfs_kill_delayed_node(delayed_nodes[i]);
2075 btrfs_release_delayed_node(delayed_nodes[i]);
2076 }
2077 }
2078 }
2079
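/* Kill every delayed node queued on the fs-wide delayed root. */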
2080 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2081 {
2082 struct btrfs_delayed_node *curr_node, *prev_node;
2083
2084 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2085 while (curr_node) {
2086 __btrfs_kill_delayed_node(curr_node);
2087
2088 prev_node = curr_node;
2089 curr_node = btrfs_next_delayed_node(curr_node);
2090 btrfs_release_delayed_node(prev_node);
2091 }
2092 }
2093
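/*
 * Collect the delayed insertion and deletion items of @inode onto @ins_list
 * and @del_list for a tree-log commit, skipping items that were already
 * logged or are already on some log list.
 */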
2094 void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2095 struct list_head *ins_list,
2096 struct list_head *del_list)
2097 {
2098 struct btrfs_delayed_node *node;
2099 struct btrfs_delayed_item *item;
2100
2101 node = btrfs_get_delayed_node(inode);
2102 if (!node)
2103 return;
2104
2105 mutex_lock(&node->mutex);
2106 item = __btrfs_first_delayed_insertion_item(node);
2107 while (item) {
2108 /*
2109 * It's possible that the item is already in a log list. This
2110 * can happen in case two tasks are trying to log the same
2111 * directory. For example if we have tasks A and task B:
2112 *
2113 * Task A collected the delayed items into a log list while
2114 * under the inode's log_mutex (at btrfs_log_inode()), but it
2115 * only releases the items after logging the inodes they point
2116 * to (if they are new inodes), which happens after unlocking
2117 * the log mutex;
2118 *
2119 * Task B enters btrfs_log_inode() and acquires the log_mutex
2120	 * of the same directory inode, before task A releases the
2121 * delayed items. This can happen for example when logging some
2122 * inode we need to trigger logging of its parent directory, so
2123 * logging two files that have the same parent directory can
2124 * lead to this.
2125 *
2126 * If this happens, just ignore delayed items already in a log
2127 * list. All the tasks logging the directory are under a log
2128 * transaction and whichever finishes first can not sync the log
2129 * before the other completes and leaves the log transaction.
2130 */
2131 if (!item->logged && list_empty(&item->log_list)) {
2132 refcount_inc(&item->refs);
2133 list_add_tail(&item->log_list, ins_list);
2134 }
2135 item = __btrfs_next_delayed_item(item);
2136 }
2137
2138 item = __btrfs_first_delayed_deletion_item(node);
2139 while (item) {
2140 /* It may be non-empty, for the same reason mentioned above. */
2141 if (!item->logged && list_empty(&item->log_list)) {
2142 refcount_inc(&item->refs);
2143 list_add_tail(&item->log_list, del_list);
2144 }
2145 item = __btrfs_next_delayed_item(item);
2146 }
2147 mutex_unlock(&node->mutex);
2148
2149 /*
2150 * We are called during inode logging, which means the inode is in use
2151 * and can not be evicted before we finish logging the inode. So we never
2152 * have the last reference on the delayed inode.
2153 * Also, we don't use btrfs_release_delayed_node() because that would
2154 * requeue the delayed inode (change its order in the list of prepared
2155 * nodes) and we don't want to do such change because we don't create or
2156 * delete delayed items.
2157 */
2158 ASSERT(refcount_read(&node->refs) > 1);
2159 refcount_dec(&node->refs);
2160 }
2161
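/*
 * Mark the items collected by btrfs_log_get_delayed_items() as logged and
 * drop the extra references taken on them.
 */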
2162 void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2163 struct list_head *ins_list,
2164 struct list_head *del_list)
2165 {
2166 struct btrfs_delayed_node *node;
2167 struct btrfs_delayed_item *item;
2168 struct btrfs_delayed_item *next;
2169
2170 node = btrfs_get_delayed_node(inode);
2171 if (!node)
2172 return;
2173
2174 mutex_lock(&node->mutex);
2175
2176 list_for_each_entry_safe(item, next, ins_list, log_list) {
2177 item->logged = true;
2178 list_del_init(&item->log_list);
2179 if (refcount_dec_and_test(&item->refs))
2180 kfree(item);
2181 }
2182
2183 list_for_each_entry_safe(item, next, del_list, log_list) {
2184 item->logged = true;
2185 list_del_init(&item->log_list);
2186 if (refcount_dec_and_test(&item->refs))
2187 kfree(item);
2188 }
2189
2190 mutex_unlock(&node->mutex);
2191
2192 /*
2193 * We are called during inode logging, which means the inode is in use
2194 * and can not be evicted before we finish logging the inode. So we never
2195 * have the last reference on the delayed inode.
2196 * Also, we don't use btrfs_release_delayed_node() because that would
2197 * requeue the delayed inode (change its order in the list of prepared
2198 * nodes) and we don't want to do such change because we don't create or
2199 * delete delayed items.
2200 */
2201 ASSERT(refcount_read(&node->refs) > 1);
2202 refcount_dec(&node->refs);
2203 }
2204