// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "qgroup.h"
#include "locking.h"
#include "inode-item.h"
#include "space-info.h"
#include "accessors.h"
#include "file-item.h"

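/*
 * Tunables for the delayed item machinery: once the total number of delayed
 * items crosses BTRFS_DELAYED_BACKGROUND, background flushing is kicked off;
 * once it crosses BTRFS_DELAYED_WRITEBACK, callers of
 * btrfs_balance_delayed_items() also wait for the backlog to drain.
 * BTRFS_DELAYED_BATCH is the granularity at which waiters are woken as items
 * complete (see finish_one_item() and could_end_wait()).
 */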
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
        kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        refcount_set(&delayed_node->refs, 0);
        delayed_node->ins_root = RB_ROOT_CACHED;
        delayed_node->del_root = RB_ROOT_CACHED;
        mutex_init(&delayed_node->mutex);
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
}

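/*
 * Get the delayed node of an inode, either from the pointer cached in the
 * btrfs inode or by looking it up in the root's radix tree, taking an extra
 * reference on it. Returns NULL if the inode has no delayed node (or the
 * node is currently being freed).
 */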
static struct btrfs_delayed_node *btrfs_get_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        struct btrfs_delayed_node *node;

        node = READ_ONCE(btrfs_inode->delayed_node);
        if (node) {
                refcount_inc(&node->refs);
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

        if (node) {
                if (btrfs_inode->delayed_node) {
                        refcount_inc(&node->refs);	/* can be accessed */
                        BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
                        return node;
                }

                /*
                 * It's possible that we're racing into the middle of removing
                 * this node from the radix tree.  In this case, the refcount
                 * was zero and it should never go back to one.  Just return
                 * NULL like it was never in the radix at all; our release
                 * function is in the process of removing it.
                 *
                 * Some implementations of refcount_inc refuse to bump the
                 * refcount once it has hit zero.  If we don't do this dance
                 * here, refcount_inc() may decide to just WARN_ONCE() instead
                 * of actually bumping the refcount.
                 *
                 * If this node is properly in the radix, we want to bump the
                 * refcount twice, once for the inode and once for this get
                 * operation.
                 */
                if (refcount_inc_not_zero(&node->refs)) {
                        refcount_inc(&node->refs);
                        btrfs_inode->delayed_node = node;
                } else {
                        node = NULL;
                }

                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        int ret;

again:
        node = btrfs_get_delayed_node(btrfs_inode);
        if (node)
                return node;

        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        /* cached in the btrfs inode and can be accessed */
        refcount_set(&node->refs, 2);

        ret = radix_tree_preload(GFP_NOFS);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, node);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                refcount_inc(&node->refs);	/* inserted into list */
                root->nodes++;
                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                root->nodes--;
                refcount_dec(&node->refs);	/* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

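/*
 * Get the first delayed node on the delayed root's node list, taking an
 * extra reference on it, or NULL if the list is empty.
 */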
static struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                /* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}

static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

        if (refcount_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;

                spin_lock(&root->inode_lock);
                /*
                 * Once our refcount goes to zero, nobody is allowed to bump it
                 * back up.  We can delete it now.
                 */
                ASSERT(refcount_read(&delayed_node->refs) == 0);
                radix_tree_delete(&root->delayed_nodes_tree,
                                  delayed_node->inode_id);
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, delayed_node);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}

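/*
 * Allocate a delayed item with @data_len bytes of trailing payload (for
 * insertion items, a struct btrfs_dir_item followed by the name). The item
 * is returned with a single reference held and is not yet linked into any
 * rbtree.
 */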
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
                                        struct btrfs_delayed_node *node,
                                        enum btrfs_delayed_item_type type)
{
        struct btrfs_delayed_item *item;

        item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->type = type;
                item->bytes_reserved = 0;
                item->delayed_node = node;
                RB_CLEAR_NODE(&item->rb_node);
                INIT_LIST_HEAD(&item->log_list);
                item->logged = false;
                refcount_set(&item->refs, 1);
        }
        return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by its dir index
 * @root:  the rbtree to search (a delayed node's ins_root or del_root)
 * @index: the dir index value to look up (offset of a dir index key)
 *
 * Returns the matching delayed item, or NULL if there is none.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                u64 index)
{
        struct rb_node *node = root->rb_node;
        struct btrfs_delayed_item *delayed_item = NULL;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                if (delayed_item->index < index)
                        node = node->rb_right;
                else if (delayed_item->index > index)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        return NULL;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root_cached *root;
        struct btrfs_delayed_item *item;
        bool leftmost = true;

        if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else
                root = &delayed_node->del_root;

        p = &root->rb_root.rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                rb_node);

                if (item->index < ins->index) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else if (item->index > ins->index) {
                        p = &(*p)->rb_left;
                } else {
                        return -EEXIST;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);

        if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->index >= delayed_node->index_cnt)
                delayed_node->index_cnt = ins->index + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}

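/*
 * Account one completed delayed item: bump the completion sequence, drop the
 * global item count, and wake up any task waiting in
 * btrfs_balance_delayed_items() once enough progress has been made.
 */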
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
        int seq = atomic_inc_return(&delayed_root->items_seq);

        /* atomic_dec_return implies a barrier */
        if ((atomic_dec_return(&delayed_root->items) <
            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
                cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
        struct rb_root_cached *root;
        struct btrfs_delayed_root *delayed_root;

        /* Not inserted, ignore it. */
        if (RB_EMPTY_NODE(&delayed_item->rb_node))
                return;

        /* If it's in a rbtree, then we need to have delayed node locked. */
        lockdep_assert_held(&delayed_node->mutex);

        delayed_root = delayed_node->root->fs_info->delayed_root;

        if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else
                root = &delayed_node->del_root;

        rb_erase_cached(&delayed_item->rb_node, root);
        RB_CLEAR_NODE(&delayed_item->rb_node);
        delayed_node->count--;

        finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (refcount_dec_and_test(&item->refs))
                        kfree(item);
        }
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                                struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}

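/*
 * Migrate metadata space for one delayed item from the transaction's block
 * reserve into the global delayed_block_rsv. Insertion items are accounted
 * per leaf instead (see btrfs_insert_delayed_dir_index()), so only deletion
 * items record the reservation in item->bytes_reserved here.
 */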
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

        /*
         * Here we migrate space rsv from the transaction rsv, since we have
         * already reserved space when starting a transaction. So no need to
         * reserve qgroup space here.
         */
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_item",
                                              item->delayed_node->inode_id,
                                              num_bytes, 1);
                /*
                 * For insertions we track reserved metadata space by
                 * accounting for the number of leaves that will be used,
                 * based on the delayed node's curr_index_batch_size and
                 * index_item_leaves fields.
                 */
                if (item->type == BTRFS_DELAYED_DELETION_ITEM)
                        item->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!item->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        /*
         * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
         * to release/reserve qgroup space.
         */
        trace_btrfs_space_reservation(fs_info, "delayed_item",
                                      item->delayed_node->inode_id,
                                      item->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
                                              unsigned int num_leaves)
{
        struct btrfs_fs_info *fs_info = node->root->fs_info;
        const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);

        /* There are no space reservations during log replay, bail out. */
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return;

        trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
                                      bytes, 0);
        btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_metadata_size(fs_info, 1);

        /*
         * btrfs_dirty_inode will update the inode under btrfs_join_transaction
         * which doesn't reserve space for speed.  This is a problem since we
         * still need to reserve space for this update, so try to reserve the
         * space.
         *
         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
         * we always reserve enough to update the inode item.
         */
        if (!src_rsv || (!trans->bytes_reserved &&
                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
                ret = btrfs_qgroup_reserve_meta(root, num_bytes,
                                        BTRFS_QGROUP_RSV_META_PREALLOC, true);
                if (ret < 0)
                        return ret;
                ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
                                          BTRFS_RESERVE_NO_FLUSH);
                /* NO_FLUSH could only fail with -ENOSPC */
                ASSERT(ret == 0 || ret == -ENOSPC);
                if (ret)
                        btrfs_qgroup_free_meta_prealloc(root, num_bytes);
        } else {
                ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        }

        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                              node->inode_id, num_bytes, 1);
                node->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
                                                 struct btrfs_delayed_node *node,
                                                 bool qgroup_free)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                      node->inode_id, node->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
        if (qgroup_free)
                btrfs_qgroup_free_meta_prealloc(node->root,
                                                node->bytes_reserved);
        else
                btrfs_qgroup_convert_reserved_meta(node->root,
                                                   node->bytes_reserved);
        node->bytes_reserved = 0;
}

/*
 * Insert a single delayed item or a batch of delayed items, as many as
 * possible that fit in a leaf. The delayed items (dir index keys) are sorted
 * by their key in the rbtree, and if there's a gap between two consecutive
 * dir index items, then it means at some point we had delayed dir indexes to
 * add but they got removed (by btrfs_delete_delayed_dir_index()) before we
 * attempted to flush them into the subvolume tree. Dir index keys also have
 * their offsets coming from a monotonically increasing counter, so we can't
 * get new keys with an offset that fits within a gap between delayed dir
 * index items.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *first_item)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_delayed_node *node = first_item->delayed_node;
        LIST_HEAD(item_list);
        struct btrfs_delayed_item *curr;
        struct btrfs_delayed_item *next;
        const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
        struct btrfs_item_batch batch;
        struct btrfs_key first_key;
        const u32 first_data_size = first_item->data_len;
        int total_size;
        char *ins_data = NULL;
        int ret;
        bool continuous_keys_only = false;

        lockdep_assert_held(&node->mutex);

        /*
         * During normal operation the delayed index offset is continuously
         * increasing, so we can batch insert all items as there will not be
         * any overlapping keys in the tree.
         *
         * The exception to this is log replay, where we may have interleaved
         * offsets in the tree, so our batch needs to be continuous keys only
         * in order to ensure we do not end up with out of order items in our
         * leaf.
         */
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                continuous_keys_only = true;

        /*
         * For delayed items to insert, we track reserved metadata bytes based
         * on the number of leaves that we will use.
         * See btrfs_insert_delayed_dir_index() and
         * btrfs_delayed_item_reserve_metadata().
         */
        ASSERT(first_item->bytes_reserved == 0);

        list_add_tail(&first_item->tree_list, &item_list);
        batch.total_data_size = first_data_size;
        batch.nr = 1;
        total_size = first_data_size + sizeof(struct btrfs_item);
        curr = first_item;

        while (true) {
                int next_size;

                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                /*
                 * We cannot allow gaps in the key space if we're doing log
                 * replay.
                 */
                if (continuous_keys_only && (next->index != curr->index + 1))
                        break;

                ASSERT(next->bytes_reserved == 0);

                next_size = next->data_len + sizeof(struct btrfs_item);
                if (total_size + next_size > max_size)
                        break;

                list_add_tail(&next->tree_list, &item_list);
                batch.nr++;
                total_size += next_size;
                batch.total_data_size += next->data_len;
                curr = next;
        }

        if (batch.nr == 1) {
                first_key.objectid = node->inode_id;
                first_key.type = BTRFS_DIR_INDEX_KEY;
                first_key.offset = first_item->index;
                batch.keys = &first_key;
                batch.data_sizes = &first_data_size;
        } else {
                struct btrfs_key *ins_keys;
                u32 *ins_sizes;
                int i = 0;

                ins_data = kmalloc(batch.nr * sizeof(u32) +
                                   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
                if (!ins_data) {
                        ret = -ENOMEM;
                        goto out;
                }
                ins_sizes = (u32 *)ins_data;
                ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
                batch.keys = ins_keys;
                batch.data_sizes = ins_sizes;
                list_for_each_entry(curr, &item_list, tree_list) {
                        ins_keys[i].objectid = node->inode_id;
                        ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
                        ins_keys[i].offset = curr->index;
                        ins_sizes[i] = curr->data_len;
                        i++;
                }
        }

        ret = btrfs_insert_empty_items(trans, root, path, &batch);
        if (ret)
                goto out;

        list_for_each_entry(curr, &item_list, tree_list) {
                char *data_ptr;

                data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
                write_extent_buffer(path->nodes[0], &curr->data,
                                    (unsigned long)data_ptr, curr->data_len);
                path->slots[0]++;
        }

        /*
         * Now release our path before releasing the delayed items and their
         * metadata reservations, so that we don't block other tasks for more
         * time than needed.
         */
        btrfs_release_path(path);

        ASSERT(node->index_item_leaves > 0);

        /*
         * For normal operations we will batch an entire leaf's worth of
         * delayed items, so if there are more items to process we can
         * decrement index_item_leaves by 1 as we inserted 1 leaf's worth of
         * items.
         *
         * However for log replay we may not have inserted an entire leaf's
         * worth of items, we may have not had continuous items, so
         * decrementing here would mess up the index_item_leaves accounting.
         * For this case only clean up the accounting when there are no items
         * left.
         */
        if (next && !continuous_keys_only) {
                /*
                 * We inserted one batch of items into a leaf and there are
                 * more items to flush in a future batch, now release one unit
                 * of metadata space from the delayed block reserve,
                 * corresponding to the leaf we just flushed to.
                 */
                btrfs_delayed_item_release_leaves(node, 1);
                node->index_item_leaves--;
        } else if (!next) {
                /*
                 * There are no more items to insert. We can have a number of
                 * reserved leaves > 1 here - this happens when many dir index
                 * items are added and then removed before they are flushed
                 * (file names with a very short life, never span a
                 * transaction). So release all remaining leaves.
                 */
                btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
                node->index_item_leaves = 0;
        }

        list_for_each_entry_safe(curr, next, &item_list, tree_list) {
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }
out:
        kfree(ins_data);
        return ret;
}

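/*
 * Flush all pending insertion items of a delayed node into the subvolume
 * tree, one leaf-sized batch at a time. The node's mutex is dropped between
 * batches so that tasks adding new delayed items are not blocked for long.
 */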
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        int ret = 0;

        while (ret == 0) {
                struct btrfs_delayed_item *curr;

                mutex_lock(&node->mutex);
                curr = __btrfs_first_delayed_insertion_item(node);
                if (!curr) {
                        mutex_unlock(&node->mutex);
                        break;
                }
                ret = btrfs_insert_delayed_item(trans, root, path, curr);
                mutex_unlock(&node->mutex);
        }

        return ret;
}

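/*
 * Starting from @item, whose key the caller has already positioned @path at,
 * batch up all following delayed deletion items that match consecutive items
 * in the leaf and delete them with a single btrfs_del_items() call, then
 * release their metadata reservations.
 */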
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        const u64 ino = item->delayed_node->inode_id;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf = path->nodes[0];
        LIST_HEAD(batch_list);
        int nitems, slot, last_slot;
        int ret;
        u64 total_reserved_size = item->bytes_reserved;

        ASSERT(leaf != NULL);

        slot = path->slots[0];
        last_slot = btrfs_header_nritems(leaf) - 1;
        /*
         * Our caller always gives us a path pointing to an existing item, so
         * this can not happen.
         */
        ASSERT(slot <= last_slot);
        if (WARN_ON(slot > last_slot))
                return -ENOENT;

        nitems = 1;
        curr = item;
        list_add_tail(&curr->tree_list, &batch_list);

        /*
         * Keep checking if the next delayed item matches the next item in the
         * leaf - if so, we can add it to the batch of items to delete from the
         * leaf.
         */
        while (slot < last_slot) {
                struct btrfs_key key;

                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                slot++;
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid != ino ||
                    key.type != BTRFS_DIR_INDEX_KEY ||
                    key.offset != next->index)
                        break;
                nitems++;
                curr = next;
                list_add_tail(&curr->tree_list, &batch_list);
                total_reserved_size += curr->bytes_reserved;
        }

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                return ret;

        /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
        if (total_reserved_size > 0) {
                /*
                 * Check btrfs_delayed_item_reserve_metadata() to see why we
                 * don't need to release/reserve qgroup space.
                 */
                trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
                                              total_reserved_size, 0);
                btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
                                        total_reserved_size, NULL);
        }

        list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

        return 0;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_key key;
        int ret = 0;

        key.objectid = node->inode_id;
        key.type = BTRFS_DIR_INDEX_KEY;

        while (ret == 0) {
                struct btrfs_delayed_item *item;

                mutex_lock(&node->mutex);
                item = __btrfs_first_delayed_deletion_item(node);
                if (!item) {
                        mutex_unlock(&node->mutex);
                        break;
                }

                key.offset = item->index;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
                        /*
                         * There's no matching item in the leaf. This means we
                         * have already deleted this item in a past run of the
                         * delayed items. We ignore errors when running delayed
                         * items from an async context, through a work queue
                         * job running btrfs_async_run_delayed_root(), and
                         * don't release delayed items that failed to complete.
                         * This is because we will retry later, and at
                         * transaction commit time we always run delayed items
                         * and will then deal with errors if they fail to run
                         * again.
                         *
                         * So just release delayed items for which we can't
                         * find an item in the tree, and move to the next item.
                         */
                        btrfs_release_path(path);
                        btrfs_release_delayed_item(item);
                        ret = 0;
                } else if (ret == 0) {
                        ret = btrfs_batch_delete_items(trans, root, path, item);
                        btrfs_release_path(path);
                }

                /*
                 * We unlock and relock on each iteration, this is to prevent
                 * blocking other tasks for too long while we are being run
                 * from the async context (work queue job). Those tasks are
                 * typically running system calls like creat/mkdir/rename/
                 * unlink/etc which need to add delayed items to this delayed
                 * node.
                 */
                mutex_unlock(&node->mutex);
        }

        return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node &&
            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                ASSERT(delayed_node->root);
                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
        if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
                struct btrfs_delayed_root *delayed_root;

                ASSERT(delayed_node->root);
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }
}

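/*
 * Write the delayed node's in-memory copy of the inode item back into the
 * subvolume tree and, if BTRFS_DELAYED_NODE_DEL_IREF is set, also delete the
 * inode's single remaining INODE_REF/EXTREF item. On failure (other than the
 * inode item being missing) the transaction is aborted.
 */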
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        int mod;
        int ret;

        key.objectid = node->inode_id;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                mod = -1;
        else
                mod = 1;

        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
        if (ret > 0)
                ret = -ENOENT;
        if (ret < 0)
                goto out;

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(trans, leaf);

        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                goto out;

        path->slots[0]++;
        if (path->slots[0] >= btrfs_header_nritems(leaf))
                goto search;
again:
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (key.objectid != node->inode_id)
                goto out;

        if (key.type != BTRFS_INODE_REF_KEY &&
            key.type != BTRFS_INODE_EXTREF_KEY)
                goto out;

        /*
         * Delayed iref deletion is for the inode who has only one link,
         * so there is only one iref. The case that several irefs are
         * in the same item doesn't exist.
         */
        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_release_delayed_iref(node);
        btrfs_release_path(path);
err_out:
        btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
        btrfs_release_delayed_inode(node);

        /*
         * If we fail to update the delayed inode we need to abort the
         * transaction, because we could leave the inode with the improper
         * counts behind.
         */
        if (ret && ret != -ENOENT)
                btrfs_abort_transaction(trans, ret);

        return ret;

search:
        btrfs_release_path(path);

        key.type = BTRFS_INODE_EXTREF_KEY;
        key.offset = -1;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto err_out;
        ASSERT(ret);

        ret = 0;
        leaf = path->nodes[0];
        path->slots[0]--;
        goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             struct btrfs_path *path,
                                             struct btrfs_delayed_node *node)
{
        int ret;

        mutex_lock(&node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        ret = __btrfs_update_delayed_inode(trans, root, path, node);
        mutex_unlock(&node->mutex);
        return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path,
                                   struct btrfs_delayed_node *node)
{
        int ret;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_record_root_in_trans(trans, node->root);
        if (ret)
                return ret;
        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret = 0;
        bool count = (nr > 0);

        if (TRANS_ABORTED(trans))
                return -EIO;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        delayed_root = fs_info->delayed_root;

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node && (!count || nr--)) {
                ret = __btrfs_commit_inode_delayed_items(trans, path,
                                                         curr_node);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                /*
                 * See the comment below about releasing path before releasing
                 * node. If the commit of delayed items was successful the path
                 * should always be released, but in case of an error, it may
                 * point to locked extent buffers (a leaf at the very least).
                 */
                ASSERT(path->nodes[0] == NULL);
                btrfs_release_delayed_node(prev_node);
        }

        /*
         * Release the path to avoid a potential deadlock and lockdep splat when
         * releasing the delayed node, as that requires taking the delayed node's
         * mutex. If another task starts running delayed items before we take
         * the mutex, it will first lock the mutex and then it may try to lock
         * the same btree path (leaf).
         */
        btrfs_free_path(path);

        if (curr_node)
                btrfs_release_delayed_node(curr_node);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
        return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
        return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        path = btrfs_alloc_path();
        if (!path) {
                btrfs_release_delayed_node(delayed_node);
                return -ENOMEM;
        }

        block_rsv = trans->block_rsv;
        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

        btrfs_release_delayed_node(delayed_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_trans_handle *trans;
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        trans = btrfs_join_transaction(delayed_node->root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto trans_out;
        }

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
                                                   path, delayed_node);
        else
                ret = 0;
        mutex_unlock(&delayed_node->mutex);

        btrfs_free_path(path);
        trans->block_rsv = block_rsv;
trans_out:
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
out:
        btrfs_release_delayed_node(delayed_node);

        return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = READ_ONCE(inode->delayed_node);
        if (!delayed_node)
                return;

        inode->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

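/*
 * Async work item used to flush delayed nodes in the background. @nr is the
 * number of prepared nodes to process, or 0 to keep going while the backlog
 * stays above BTRFS_DELAYED_BACKGROUND / 2 (capped at BTRFS_DELAYED_WRITEBACK
 * nodes).
 */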
struct btrfs_async_delayed_work {
        struct btrfs_delayed_root *delayed_root;
        int nr;
        struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
        struct btrfs_async_delayed_work *async_work;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        struct btrfs_block_rsv *block_rsv;
        int total_done = 0;

        async_work = container_of(work, struct btrfs_async_delayed_work, work);
        delayed_root = async_work->delayed_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        do {
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND / 2)
                        break;

                delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
                if (!delayed_node)
                        break;

                root = delayed_node->root;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        btrfs_release_path(path);
                        btrfs_release_prepared_delayed_node(delayed_node);
                        total_done++;
                        continue;
                }

                block_rsv = trans->block_rsv;
                trans->block_rsv = &root->fs_info->delayed_block_rsv;

                __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

                trans->block_rsv = block_rsv;
                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty_nodelay(root->fs_info);

                btrfs_release_path(path);
                btrfs_release_prepared_delayed_node(delayed_node);
                total_done++;

        } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
                 || total_done < async_work->nr);

        btrfs_free_path(path);
out:
        wake_up(&delayed_root->wait);
        kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_async_delayed_work *async_work;

        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
        if (!async_work)
                return -ENOMEM;

        async_work->delayed_root = delayed_root;
        btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
                        NULL);
        async_work->nr = nr;

        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
        return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
        WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

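/*
 * Wake-up condition for tasks waiting in btrfs_balance_delayed_items(): stop
 * waiting once at least BTRFS_DELAYED_BATCH items have completed since @seq
 * was sampled (or the sequence counter wrapped), or once the backlog has
 * dropped below BTRFS_DELAYED_BACKGROUND.
 */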
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
        int val = atomic_read(&delayed_root->items_seq);

        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
                return 1;

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return 1;

        return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
        struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

        if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
            btrfs_workqueue_normal_congested(fs_info->delayed_workers))
                return;

        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                int seq;
                int ret;

                seq = atomic_read(&delayed_root->items_seq);

                ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
                if (ret)
                        return;

                wait_event_interruptible(delayed_root->wait,
                                         could_end_wait(delayed_root, seq));
                return;
        }

        btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return;

        /*
         * Adding the new dir index item does not require touching another
         * leaf, so we can release 1 unit of metadata that was previously
         * reserved when starting the transaction. This applies only to
         * the case where we had a transaction start and excludes the
         * transaction join case (when replaying log trees).
         */
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, bytes, 0);
        btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
        ASSERT(trans->bytes_reserved >= bytes);
        trans->bytes_reserved -= bytes;
}

/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   const char *name, int name_len,
                                   struct btrfs_inode *dir,
                                   struct btrfs_disk_key *disk_key, u8 flags,
                                   u64 index)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *delayed_item;
        struct btrfs_dir_item *dir_item;
        bool reserve_leaf_space;
        u32 data_len;
        int ret;

        delayed_node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
                                                delayed_node,
                                                BTRFS_DELAYED_INSERTION_ITEM);
        if (!delayed_item) {
                ret = -ENOMEM;
                goto release_node;
        }

        delayed_item->index = index;

        dir_item = (struct btrfs_dir_item *)delayed_item->data;
        dir_item->location = *disk_key;
        btrfs_set_stack_dir_transid(dir_item, trans->transid);
        btrfs_set_stack_dir_data_len(dir_item, 0);
        btrfs_set_stack_dir_name_len(dir_item, name_len);
        btrfs_set_stack_dir_flags(dir_item, flags);
        memcpy((char *)(dir_item + 1), name, name_len);

        data_len = delayed_item->data_len + sizeof(struct btrfs_item);

        mutex_lock(&delayed_node->mutex);

        /*
         * First attempt to insert the delayed item. This is to make the error
         * handling path simpler in case we fail (-EEXIST). There's no risk of
         * any other task coming in and running the delayed item before we do
         * the metadata space reservation below, because we are holding the
         * delayed node's mutex and that mutex must also be locked before the
         * node's delayed items can be run.
         */
        ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
                btrfs_err(trans->fs_info,
"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
                          name_len, name, index, btrfs_root_id(delayed_node->root),
                          delayed_node->inode_id, dir->index_cnt,
                          delayed_node->index_cnt, ret);
                btrfs_release_delayed_item(delayed_item);
                btrfs_release_dir_index_item_space(trans);
                mutex_unlock(&delayed_node->mutex);
                goto release_node;
        }

        if (delayed_node->index_item_leaves == 0 ||
            delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
                delayed_node->curr_index_batch_size = data_len;
                reserve_leaf_space = true;
        } else {
                delayed_node->curr_index_batch_size += data_len;
                reserve_leaf_space = false;
        }

        if (reserve_leaf_space) {
                ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
                /*
                 * Space was reserved for a dir index item insertion when we
                 * started the transaction, so getting a failure here should be
                 * impossible.
                 */
                if (WARN_ON(ret)) {
                        btrfs_release_delayed_item(delayed_item);
                        mutex_unlock(&delayed_node->mutex);
                        goto release_node;
                }

                delayed_node->index_item_leaves++;
        } else {
                btrfs_release_dir_index_item_space(trans);
        }
        mutex_unlock(&delayed_node->mutex);

release_node:
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

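/*
 * If a matching insertion item for @index is still delayed, the insert and
 * the delete cancel out: drop the insertion item without ever touching the
 * subvolume tree and adjust the batch size/leaf accounting. Returns 0 if the
 * item was cancelled this way, 1 if the caller must queue a deletion item.
 */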
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
                                               struct btrfs_delayed_node *node,
                                               u64 index)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        /*
         * For delayed items to insert, we track reserved metadata bytes based
         * on the number of leaves that we will use.
         * See btrfs_insert_delayed_dir_index() and
         * btrfs_delayed_item_reserve_metadata().
         */
        ASSERT(item->bytes_reserved == 0);
        ASSERT(node->index_item_leaves > 0);

        /*
         * If there's only one leaf reserved, we can decrement this item from
         * the current batch, otherwise we can not because we don't know which
         * leaf it belongs to. With the current limit on delayed items, we
         * rarely accumulate enough dir index items to fill more than one leaf
         * (even when using a leaf size of 4K).
         */
        if (node->index_item_leaves == 1) {
                const u32 data_len = item->data_len + sizeof(struct btrfs_item);

                ASSERT(node->curr_index_batch_size >= data_len);
                node->curr_index_batch_size -= data_len;
        }

        btrfs_release_delayed_item(item);

        /* If we now have no more dir index items, we can release all leaves. */
        if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
                btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
                node->index_item_leaves = 0;
        }

        mutex_unlock(&node->mutex);
        return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_inode *dir, u64 index)
{
        struct btrfs_delayed_node *node;
        struct btrfs_delayed_item *item;
        int ret;

        node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(node))
                return PTR_ERR(node);

        ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
        if (!ret)
                goto end;

        item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
        if (!item) {
                ret = -ENOMEM;
                goto end;
        }

        item->index = index;

        ret = btrfs_delayed_item_reserve_metadata(trans, item);
        /*
         * We have reserved enough space when we started a new transaction,
         * so reserving metadata failure is impossible.
         */
        if (ret < 0) {
                btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
                btrfs_release_delayed_item(item);
                goto end;
        }

        mutex_lock(&node->mutex);
        ret = __btrfs_add_delayed_item(node, item);
        if (unlikely(ret)) {
                btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
                          index, node->root->root_key.objectid,
                          node->inode_id, ret);
                btrfs_delayed_item_release_metadata(dir->root, item);
                btrfs_release_delayed_item(item);
        }
        mutex_unlock(&node->mutex);
end:
        btrfs_release_delayed_node(node);
        return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

        if (!delayed_node)
                return -ENOENT;

        /*
         * Since we have held i_mutex of this directory, it is impossible that
         * a new directory index is added into the delayed node and index_cnt
         * is updated now. So we needn't lock the delayed node.
         */
        if (!delayed_node->index_cnt) {
                btrfs_release_delayed_node(delayed_node);
                return -EINVAL;
        }

        inode->index_cnt = delayed_node->index_cnt;
        btrfs_release_delayed_node(delayed_node);
        return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
                                     u64 last_index,
                                     struct list_head *ins_list,
                                     struct list_head *del_list)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;

        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
        if (!delayed_node)
                return false;

        /*
         * We can only do one readdir with delayed items at a time because of
         * item->readdir_list.
         */
        btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
        btrfs_inode_lock(BTRFS_I(inode), 0);

        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (item && item->index <= last_index) {
                refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, ins_list);
                item = __btrfs_next_delayed_item(item);
        }

        item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (item && item->index <= last_index) {
                refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, del_list);
                item = __btrfs_next_delayed_item(item);
        }
        mutex_unlock(&delayed_node->mutex);
        /*
         * This delayed node is still cached in the btrfs inode, so refs
         * must be > 1 now, and we needn't check it is going to be freed
         * or not.
         *
         * Besides that, this function is used to read dir, we do not
         * insert/delete delayed items in this period. So we also needn't
         * requeue or dequeue this delayed node.
         */
        refcount_dec(&delayed_node->refs);

        return true;
}

btrfs_readdir_put_delayed_items(struct inode * inode,struct list_head * ins_list,struct list_head * del_list)1721 void btrfs_readdir_put_delayed_items(struct inode *inode,
1722 struct list_head *ins_list,
1723 struct list_head *del_list)
1724 {
1725 struct btrfs_delayed_item *curr, *next;
1726
1727 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1728 list_del(&curr->readdir_list);
1729 if (refcount_dec_and_test(&curr->refs))
1730 kfree(curr);
1731 }
1732
1733 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1734 list_del(&curr->readdir_list);
1735 if (refcount_dec_and_test(&curr->refs))
1736 kfree(curr);
1737 }
1738
1739 /*
1740 * The VFS is going to do up_read(), so we need to downgrade back to a
1741 * read lock.
1742 */
1743 downgrade_write(&inode->i_rwsem);
1744 }
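
/*
 * Usage sketch (illustrative): the get/put pair above brackets one readdir
 * pass, and the bool returned by the get side tells the caller whether a
 * matching put is needed. Assuming a readdir implementation that knows the
 * last readable index @last_index:
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *	bool put;
 *
 *	put = btrfs_readdir_get_delayed_items(inode, last_index,
 *					      &ins_list, &del_list);
 *	... walk the on-disk dir items, consulting del_list ...
 *	if (put)
 *		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 */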

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->index > index)
			break;
		if (curr->index == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	/*
	 * Changing the data of the delayed item is impossible, so we need not
	 * lock them. And since we hold the i_mutex of the directory, nobody
	 * can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->index < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->index;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}
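
/*
 * Usage sketch (illustrative): inside a readdir pass, the deletion list
 * filters out on-disk entries queued for removal, and the insertion list is
 * emitted after the on-disk scan. Assuming @ctx, the two lists from
 * btrfs_readdir_get_delayed_items() and an on-disk iteration producing the
 * dir index @index for each entry:
 *
 *	if (btrfs_should_delete_dir_index(&del_list, index))
 *		continue;	(skip an entry with a pending delayed deletion)
 *	...
 *	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 *	if (ret)
 *		goto nopos;	(dir_emit() reported the buffer is full)
 */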

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	u64 flags;

	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_stack_inode_flags(inode_item, flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode_get_ctime(inode).tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode_get_ctime(inode).tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
			btrfs_stack_timespec_nsec(&inode_item->ctime));

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
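
/*
 * Usage sketch (illustrative): when reading an inode in, a caller can try
 * the delayed node first, since it may hold a newer copy of the inode item
 * than the on-disk tree, and only search the fs tree when nothing delayed
 * is pending. Assuming a local @rdev and a @filled flag in the caller:
 *
 *	ret = btrfs_fill_inode(inode, &rdev);
 *	if (!ret)
 *		filled = true;
 *	... search the fs tree for the inode item only if !filled ...
 */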

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item,
				      &inode->vfs_inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
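
/*
 * Usage sketch (illustrative): a generic inode-update path can route through
 * the delayed machinery instead of touching the fs tree directly. Sketch,
 * assuming a caller that records the transaction id on success:
 *
 *	ret = btrfs_delayed_update_inode(trans, root, inode);
 *	if (!ret)
 *		btrfs_set_inode_last_trans(trans, inode);
 *	return ret;
 */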

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to ENOSPC problems. This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   In most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about ENOSPC, because we reserve much more
	 *   space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global
	 *   reservation, but that is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
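
/*
 * Usage sketch (illustrative): an unlink path that already knows the
 * directory index of the entry being removed can queue the inode ref
 * deletion here and skip the synchronous backref removal. Sketch, assuming
 * the caller tracks inode->dir_index and has a synchronous fallback:
 *
 *	if (inode->dir_index) {
 *		ret = btrfs_delayed_delete_inode_ref(inode);
 *		if (!ret) {
 *			index = inode->dir_index;
 *			goto skip_backref;
 *		}
 *	}
 *	... otherwise delete the inode ref synchronously to obtain @index ...
 */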

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->index_item_leaves > 0) {
		btrfs_delayed_item_release_leaves(delayed_node,
					  delayed_node->index_item_leaves);
		delayed_node->index_item_leaves = 0;
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
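
/*
 * Usage sketch (illustrative): this is a natural hook for inode eviction,
 * where any pending delayed items are dropped rather than flushed before
 * the in-memory inode goes away:
 *
 *	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
 */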

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below.
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}
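
/*
 * Usage sketch (illustrative): this sweep is meant for teardown paths such
 * as transaction-abort or unmount cleanup, where every prepared delayed
 * node must be dropped without being flushed:
 *
 *	btrfs_destroy_delayed_inodes(fs_info);
 */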

void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
				 struct list_head *ins_list,
				 struct list_head *del_list)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;

	node = btrfs_get_delayed_node(inode);
	if (!node)
		return;

	mutex_lock(&node->mutex);
	item = __btrfs_first_delayed_insertion_item(node);
	while (item) {
		/*
		 * It's possible that the item is already in a log list. This
		 * can happen in case two tasks are trying to log the same
		 * directory. For example if we have tasks A and B:
		 *
		 * Task A collected the delayed items into a log list while
		 * under the inode's log_mutex (at btrfs_log_inode()), but it
		 * only releases the items after logging the inodes they point
		 * to (if they are new inodes), which happens after unlocking
		 * the log mutex;
		 *
		 * Task B enters btrfs_log_inode() and acquires the log_mutex
		 * of the same directory inode, before task A releases the
		 * delayed items. This can happen, for example, because when
		 * logging some inode we need to trigger logging of its parent
		 * directory, so logging two files that have the same parent
		 * directory can lead to this.
		 *
		 * If this happens, just ignore delayed items already in a log
		 * list. All the tasks logging the directory are under a log
		 * transaction and whichever finishes first can not sync the log
		 * before the other completes and leaves the log transaction.
		 */
		if (!item->logged && list_empty(&item->log_list)) {
			refcount_inc(&item->refs);
			list_add_tail(&item->log_list, ins_list);
		}
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(node);
	while (item) {
		/* It may be non-empty, for the same reason mentioned above. */
		if (!item->logged && list_empty(&item->log_list)) {
			refcount_inc(&item->refs);
			list_add_tail(&item->log_list, del_list);
		}
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&node->mutex);

	/*
	 * We are called during inode logging, which means the inode is in use
	 * and can not be evicted before we finish logging it. So we never
	 * have the last reference on the delayed inode.
	 * Also, we don't use btrfs_release_delayed_node() because that would
	 * requeue the delayed inode (change its order in the list of prepared
	 * nodes), and we don't want to make such a change because we don't
	 * create or delete delayed items.
	 */
	ASSERT(refcount_read(&node->refs) > 1);
	refcount_dec(&node->refs);
}

void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
				 struct list_head *ins_list,
				 struct list_head *del_list)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_delayed_item *next;

	node = btrfs_get_delayed_node(inode);
	if (!node)
		return;

	mutex_lock(&node->mutex);

	list_for_each_entry_safe(item, next, ins_list, log_list) {
		item->logged = true;
		list_del_init(&item->log_list);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}

	list_for_each_entry_safe(item, next, del_list, log_list) {
		item->logged = true;
		list_del_init(&item->log_list);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}

	mutex_unlock(&node->mutex);

	/*
	 * We are called during inode logging, which means the inode is in use
	 * and can not be evicted before we finish logging it. So we never
	 * have the last reference on the delayed inode.
	 * Also, we don't use btrfs_release_delayed_node() because that would
	 * requeue the delayed inode (change its order in the list of prepared
	 * nodes), and we don't want to make such a change because we don't
	 * create or delete delayed items.
	 */
	ASSERT(refcount_read(&node->refs) > 1);
	refcount_dec(&node->refs);
}
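
/*
 * Usage sketch (illustrative): the two helpers above bracket the directory
 * logging work, mirroring the readdir get/put pair. Assuming a directory
 * logging path with two local lists:
 *
 *	LIST_HEAD(delayed_ins_list);
 *	LIST_HEAD(delayed_del_list);
 *
 *	btrfs_log_get_delayed_items(inode, &delayed_ins_list,
 *				    &delayed_del_list);
 *	... log the collected delayed items along with the on-disk ones ...
 *	btrfs_log_put_delayed_items(inode, &delayed_ins_list,
 *				    &delayed_del_list);
 */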