// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "qgroup.h"
#include "locking.h"
#include "inode-item.h"
#include "space-info.h"
#include "accessors.h"
#include "file-item.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

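	/*
	 * The caller must hold a reference on @node. If @node is no longer
	 * linked into node_list (IN_LIST cleared), restart from the head of
	 * the list, which is why the flag is checked before following
	 * n_list.next below.
	 */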
	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
					   struct btrfs_delayed_node *node,
					   enum btrfs_delayed_item_type type)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->type = type;
		item->bytes_reserved = 0;
		item->delayed_node = node;
		RB_CLEAR_NODE(&item->rb_node);
		INIT_LIST_HEAD(&item->log_list);
		item->logged = false;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by its dir index
 * @root:  the rbtree to search (the delayed node's ins_root or del_root)
 * @index: the dir index value to look up (offset of a dir index key)
 *
 * Returns the matching delayed item, or NULL if no item with that index
 * exists in the tree.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				u64 index)
{
	struct rb_node *node = root->rb_node;
	struct btrfs_delayed_item *delayed_item = NULL;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		if (delayed_item->index < index)
			node = node->rb_right;
		else if (delayed_item->index > index)
			node = node->rb_left;
		else
			return delayed_item;
	}

	return NULL;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	bool leftmost = true;

	if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else
		root = &delayed_node->del_root;

	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		if (item->index < ins->index) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (item->index > ins->index) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);

	if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->index >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->index + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/*
	 * Not inserted, ignore it.
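	 *
	 * RB_CLEAR_NODE() is called both when the item is allocated and after
	 * rb_erase_cached() below, so an empty rb_node reliably means the item
	 * is not linked into either ins_root or del_root.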
	 */
	if (RB_EMPTY_NODE(&delayed_item->rb_node))
		return;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);

	if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	RB_CLEAR_NODE(&delayed_item->rb_node);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So no need to
	 * reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->delayed_node->inode_id,
					      num_bytes, 1);
		/*
		 * For insertions we track reserved metadata space by accounting
		 * for the number of leaves that will be used, based on the
		 * delayed node's curr_index_batch_size and index_item_leaves
		 * fields, so only deletion items carry a per-item reservation.
		 */
		if (item->type == BTRFS_DELAYED_DELETION_ITEM)
			item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
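	 * (The space was migrated from the transaction's block reserve, which
	 * already carried the qgroup accounting when it was reserved.)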
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->delayed_node->inode_id,
				      item->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
					      unsigned int num_leaves)
{
	struct btrfs_fs_info *fs_info = node->root->fs_info;
	const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);

	/* There are no space reservations during log replay, bail out. */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return;

	trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
				      bytes, 0);
	btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/* NO_FLUSH could only fail with -ENOSPC */
		ASSERT(ret == 0 || ret == -ENOSPC);
		if (ret)
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
	} else {
		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	}

	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      node->inode_id, num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node,
						bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * Insert a single delayed item or a batch of delayed items, as many as possible
 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
 * in the rbtree, and if there's a gap between two consecutive dir index items,
 * then it means at some point we had delayed dir indexes to add but they got
 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush
 * them into the subvolume tree.
 * Dir index keys also have their offsets coming from a monotonically
 * increasing counter, so we can't get new keys with an offset that fits
 * within a gap between delayed dir index items.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *first_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_node *node = first_item->delayed_node;
	LIST_HEAD(item_list);
	struct btrfs_delayed_item *curr;
	struct btrfs_delayed_item *next;
	const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
	struct btrfs_item_batch batch;
	struct btrfs_key first_key;
	const u32 first_data_size = first_item->data_len;
	int total_size;
	char *ins_data = NULL;
	int ret;
	bool continuous_keys_only = false;

	lockdep_assert_held(&node->mutex);

	/*
	 * During normal operation the delayed index offset is continuously
	 * increasing, so we can batch insert all items as there will not be any
	 * overlapping keys in the tree.
	 *
	 * The exception to this is log replay, where we may have interleaved
	 * offsets in the tree, so our batch needs to be continuous keys only in
	 * order to ensure we do not end up with out of order items in our leaf.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		continuous_keys_only = true;

	/*
	 * For delayed items to insert, we track reserved metadata bytes based
	 * on the number of leaves that we will use.
	 * See btrfs_insert_delayed_dir_index() and
	 * btrfs_delayed_item_reserve_metadata().
	 */
	ASSERT(first_item->bytes_reserved == 0);

	list_add_tail(&first_item->tree_list, &item_list);
	batch.total_data_size = first_data_size;
	batch.nr = 1;
	total_size = first_data_size + sizeof(struct btrfs_item);
	curr = first_item;

	while (true) {
		int next_size;

		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		/*
		 * We cannot allow gaps in the key space if we're doing log
		 * replay.
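		 *
		 * For example, if indexes 10, 11 and 13 are queued here, items
		 * 10 and 11 can go in one batch, but 13 must start a new one,
		 * since an item with index 12 may already exist in the tree
		 * being replayed.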
		 */
		if (continuous_keys_only && (next->index != curr->index + 1))
			break;

		ASSERT(next->bytes_reserved == 0);

		next_size = next->data_len + sizeof(struct btrfs_item);
		if (total_size + next_size > max_size)
			break;

		list_add_tail(&next->tree_list, &item_list);
		batch.nr++;
		total_size += next_size;
		batch.total_data_size += next->data_len;
		curr = next;
	}

	if (batch.nr == 1) {
		first_key.objectid = node->inode_id;
		first_key.type = BTRFS_DIR_INDEX_KEY;
		first_key.offset = first_item->index;
		batch.keys = &first_key;
		batch.data_sizes = &first_data_size;
	} else {
		struct btrfs_key *ins_keys;
		u32 *ins_sizes;
		int i = 0;

		ins_data = kmalloc(batch.nr * sizeof(u32) +
				   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
		if (!ins_data) {
			ret = -ENOMEM;
			goto out;
		}
		ins_sizes = (u32 *)ins_data;
		ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
		batch.keys = ins_keys;
		batch.data_sizes = ins_sizes;
		list_for_each_entry(curr, &item_list, tree_list) {
			ins_keys[i].objectid = node->inode_id;
			ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
			ins_keys[i].offset = curr->index;
			ins_sizes[i] = curr->data_len;
			i++;
		}
	}

	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret)
		goto out;

	list_for_each_entry(curr, &item_list, tree_list) {
		char *data_ptr;

		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
		write_extent_buffer(path->nodes[0], &curr->data,
				    (unsigned long)data_ptr, curr->data_len);
		path->slots[0]++;
	}

	/*
	 * Now release our path before releasing the delayed items and their
	 * metadata reservations, so that we don't block other tasks for more
	 * time than needed.
	 */
	btrfs_release_path(path);

	ASSERT(node->index_item_leaves > 0);

	/*
	 * For normal operations we will batch an entire leaf's worth of delayed
	 * items, so if there are more items to process we can decrement
	 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
	 *
	 * However for log replay we may not have inserted an entire leaf's
	 * worth of items, we may have not had continuous items, so decrementing
	 * here would mess up the index_item_leaves accounting. For this case
	 * only clean up the accounting when there are no items left.
	 */
	if (next && !continuous_keys_only) {
		/*
		 * We inserted one batch of items into a leaf and there are more
		 * items to flush in a future batch, now release one unit of
		 * metadata space from the delayed block reserve, corresponding
		 * to the leaf we just flushed to.
		 */
		btrfs_delayed_item_release_leaves(node, 1);
		node->index_item_leaves--;
	} else if (!next) {
		/*
		 * There are no more items to insert. We can have a number of
		 * reserved leaves > 1 here - this happens when many dir index
		 * items are added and then removed before they are flushed (file
		 * names with a very short life, never span a transaction). So
		 * release all remaining leaves.
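		 *
		 * For example, space for 3 leaves may have been reserved, while
		 * deletions left only enough live items to fill a single leaf:
		 * the remaining 2 units of reservation are returned here.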
		 */
		btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
		node->index_item_leaves = 0;
	}

	list_for_each_entry_safe(curr, next, &item_list, tree_list) {
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}
out:
	kfree(ins_data);
	return ret;
}

static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	int ret = 0;

	while (ret == 0) {
		struct btrfs_delayed_item *curr;

		mutex_lock(&node->mutex);
		curr = __btrfs_first_delayed_insertion_item(node);
		if (!curr) {
			mutex_unlock(&node->mutex);
			break;
		}
		ret = btrfs_insert_delayed_item(trans, root, path, curr);
		mutex_unlock(&node->mutex);
	}

	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	const u64 ino = item->delayed_node->inode_id;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf = path->nodes[0];
	LIST_HEAD(batch_list);
	int nitems, slot, last_slot;
	int ret;
	u64 total_reserved_size = item->bytes_reserved;

	ASSERT(leaf != NULL);

	slot = path->slots[0];
	last_slot = btrfs_header_nritems(leaf) - 1;
	/*
	 * Our caller always gives us a path pointing to an existing item, so
	 * this can not happen.
	 */
	ASSERT(slot <= last_slot);
	if (WARN_ON(slot > last_slot))
		return -ENOENT;

	nitems = 1;
	curr = item;
	list_add_tail(&curr->tree_list, &batch_list);

	/*
	 * Keep checking if the next delayed item matches the next item in the
	 * leaf - if so, we can add it to the batch of items to delete from the
	 * leaf.
	 */
	while (slot < last_slot) {
		struct btrfs_key key;

		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		slot++;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino ||
		    key.type != BTRFS_DIR_INDEX_KEY ||
		    key.offset != next->index)
			break;
		nitems++;
		curr = next;
		list_add_tail(&curr->tree_list, &batch_list);
		total_reserved_size += curr->bytes_reserved;
	}

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		return ret;

	/* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
	if (total_reserved_size > 0) {
		/*
		 * Check btrfs_delayed_item_reserve_metadata() to see why we
		 * don't need to release/reserve qgroup space.
		 */
		trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
					      total_reserved_size, 0);
		btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
					total_reserved_size, NULL);
	}

	list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

	return 0;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	int ret = 0;

	key.objectid = node->inode_id;
	key.type = BTRFS_DIR_INDEX_KEY;

	while (ret == 0) {
		struct btrfs_delayed_item *item;

		mutex_lock(&node->mutex);
		item = __btrfs_first_delayed_deletion_item(node);
		if (!item) {
			mutex_unlock(&node->mutex);
			break;
		}

		key.offset = item->index;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			/*
			 * There's no matching item in the leaf. This means we
			 * have already deleted this item in a past run of the
			 * delayed items. We ignore errors when running delayed
			 * items from an async context, through a work queue job
			 * running btrfs_async_run_delayed_root(), and don't
			 * release delayed items that failed to complete. This
			 * is because we will retry later, and at transaction
			 * commit time we always run delayed items and will
			 * then deal with errors if they fail to run again.
			 *
			 * So just release delayed items for which we can't find
			 * an item in the tree, and move to the next item.
			 */
			btrfs_release_path(path);
			btrfs_release_delayed_item(item);
			ret = 0;
		} else if (ret == 0) {
			ret = btrfs_batch_delete_items(trans, root, path, item);
			btrfs_release_path(path);
		}

		/*
		 * We unlock and relock on each iteration, this is to prevent
		 * blocking other tasks for too long while we are being run from
		 * the async context (work queue job). Those tasks are typically
		 * running system calls like creat/mkdir/rename/unlink/etc which
		 * need to add delayed items to this delayed node.
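		 *
		 * The next iteration re-acquires the mutex and picks up the new
		 * first deletion item, so any items queued in the meantime are
		 * still processed.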
		 */
		mutex_unlock(&node->mutex);
	}

	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
		struct btrfs_delayed_root *delayed_root;

		ASSERT(delayed_node->root);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto out;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link, so
	 * there is only one iref. The case that several irefs are in the
	 * same item doesn't exist.
	 */
	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
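	 *
	 * Note that on failure (ret < 0) the qgroup_free argument passed to
	 * btrfs_delayed_inode_release_metadata() above is true, so the
	 * reservation is freed back to the qgroup prealloc pool instead of
	 * being converted.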
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

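	/*
	 * Quick check under the node's mutex: if nothing is queued on this
	 * node we can drop our reference right away and avoid allocating a
	 * path at all.
	 */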
	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 flags,
				   u64 index)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	bool reserve_leaf_space;
	u32 data_len;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
						delayed_node,
						BTRFS_DELAYED_INSERTION_ITEM);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

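	/*
	 * The item's variable-sized data area holds the dir item followed by
	 * the name, mirroring the on-disk layout of a BTRFS_DIR_INDEX_KEY
	 * item, so later it can be copied verbatim into a leaf by
	 * btrfs_insert_delayed_item().
	 */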
	delayed_item->index = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_flags(dir_item, flags);
	memcpy((char *)(dir_item + 1), name, name_len);

	data_len = delayed_item->data_len + sizeof(struct btrfs_item);

	mutex_lock(&delayed_node->mutex);

	if (delayed_node->index_item_leaves == 0 ||
	    delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
		delayed_node->curr_index_batch_size = data_len;
		reserve_leaf_space = true;
	} else {
		delayed_node->curr_index_batch_size += data_len;
		reserve_leaf_space = false;
	}

	if (reserve_leaf_space) {
		ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
		/*
		 * Space was reserved for a dir index item insertion when we
		 * started the transaction, so getting a failure here should be
		 * impossible.
		 */
		if (WARN_ON(ret)) {
			mutex_unlock(&delayed_node->mutex);
			btrfs_release_delayed_item(delayed_item);
			goto release_node;
		}

		delayed_node->index_item_leaves++;
	} else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

		/*
		 * Adding the new dir index item does not require touching another
		 * leaf, so we can release 1 unit of metadata that was previously
		 * reserved when starting the transaction. This applies only to
		 * the case where we had a transaction start and excludes the
		 * transaction join case (when replaying log trees).
		 */
		trace_btrfs_space_reservation(fs_info, "transaction",
					      trans->transid, bytes, 0);
		btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
		ASSERT(trans->bytes_reserved >= bytes);
		trans->bytes_reserved -= bytes;
	}

	ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       u64 index)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	/*
	 * For delayed items to insert, we track reserved metadata bytes based
	 * on the number of leaves that we will use.
	 * See btrfs_insert_delayed_dir_index() and
	 * btrfs_delayed_item_reserve_metadata().
	 */
	ASSERT(item->bytes_reserved == 0);
	ASSERT(node->index_item_leaves > 0);

	/*
	 * If there's only one leaf reserved, we can decrement this item from the
	 * current batch, otherwise we can not because we don't know which leaf
	 * it belongs to. With the current limit on delayed items, we rarely
	 * accumulate enough dir index items to fill more than one leaf (even
	 * when using a leaf size of 4K).
	 */
	if (node->index_item_leaves == 1) {
		const u32 data_len = item->data_len + sizeof(struct btrfs_item);

		ASSERT(node->curr_index_batch_size >= data_len);
		node->curr_index_batch_size -= data_len;
	}

	btrfs_release_delayed_item(item);

	/* If we now have no more dir index items, we can release all leaves. */
	if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
		btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
		node->index_item_leaves = 0;
	}

	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->index = index;

	ret = btrfs_delayed_item_reserve_metadata(trans, item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	if (ret < 0) {
		btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
		btrfs_release_delayed_item(item);
		goto end;
	}

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		btrfs_delayed_item_release_metadata(dir->root, item);
		btrfs_release_delayed_item(item);
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     u64 last_index,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
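	 *
	 * This is also why we trade the shared inode lock taken by the VFS for
	 * readdir for an exclusive one just below, only downgrading it back in
	 * btrfs_readdir_put_delayed_items().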
	 */
	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
	btrfs_inode_lock(BTRFS_I(inode), 0);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item && item->index <= last_index) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item && item->index <= last_index) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->index > index)
			break;
		if (curr->index == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->index < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->index;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	u64 flags;

	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_stack_inode_flags(inode_item, flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode_get_ctime(inode).tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode_get_ctime(inode).tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
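	/* These mirror the fields stored by fill_stack_inode_item() above. */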
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
			btrfs_stack_timespec_nsec(&inode_item->ctime));

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item,
				      &inode->vfs_inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for the inode ref deletion.
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item,
				      &inode->vfs_inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   In most cases the inode ref and the inode item are in the same
	 *   leaf, and we will deal with them at the same time.  Since we are
	 *   sure we will reserve space for the inode item, it is unnecessary
	 *   to also reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf, we
	 *   still needn't worry about an enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At worst, we can steal some space from the global reservation.
	 *   That is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->index_item_leaves > 0) {
		btrfs_delayed_item_release_leaves(delayed_node,
					delayed_node->index_item_leaves);
		delayed_node->index_item_leaves = 0;
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below.
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
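
/*
 * Throw away every delayed node still queued on the delayed root, releasing
 * all of their pending items instead of running them.  Meant for teardown
 * paths where the delayed work must be discarded rather than committed.
 */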
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}

void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
				 struct list_head *ins_list,
				 struct list_head *del_list)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;

	node = btrfs_get_delayed_node(inode);
	if (!node)
		return;

	mutex_lock(&node->mutex);
	item = __btrfs_first_delayed_insertion_item(node);
	while (item) {
		/*
		 * It's possible that the item is already in a log list.  This
		 * can happen in case two tasks are trying to log the same
		 * directory.  For example, if we have task A and task B:
		 *
		 * Task A collected the delayed items into a log list while
		 * under the inode's log_mutex (at btrfs_log_inode()), but it
		 * only releases the items after logging the inodes they point
		 * to (if they are new inodes), which happens after unlocking
		 * the log mutex;
		 *
		 * Task B enters btrfs_log_inode() and acquires the log_mutex
		 * of the same directory inode before task A releases the
		 * delayed items.  This can happen for example because, when
		 * logging some inode, we need to trigger logging of its parent
		 * directory, so logging two files that have the same parent
		 * directory can lead to this.
		 *
		 * If this happens, just ignore delayed items already in a log
		 * list.  All the tasks logging the directory are under a log
		 * transaction and whichever finishes first cannot sync the log
		 * before the other completes and leaves the log transaction.
		 */
		if (!item->logged && list_empty(&item->log_list)) {
			refcount_inc(&item->refs);
			list_add_tail(&item->log_list, ins_list);
		}
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(node);
	while (item) {
		/* It may be non-empty, for the same reason mentioned above. */
		if (!item->logged && list_empty(&item->log_list)) {
			refcount_inc(&item->refs);
			list_add_tail(&item->log_list, del_list);
		}
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&node->mutex);

	/*
	 * We are called during inode logging, which means the inode is in use
	 * and cannot be evicted before we finish logging it.  So we never have
	 * the last reference on the delayed node here.
	 * Also, we don't use btrfs_release_delayed_node() because that would
	 * requeue the delayed node (change its order in the list of prepared
	 * nodes), and we don't want such a change because we don't create or
	 * delete delayed items here.
	 */
	ASSERT(refcount_read(&node->refs) > 1);
	refcount_dec(&node->refs);
}
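
/*
 * Counterpart of btrfs_log_get_delayed_items(): mark the collected items as
 * logged and drop the extra references taken when they were added to the
 * log lists.  A minimal usage sketch for a directory logging path (assuming
 * @inode is the directory being logged):
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	btrfs_log_get_delayed_items(inode, &ins_list, &del_list);
 *	... copy the collected items into the log tree ...
 *	btrfs_log_put_delayed_items(inode, &ins_list, &del_list);
 */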
void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
				 struct list_head *ins_list,
				 struct list_head *del_list)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_delayed_item *next;

	node = btrfs_get_delayed_node(inode);
	if (!node)
		return;

	mutex_lock(&node->mutex);

	list_for_each_entry_safe(item, next, ins_list, log_list) {
		item->logged = true;
		list_del_init(&item->log_list);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}

	list_for_each_entry_safe(item, next, del_list, log_list) {
		item->logged = true;
		list_del_init(&item->log_list);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}

	mutex_unlock(&node->mutex);

	/*
	 * We are called during inode logging, which means the inode is in use
	 * and cannot be evicted before we finish logging it.  So we never have
	 * the last reference on the delayed node here.
	 * Also, we don't use btrfs_release_delayed_node() because that would
	 * requeue the delayed node (change its order in the list of prepared
	 * nodes), and we don't want such a change because we don't create or
	 * delete delayed items here.
	 */
	ASSERT(refcount_read(&node->refs) > 1);
	refcount_dec(&node->refs);
}