// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"
#include "inode-item.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
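
/*
 * A minimal caller-side sketch (an illustration, not part of the original
 * file): every successful lookup above returns with a reference held, so
 * the usual pattern is to pair the get with btrfs_release_delayed_node():
 *
 *	struct btrfs_delayed_node *node;
 *
 *	node = btrfs_get_delayed_node(inode);
 *	if (node) {
 *		mutex_lock(&node->mutex);
 *		(work on node->ins_root / node->del_root)
 *		mutex_unlock(&node->mutex);
 *		btrfs_release_delayed_node(node);
 *	}
 */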

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
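
/*
 * Illustrative sketch of how btrfs_first_delayed_node() and
 * btrfs_next_delayed_node() are meant to be combined; this mirrors the walk
 * done by __btrfs_run_delayed_items() further below, always holding a
 * reference on the node being visited:
 *
 *	struct btrfs_delayed_node *curr, *prev;
 *
 *	curr = btrfs_first_delayed_node(delayed_root);
 *	while (curr) {
 *		(process curr)
 *		prev = curr;
 *		curr = btrfs_next_delayed_node(curr);
 *		btrfs_release_delayed_node(prev);
 *	}
 */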

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
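
/*
 * For example (based on btrfs_insert_delayed_dir_index() below), a delayed
 * item carrying a directory index entry is allocated with room for the dir
 * item header plus the name, and the payload lives in item->data:
 *
 *	item = btrfs_alloc_delayed_item(sizeof(struct btrfs_dir_item) + name_len);
 */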

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
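
/*
 * finish_one_item() below is the accounting counterpart of the atomic_inc()
 * done in __btrfs_add_delayed_item(): it bumps items_seq, drops the global
 * item count and wakes up tasks throttled in btrfs_balance_delayed_items(),
 * but only when the backlog falls below BTRFS_DELAYED_BACKGROUND or once
 * every BTRFS_DELAYED_BATCH completions, to avoid a wakeup storm.
 */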

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So no need to
	 * reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
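
/*
 * Illustrative pairing (not original code): space migrated to the delayed
 * block reserve above is handed back through
 * btrfs_delayed_item_release_metadata() once the item has been flushed to
 * the tree or dropped:
 *
 *	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
 *	if (!ret) {
 *		(queue the item; later, on flush or drop:)
 *		btrfs_delayed_item_release_metadata(root, item);
 *	}
 */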

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/* NO_FLUSH could only fail with -ENOSPC */
		ASSERT(ret == 0 || ret == -ENOSPC);
		if (ret)
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
	} else {
		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	}

	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      node->inode_id, num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
						node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
						   node->bytes_reserved);
	node->bytes_reserved = 0;
}
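
/*
 * For example, assuming a 16K node size, BTRFS_LEAF_DATA_SIZE() is a bit
 * under 16K (the leaf header is subtracted), and each batched item costs
 * its data_len plus sizeof(struct btrfs_item) for the slot in the leaf.
 * That sum is the max_size bound checked by btrfs_insert_delayed_item()
 * below when it decides how many consecutive items fit into one insertion.
 */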

/*
 * Insert a single delayed item or a batch of delayed items that have
 * consecutive keys if they exist.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *first_item)
{
	LIST_HEAD(item_list);
	struct btrfs_delayed_item *curr;
	struct btrfs_delayed_item *next;
	const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
	struct btrfs_item_batch batch;
	int total_size;
	char *ins_data = NULL;
	int ret;

	list_add_tail(&first_item->tree_list, &item_list);
	batch.total_data_size = first_item->data_len;
	batch.nr = 1;
	total_size = first_item->data_len + sizeof(struct btrfs_item);
	curr = first_item;

	while (true) {
		int next_size;

		next = __btrfs_next_delayed_item(curr);
		if (!next || !btrfs_is_continuous_delayed_item(curr, next))
			break;

		next_size = next->data_len + sizeof(struct btrfs_item);
		if (total_size + next_size > max_size)
			break;

		list_add_tail(&next->tree_list, &item_list);
		batch.nr++;
		total_size += next_size;
		batch.total_data_size += next->data_len;
		curr = next;
	}

	if (batch.nr == 1) {
		batch.keys = &first_item->key;
		batch.data_sizes = &first_item->data_len;
	} else {
		struct btrfs_key *ins_keys;
		u32 *ins_sizes;
		int i = 0;

		ins_data = kmalloc(batch.nr * sizeof(u32) +
				   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
		if (!ins_data) {
			ret = -ENOMEM;
			goto out;
		}
		ins_sizes = (u32 *)ins_data;
		ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
		batch.keys = ins_keys;
		batch.data_sizes = ins_sizes;
		list_for_each_entry(curr, &item_list, tree_list) {
			ins_keys[i] = curr->key;
			ins_sizes[i] = curr->data_len;
			i++;
		}
	}

	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret)
		goto out;

	list_for_each_entry(curr, &item_list, tree_list) {
		char *data_ptr;

		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
		write_extent_buffer(path->nodes[0], &curr->data,
				    (unsigned long)data_ptr, curr->data_len);
		path->slots[0]++;
	}

	/*
	 * Now release our path before releasing the delayed items and their
	 * metadata reservations, so that we don't block other tasks for more
	 * time than needed.
	 */
	btrfs_release_path(path);

	list_for_each_entry_safe(curr, next, &item_list, tree_list) {
		list_del(&curr->tree_list);
		btrfs_delayed_item_release_metadata(root, curr);
		btrfs_release_delayed_item(curr);
	}
out:
	kfree(ins_data);
	return ret;
}

static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	int ret = 0;

	while (ret == 0) {
		struct btrfs_delayed_item *curr;

		mutex_lock(&node->mutex);
		curr = __btrfs_first_delayed_insertion_item(node);
		if (!curr) {
			mutex_unlock(&node->mutex);
			break;
		}
		ret = btrfs_insert_delayed_item(trans, root, path, curr);
		mutex_unlock(&node->mutex);
	}

	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * Count the number of dir index items that we can delete in a batch.
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
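
/*
 * btrfs_delete_delayed_items() below drains the deletion tree of one
 * delayed node: it searches the subvolume tree for each queued key and,
 * when the key exists, hands it to btrfs_batch_delete_items() above so any
 * contiguous dir index keys are removed with a single btrfs_del_items()
 * call. Keys that are no longer in the tree just get their delayed item
 * dropped.
 */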

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * Can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
		struct btrfs_delayed_root *delayed_root;

		ASSERT(delayed_node->root);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto out;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link, so
	 * there is only one iref. The case of several irefs in the same item
	 * doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}
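
/*
 * Minimal usage sketch (assuming a caller that already holds a transaction
 * handle): the transaction commit path flushes everything, while other
 * callers can cap the number of delayed nodes processed:
 *
 *	ret = btrfs_run_delayed_items(trans);
 *	ret = btrfs_run_delayed_items_nr(trans, BTRFS_DELAYED_BATCH);
 */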

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
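
/*
 * The throttling above is driven by the thresholds defined at the top of
 * this file: for example, background flushing starts once 128 items
 * (BTRFS_DELAYED_BACKGROUND) are queued, and at 512 items
 * (BTRFS_DELAYED_WRITEBACK) the caller of btrfs_balance_delayed_items()
 * itself blocks until could_end_wait() observes either BTRFS_DELAYED_BATCH
 * completions or the backlog dropping back below the background threshold.
 */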

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"failed to add delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
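
/*
 * How the two halves combine (an illustrative sequence; "index" is whatever
 * the caller allocated): creating and then removing a directory entry
 * before the delayed items are flushed never touches the b-tree, because
 * the deletion below finds and cancels the still-queued insertion item
 * instead of queueing a deletion item:
 *
 *	btrfs_insert_delayed_dir_index(trans, name, name_len, dir, &disk_key,
 *				       type, index);
 *	btrfs_delete_delayed_dir_index(trans, dir, index);
 */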

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	if (ret < 0) {
		btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
		btrfs_release_delayed_item(item);
		goto end;
	}

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		btrfs_delayed_item_release_metadata(dir->root, item);
		btrfs_release_delayed_item(item);
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
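
/*
 * A minimal sketch of the readdir pairing implemented below (assuming a
 * VFS-supplied inode and caller-owned lists, as in btrfs_real_readdir()):
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	if (btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list)) {
 *		(emit entries, consulting btrfs_should_delete_dir_index()
 *		 and btrfs_readdir_delayed_dir_index())
 *		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 *	}
 */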

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	btrfs_inode_lock(inode, 0);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used for readdir, and we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible, so we needn't
	 * lock them. And we have held i_mutex of the directory, so nobody
	 * can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(di->type);
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	u64 flags;

	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_stack_inode_flags(inode_item, flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item,
				      &inode->vfs_inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
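
/*
 * Usage sketch (an assumption about the caller, modelled on the way
 * btrfs_update_inode() in inode.c uses this API): queue the inode update in
 * memory first and only fall back to an immediate b-tree update of the
 * inode item if that fails:
 *
 *	ret = btrfs_delayed_update_inode(trans, root, inode);
 *	if (ret)
 *		(update the inode item in the tree directly)
 */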

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to ENOSPC problems. This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about ENOSPC problems, because we reserve
	 *   much more space for the inode update than it needs.
	 * - In the worst case, we can steal some space from the global
	 *   reservation, but that is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below.
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}