// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
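/*
 * Summary of the reference counting above (drawn from the helpers in this
 * file, not a normative rule): a delayed node holds one reference for the
 * btrfs_inode that caches it, one while it sits on the root's node_list,
 * and one per temporary holder returned by the get helpers. The lookup
 * path pairs refcount_inc_not_zero() with the refcount_dec_and_test() in
 * the release path, so a node whose count already dropped to zero is
 * treated as if it had never been in the radix tree.
 */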
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
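/*
 * Drop one reference on @delayed_node. While the node still tracks pending
 * items it is kept (or re-inserted) on the work lists; otherwise it is
 * dequeued. The final reference also removes the node from the radix tree
 * and frees it.
 */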
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the root of the rbtree to search in
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
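/*
 * Illustrative use of the lookup helper above (a hypothetical caller, not
 * code from this file): on an exact match the item is returned; otherwise
 * NULL is returned and *prev / *next are set to the items that would
 * bracket the key, so a scan can continue from either neighbour:
 *
 *	struct btrfs_delayed_item *prev, *next, *item;
 *
 *	item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, &key,
 *					   &prev, &next);
 *	if (!item)
 *		item = next;
 */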
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}
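/*
 * Unlink a delayed item from its node's insertion or deletion rbtree and
 * account one finished item. Like the tree insertion above, this is meant
 * to run with the owning delayed_node->mutex held.
 */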
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So there is no
	 * need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/* NO_FLUSH could only fail with -ENOSPC */
		ASSERT(ret == 0 || ret == -ENOSPC);
		if (ret)
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
	} else {
		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	}

	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      node->inode_id, num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
						node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
						   node->bytes_reserved);
	node->bytes_reserved = 0;
}
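/*
 * Example of the batch insertion below (illustrative numbers): three
 * delayed dir index items with keys (ino, DIR_INDEX, 100),
 * (ino, DIR_INDEX, 101) and (ino, DIR_INDEX, 102) are "continuous", so
 * once the first one has positioned the path in a leaf with enough free
 * space, the other two are inserted through the same
 * setup_items_for_insert() call instead of doing two more tree searches.
 */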
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an
 * existing item with new data, such as directory name index insertion and
 * inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
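/*
 * Process the node's deletion tree: look each queued key up in the fs tree
 * and batch-delete the matching leaf items. A key that no longer exists in
 * the tree just drops its delayed item and moves on.
 */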
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
		struct btrfs_delayed_root *delayed_root;

		ASSERT(delayed_node->root);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}
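/*
 * Write the in-memory inode item back into the fs tree. When DEL_IREF is
 * set, the lookup is done with mod == -1 so that the INODE_REF/EXTREF item
 * sitting next to the inode item can be deleted in the same pass.
 */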
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto out;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}
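/*
 * Flush everything queued on one inode's delayed node: pending insertions,
 * then pending deletions, then the delayed inode update, charging the work
 * to the delayed_block_rsv for the duration.
 */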
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
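/*
 * Work item callback: keep pulling nodes off the prepare list and
 * committing their delayed items until the backlog drops below
 * BTRFS_DELAYED_BACKGROUND / 2 or enough nodes have been handled for the
 * requested batch.
 */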
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
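/*
 * Taken together with the constants at the top of the file, the function
 * above gives the delayed items their throttling behaviour: background
 * flushing kicks in at BTRFS_DELAYED_BACKGROUND queued items, callers start
 * to wait once BTRFS_DELAYED_WRITEBACK items have piled up, and waiters are
 * released in BTRFS_DELAYED_BATCH steps as items complete.
 */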
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
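/*
 * Queue the deletion of a dir index item. If an insertion for the same
 * index is still pending in the delayed tree, the two cancel each other out
 * (see btrfs_delete_delayed_insertion_item() above) and nothing has to be
 * queued at all.
 */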
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * We have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure here should not happen; warn and
	 * drop the item if it does.
	 */
	if (ret < 0) {
		btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
		btrfs_release_delayed_item(item);
		goto end;
	}

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		btrfs_delayed_item_release_metadata(dir->root, item);
		btrfs_release_delayed_item(item);
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
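/*
 * Pin the current delayed insertion/deletion items for a readdir. Every
 * item gets an extra reference and is linked through item->readdir_list,
 * which is also why only one readdir can walk the delayed items at a time.
 */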
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	btrfs_inode_lock(inode, 0);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(di->type);
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
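/*
 * The counterpart of fill_stack_inode_item(): populate the VFS inode from
 * the delayed node's copy of the inode item. Returns -ENOENT when the
 * delayed node holds no newer inode data.
 */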
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item,
				      &inode->vfs_inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
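/*
 * Queue the deletion of the inode's single backref so it can be dropped
 * together with the next delayed inode update instead of touching the tree
 * immediately. See the comment inside on why no space is reserved for it.
 */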
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do
	 * delayed inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
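/*
 * Drop every queued delayed node and release its reserved metadata;
 * intended for teardown paths where the delayed items will never be
 * written back.
 */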
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}