--- extent_cache.c (72840cccc0a1a0a0dc1bb27b669a9111be6d0f6a)
+++ extent_cache.c (71644dff481180ba024ac4f5cb1f068756357adf)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * f2fs extent cache support
  *
  * Copyright (c) 2015 Motorola Mobility
  * Copyright (c) 2015 Samsung Electronics
  * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
  *          Chao Yu <chao2.yu@samsung.com>
+ *
+ * block_age-based extent cache added by:
+ * Copyright (c) 2022 xiaomi Co., Ltd.
+ *             http://www.xiaomi.com/
  */
 
 #include <linux/fs.h>
 #include <linux/f2fs_fs.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include <trace/events/f2fs.h>
 
 static void __set_extent_info(struct extent_info *ei,
 			unsigned int fofs, unsigned int len,
 			block_t blk, bool keep_clen,
+			unsigned long age, unsigned long last_blocks,
 			enum extent_type type)
 {
 	ei->fofs = fofs;
 	ei->len = len;
 
 	if (type == EX_READ) {
 		ei->blk = blk;
 		if (keep_clen)
 			return;
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 		ei->c_len = 0;
 #endif
+	} else if (type == EX_BLOCK_AGE) {
+		ei->age = age;
+		ei->last_blocks = last_blocks;
 	}
 }
 
 static bool __may_read_extent_tree(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
 	if (!test_opt(sbi, READ_EXTENT_CACHE))
 		return false;
 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
 		return false;
 	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
 			!f2fs_sb_has_readonly(sbi))
 		return false;
 	return S_ISREG(inode->i_mode);
 }
 
+static bool __may_age_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+	if (!test_opt(sbi, AGE_EXTENT_CACHE))
+		return false;
+	/* don't cache block age info for cold file */
+	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+		return false;
+	if (file_is_cold(inode))
+		return false;
+
+	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+}
+
 static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
 {
 	if (type == EX_READ)
 		return __may_read_extent_tree(inode);
+	else if (type == EX_BLOCK_AGE)
+		return __may_age_extent_tree(inode);
 	return false;
 }
 
 static bool __may_extent_tree(struct inode *inode, enum extent_type type)
 {
 	/*
 	 * for recovered files during mount do not create extents
 	 * if shrinker is not registered.
--- 23 unchanged lines hidden ---
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	if (back->c_len && back->len != back->c_len)
 		return false;
 	if (front->c_len && front->len != front->c_len)
 		return false;
 #endif
 	return (back->fofs + back->len == front->fofs &&
 			back->blk + back->len == front->blk);
+	} else if (type == EX_BLOCK_AGE) {
+		return (back->fofs + back->len == front->fofs &&
+			abs(back->age - front->age) <= SAME_AGE_REGION &&
+			abs(back->last_blocks - front->last_blocks) <=
+						SAME_AGE_REGION);
 	}
 	return false;
 }
 
 static bool __is_back_mergeable(struct extent_info *cur,
 		struct extent_info *back, enum extent_type type)
 {
 	return __is_extent_mergeable(back, cur, type);
--- 383 unchanged lines hidden ---
 	}
 unlock_out:
 	write_unlock(&et->lock);
 out:
 	if (!F2FS_I(inode)->extent_tree[EX_READ])
 		set_inode_flag(inode, FI_NO_EXTENT);
 }
 
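The new EX_BLOCK_AGE case added to __is_extent_mergeable() above merges two cached extents only when they are logically contiguous and both their blended ages and their allocation-counter snapshots differ by at most SAME_AGE_REGION. A minimal standalone sketch of that predicate; the SAME_AGE_REGION value below is illustrative (the real constant lives in f2fs.h, outside this diff):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the real SAME_AGE_REGION is defined in fs/f2fs/f2fs.h. */
#define SAME_AGE_REGION	1024

struct age_extent {
	uint64_t fofs;		/* start file offset, in blocks */
	uint64_t len;		/* extent length, in blocks */
	uint64_t age;		/* blended block age */
	uint64_t last_blocks;	/* allocated_data_blocks snapshot */
};

static uint64_t absdiff(uint64_t a, uint64_t b)
{
	return a > b ? a - b : b - a;
}

/* Mirrors the EX_BLOCK_AGE rule: contiguous offsets, similar age/snapshot. */
static bool age_extents_mergeable(const struct age_extent *back,
				  const struct age_extent *front)
{
	return back->fofs + back->len == front->fofs &&
	       absdiff(back->age, front->age) <= SAME_AGE_REGION &&
	       absdiff(back->last_blocks, front->last_blocks) <= SAME_AGE_REGION;
}

int main(void)
{
	struct age_extent back  = { .fofs = 0, .len = 8, .age = 5000, .last_blocks = 90000 };
	struct age_extent front = { .fofs = 8, .len = 4, .age = 5600, .last_blocks = 90400 };

	printf("mergeable: %d\n", age_extents_mergeable(&back, &front)); /* prints 1 */
	return 0;
}
```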
+void f2fs_init_age_extent_tree(struct inode *inode)
+{
+	if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
+		return;
+	__grab_extent_tree(inode, EX_BLOCK_AGE);
+}
+
 void f2fs_init_extent_tree(struct inode *inode)
 {
 	/* initialize read cache */
 	if (__init_may_extent_tree(inode, EX_READ))
 		__grab_extent_tree(inode, EX_READ);
+
+	/* initialize block age cache */
+	if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
+		__grab_extent_tree(inode, EX_BLOCK_AGE);
 }
 
 static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
 			struct extent_info *ei, enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct extent_tree_info *eti = &sbi->extent_tree[type];
 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
--- 34 unchanged lines hidden ---
 	spin_unlock(&eti->extent_lock);
 	ret = true;
 out:
 	stat_inc_total_hit(sbi, type);
 	read_unlock(&et->lock);
 
 	if (type == EX_READ)
 		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
+	else if (type == EX_BLOCK_AGE)
+		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
 	return ret;
 }
 
 static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct extent_node *prev_ex,
 				struct extent_node *next_ex)
 {
--- 82 unchanged lines hidden ---
 	bool leftmost = false;
 
 	if (!et)
 		return;
 
 	if (type == EX_READ)
 		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
 						tei->blk, 0);
+	else if (type == EX_BLOCK_AGE)
+		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
+						tei->age, tei->last_blocks);
+
 	write_lock(&et->lock);
 
 	if (type == EX_READ) {
 		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
 			write_unlock(&et->lock);
 			return;
 		}
 
--- 36 unchanged lines hidden ---
 	}
 
 	if (end < org_end && (type != EX_READ ||
 			org_end - end >= F2FS_MIN_EXTENT_LEN)) {
 		if (parts) {
 			__set_extent_info(&ei,
 					end, org_end - end,
 					end - dei.fofs + dei.blk, false,
+					dei.age, dei.last_blocks,
 					type);
 			en1 = __insert_extent_tree(sbi, et, &ei,
 						NULL, NULL, true);
 			next_en = en1;
 		} else {
 			__set_extent_info(&en->ei,
 					end, en->ei.len - (end - dei.fofs),
 					en->ei.blk + (end - dei.fofs), true,
+					dei.age, dei.last_blocks,
 					type);
 			next_en = en;
 		}
 		parts++;
 	}
 
 	if (!next_en) {
 		struct rb_node *node = rb_next(&en->rb_node);
--- 14 unchanged lines hidden ---
 		 */
 		if (parts != 1) {
 			insert_p = NULL;
 			insert_parent = NULL;
 		}
 		en = next_en;
 	}
 
+	if (type == EX_BLOCK_AGE)
+		goto update_age_extent_cache;
+
 	/* 3. update extent in read extent cache */
 	BUG_ON(type != EX_READ);
 
 	if (tei->blk) {
-		__set_extent_info(&ei, fofs, len, tei->blk, false, EX_READ);
+		__set_extent_info(&ei, fofs, len, tei->blk, false,
+					0, 0, EX_READ);
 		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
 			__insert_extent_tree(sbi, et, &ei,
 					insert_p, insert_parent, leftmost);
 
 		/* give up extent_cache, if split and small updates happen */
 		if (dei.len >= 1 &&
 				prev.len < F2FS_MIN_EXTENT_LEN &&
 				et->largest.len < F2FS_MIN_EXTENT_LEN) {
--- 5 unchanged lines hidden ---
 
 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
 		__free_extent_tree(sbi, et);
 
 	if (et->largest_updated) {
 		et->largest_updated = false;
 		updated = true;
 	}
+	goto out_read_extent_cache;
+update_age_extent_cache:
+	if (!tei->last_blocks)
+		goto out_read_extent_cache;
 
+	__set_extent_info(&ei, fofs, len, 0, false,
+			tei->age, tei->last_blocks, EX_BLOCK_AGE);
+	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+		__insert_extent_tree(sbi, et, &ei,
+				insert_p, insert_parent, leftmost);
+out_read_extent_cache:
 	write_unlock(&et->lock);
 
 	if (updated)
 		f2fs_mark_inode_dirty_sync(inode, true);
 }
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
--- 21 unchanged lines hidden ---
 				(struct rb_entry *)et->cached_en, fofs,
 				(struct rb_entry **)&prev_en,
 				(struct rb_entry **)&next_en,
 				&insert_p, &insert_parent, false,
 				&leftmost);
 	if (en)
 		goto unlock_out;
 
-	__set_extent_info(&ei, fofs, llen, blkaddr, true, EX_READ);
+	__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
 	ei.c_len = c_len;
 
 	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
 		__insert_extent_tree(sbi, et, &ei,
 				insert_p, insert_parent, leftmost);
 unlock_out:
 	write_unlock(&et->lock);
 }
 #endif
 
+static unsigned long long __calculate_block_age(unsigned long long new,
+						unsigned long long old)
+{
+	unsigned long long diff;
+
+	diff = (new >= old) ? new - (new - old) : new + (old - new);
+
+	return div_u64(diff * LAST_AGE_WEIGHT, 100);
+}
+
+/* This returns a new age and allocated blocks in ei */
+static int __get_new_block_age(struct inode *inode, struct extent_info *ei)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	loff_t f_size = i_size_read(inode);
+	unsigned long long cur_blocks =
+				atomic64_read(&sbi->allocated_data_blocks);
+
+	/*
+	 * When I/O is not aligned to a PAGE_SIZE, update will happen to the last
+	 * file block even in seq write. So don't record age for newly last file
+	 * block here.
+	 */
+	if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
+			ei->blk == NEW_ADDR)
+		return -EINVAL;
+
+	if (__lookup_extent_tree(inode, ei->fofs, ei, EX_BLOCK_AGE)) {
+		unsigned long long cur_age;
+
+		if (cur_blocks >= ei->last_blocks)
+			cur_age = cur_blocks - ei->last_blocks;
+		else
+			/* allocated_data_blocks overflow */
+			cur_age = ULLONG_MAX - ei->last_blocks + cur_blocks;
+
+		if (ei->age)
+			ei->age = __calculate_block_age(cur_age, ei->age);
+		else
+			ei->age = cur_age;
+		ei->last_blocks = cur_blocks;
+		WARN_ON(ei->age > cur_blocks);
+		return 0;
+	}
+
+	f2fs_bug_on(sbi, ei->blk == NULL_ADDR);
+
+	/* the data block was allocated for the first time */
+	if (ei->blk == NEW_ADDR)
+		goto out;
+
+	if (__is_valid_data_blkaddr(ei->blk) &&
+			!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE)) {
+		f2fs_bug_on(sbi, 1);
+		return -EINVAL;
+	}
+out:
+	/*
+	 * init block age with zero, this can happen when the block age extent
+	 * was reclaimed due to memory constraint or system reboot
+	 */
+	ei->age = 0;
+	ei->last_blocks = cur_blocks;
+	return 0;
+}
+
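__get_new_block_age() above measures a block's age in units of the filesystem-wide allocated_data_blocks counter: the fresh sample is the number of data blocks allocated since the extent was last updated (with counter wraparound handled explicitly), and if a previous age exists it is folded in through __calculate_block_age() and LAST_AGE_WEIGHT. A standalone sketch of that arithmetic, reusing the same expressions as the hunk above; plain division stands in for div_u64() and the LAST_AGE_WEIGHT value is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: LAST_AGE_WEIGHT is defined in fs/f2fs/f2fs.h. */
#define LAST_AGE_WEIGHT	30

/* Same expression as __calculate_block_age() in the hunk above. */
static uint64_t calculate_block_age(uint64_t new, uint64_t old)
{
	uint64_t diff = (new >= old) ? new - (new - old) : new + (old - new);

	return diff * LAST_AGE_WEIGHT / 100;	/* div_u64() in the kernel */
}

/*
 * cur_age = data blocks allocated filesystem-wide since this extent was
 * last updated, with wraparound handled as in __get_new_block_age().
 */
static uint64_t next_age(uint64_t cur_blocks, uint64_t last_blocks, uint64_t old_age)
{
	uint64_t cur_age;

	if (cur_blocks >= last_blocks)
		cur_age = cur_blocks - last_blocks;
	else	/* allocated_data_blocks counter wrapped */
		cur_age = UINT64_MAX - last_blocks + cur_blocks;

	return old_age ? calculate_block_age(cur_age, old_age) : cur_age;
}

int main(void)
{
	/* First update, 10000 allocations into the filesystem's lifetime. */
	uint64_t age = next_age(10000, 0, 0);
	printf("age after first update:  %llu\n", (unsigned long long)age);

	/* Second update, 2000 allocations later: sample folded with history. */
	age = next_age(12000, 10000, age);
	printf("age after second update: %llu\n", (unsigned long long)age);
	return 0;
}
```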
 static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
 {
 	struct extent_info ei;
 
 	if (!__may_extent_tree(dn->inode, type))
 		return;
 
 	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
 								dn->ofs_in_node;
 	ei.len = 1;
 
 	if (type == EX_READ) {
 		if (dn->data_blkaddr == NEW_ADDR)
 			ei.blk = NULL_ADDR;
 		else
 			ei.blk = dn->data_blkaddr;
+	} else if (type == EX_BLOCK_AGE) {
+		ei.blk = dn->data_blkaddr;
+		if (__get_new_block_age(dn->inode, &ei))
+			return;
 	}
 	__update_extent_tree_range(dn->inode, &ei, type);
 }
 
 static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
 				enum extent_type type)
 {
 	struct extent_tree_info *eti = &sbi->extent_tree[type];
--- 101 unchanged lines hidden ---
 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	if (!test_opt(sbi, READ_EXTENT_CACHE))
 		return 0;
 
 	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
 }
 
+/* block age extent cache operations */
+bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
+				struct extent_info *ei)
+{
+	if (!__may_extent_tree(inode, EX_BLOCK_AGE))
+		return false;
+
+	return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
+}
+
+void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
+{
+	return __update_extent_cache(dn, EX_BLOCK_AGE);
+}
+
+void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
+				pgoff_t fofs, unsigned int len)
+{
+	struct extent_info ei = {
+		.fofs = fofs,
+		.len = len,
+	};
+
+	if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
+		return;
+
+	__update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
+}
+
+unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	if (!test_opt(sbi, AGE_EXTENT_CACHE))
+		return 0;
+
+	return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
+}
+
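The exported helpers above form the block-age API used by the rest of f2fs. Note the asymmetry: f2fs_update_age_extent_cache() goes through __get_new_block_age() and records a single block with a freshly computed age and counter snapshot, while f2fs_update_age_extent_cache_range() seeds only fofs/len, so the "!tei->last_blocks" bail-out added to __update_extent_tree_range() makes it drop or split existing age extents over the range without inserting a replacement. A simplified sketch of how the two entry points seed extent_info; the structure and function names below are stand-ins, not the kernel types:

```c
#include <stdio.h>

/* Simplified stand-in for the fields of struct extent_info the age cache uses. */
struct age_extent_info {
	unsigned long long fofs, len;
	unsigned long long age, last_blocks;
};

/* Like f2fs_update_age_extent_cache(): one block, freshly computed age. */
static struct age_extent_info seed_single(unsigned long long fofs,
					  unsigned long long age,
					  unsigned long long last_blocks)
{
	return (struct age_extent_info){
		.fofs = fofs, .len = 1,
		.age = age, .last_blocks = last_blocks,
	};
}

/*
 * Like f2fs_update_age_extent_cache_range(): a whole range with age == 0
 * and last_blocks == 0, so __update_extent_tree_range() stops at the
 * "if (!tei->last_blocks)" check and never inserts a new age extent.
 */
static struct age_extent_info seed_range(unsigned long long fofs,
					 unsigned long long len)
{
	return (struct age_extent_info){ .fofs = fofs, .len = len };
}

int main(void)
{
	struct age_extent_info a = seed_single(100, 3000, 90000);
	struct age_extent_info b = seed_range(100, 64);

	printf("single: fofs=%llu len=%llu age=%llu snapshot=%llu\n",
	       a.fofs, a.len, a.age, a.last_blocks);
	printf("range:  fofs=%llu len=%llu (invalidate only)\n", b.fofs, b.len);
	return 0;
}
```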
 static unsigned int __destroy_extent_node(struct inode *inode,
 					enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
 	unsigned int node_cnt = 0;
 
 	if (!et || !atomic_read(&et->node_cnt))
--- 4 unchanged lines hidden ---
 	write_unlock(&et->lock);
 
 	return node_cnt;
 }
 
 void f2fs_destroy_extent_node(struct inode *inode)
 {
 	__destroy_extent_node(inode, EX_READ);
+	__destroy_extent_node(inode, EX_BLOCK_AGE);
 }
 
 static void __drop_extent_tree(struct inode *inode, enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
 	bool updated = false;
 
--- 12 unchanged lines hidden ---
 	write_unlock(&et->lock);
 	if (updated)
 		f2fs_mark_inode_dirty_sync(inode, true);
 }
 
 void f2fs_drop_extent_tree(struct inode *inode)
 {
 	__drop_extent_tree(inode, EX_READ);
+	__drop_extent_tree(inode, EX_BLOCK_AGE);
 }
 
 static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct extent_tree_info *eti = &sbi->extent_tree[type];
 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
 	unsigned int node_cnt = 0;
--- 24 unchanged lines hidden ---
 	F2FS_I(inode)->extent_tree[type] = NULL;
 
 	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
 }
 
 void f2fs_destroy_extent_tree(struct inode *inode)
 {
 	__destroy_extent_tree(inode, EX_READ);
+	__destroy_extent_tree(inode, EX_BLOCK_AGE);
 }
 
 static void __init_extent_tree_info(struct extent_tree_info *eti)
 {
 	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
 	mutex_init(&eti->extent_tree_lock);
 	INIT_LIST_HEAD(&eti->extent_list);
 	spin_lock_init(&eti->extent_lock);
 	atomic_set(&eti->total_ext_tree, 0);
 	INIT_LIST_HEAD(&eti->zombie_list);
 	atomic_set(&eti->total_zombie_tree, 0);
 	atomic_set(&eti->total_ext_node, 0);
 }
 
 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
 {
 	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
+	__init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);
+
+	/* initialize for block age extents */
+	atomic64_set(&sbi->allocated_data_blocks, 0);
+	sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
+	sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
 }
 
 int __init f2fs_create_extent_cache(void)
 {
 	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
 			sizeof(struct extent_tree));
 	if (!extent_tree_slab)
 		return -ENOMEM;
--- 14 unchanged lines hidden ---
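f2fs_init_extent_cache_info() above also seeds the block-age machinery: the global allocated_data_blocks counter starts at zero and the hot/warm data age thresholds take their defaults. Those thresholds are meant to be consumed outside this file, on the allocation path, to steer writes toward hotter or colder logs. The sketch below shows only the general idea of such a comparison; the threshold values and the classifier are hypothetical, not the actual f2fs policy or defaults:

```c
#include <stdio.h>

/* Illustrative values; the real DEF_*_DATA_AGE_THRESHOLD defaults live in f2fs.h. */
#define HOT_DATA_AGE_THRESHOLD		262144ULL
#define WARM_DATA_AGE_THRESHOLD	2621440ULL

enum temp { TEMP_HOT, TEMP_WARM, TEMP_COLD };

/*
 * Hypothetical consumer: a small blended age means the block keeps being
 * rewritten shortly after allocation, so treat its data as hot; a moderate
 * age as warm; anything older as cold. The real decision is made by the
 * allocator outside extent_cache.c.
 */
static enum temp classify_by_age(unsigned long long age)
{
	if (age < HOT_DATA_AGE_THRESHOLD)
		return TEMP_HOT;
	if (age < WARM_DATA_AGE_THRESHOLD)
		return TEMP_WARM;
	return TEMP_COLD;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_by_age(1000),		/* hot */
	       classify_by_age(1000000),	/* warm */
	       classify_by_age(100000000));	/* cold */
	return 0;
}
```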