// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

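/*
 * Every mounted f2fs instance registers on f2fs_list via
 * f2fs_join_shrinker() below.  f2fs_list_lock protects the list, and
 * shrinker_run_no tags each scan pass so that a filesystem is visited
 * at most once per invocation even though visited entries are rotated
 * to the list tail.
 */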
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

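/* NAT cache entries that are clean, i.e. reclaimable without writeback */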
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}

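/* free nids cached beyond the MAX_FREE_NIDS watermark the allocator keeps */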
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

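/* zombie extent trees (no inode reference left) plus all cached extent nodes */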
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
					enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];

	return atomic_read(&eti->total_zombie_tree) +
				atomic_read(&eti->total_ext_node);
}

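/*
 * ->count_objects callback: walk every registered filesystem and report
 * how many cache entries could be reclaimed.  umount_mutex is taken with
 * trylock so a filesystem in the middle of f2fs_put_super() is skipped
 * rather than waited on.
 */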
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count read extent cache entries */
		count += __count_extent_cache(sbi, EX_READ);

		/* count block age extent cache entries */
		count += __count_extent_cache(sbi, EX_BLOCK_AGE);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

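/*
 * ->scan_objects callback: reclaim up to sc->nr_to_scan entries.  Each
 * visited filesystem is stamped with the current run number and moved to
 * the list tail, so reclaim pressure rotates fairly across mounts and the
 * walk terminates once it meets an already-stamped entry.  A quarter of
 * the budget goes to each extent cache; NAT entries and free nids soak up
 * whatever remains.
 */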
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/* skip run number 0: it matches a never-visited sb's shrinker_run_no */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink block age extent cache entries */
		freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);

		/* shrink read extent cache entries */
		freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		/* fetch the next entry before this sb is rotated to the tail */
		p = p->next;
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

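/* register this filesystem instance with the global shrinker list */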
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

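/*
 * Unregister on unmount: drop both extent caches entirely first, then
 * take the sb off the global list so the shrinker can no longer see it.
 */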
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ));
	f2fs_shrink_age_extent_tree(sbi,
				__count_extent_cache(sbi, EX_BLOCK_AGE));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}
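
/*
 * For context: these two callbacks are the ->count_objects/->scan_objects
 * pair of a single global shrinker.  The registration itself lives outside
 * this file (in fs/f2fs/super.c); a minimal sketch of the wiring, assuming
 * the register_shrinker() interface of this kernel era, looks like:
 *
 *	static struct shrinker f2fs_shrinker_info = {
 *		.scan_objects = f2fs_shrink_scan,
 *		.count_objects = f2fs_shrink_count,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&f2fs_shrinker_info, "f2fs-shrinker");
 */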