fs/f2fs/shrinker.c (revision 8795a739)
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

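/*
 * Every mounted f2fs instance links itself into f2fs_list (protected by
 * f2fs_list_lock) so a single global shrinker can walk all of them.
 * shrinker_run_no tags each scan pass so an sbi is visited at most once
 * per pass even though the list gets rotated.
 */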
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

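/* Clean (i.e. reclaimable) NAT cache entries: total entries minus dirty ones. */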
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;

	return count > 0 ? count : 0;
}

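/* Only free nids accumulated beyond MAX_FREE_NIDS are surplus and reclaimable. */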
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

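/* Reclaimable extent cache objects: zombie extent trees plus cached extent nodes. */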
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}

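/*
 * ->count_objects callback of the f2fs shrinker: walk every registered
 * sbi and report how many objects could be freed.  An sbi whose
 * umount_mutex cannot be taken is likely being unmounted
 * (f2fs_put_super() holds it) and is skipped.
 */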
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

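/*
 * ->scan_objects callback: reclaim up to sc->nr_to_scan objects.  The
 * extent cache gets at most half of the budget, then clean NAT entries,
 * then surplus free nids.  Each visited sbi is moved to the list tail so
 * reclaim pressure rotates fairly across mounted filesystems.
 */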
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
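	/*
	 * Pick a fresh, non-zero run number: a newly joined sbi still has
	 * shrinker_run_no == 0 and must not appear to have been visited
	 * in this pass already.
	 */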
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

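/* Register a newly mounted filesystem with the global shrinker list. */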
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

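/*
 * Called during unmount: release all extent cache objects this sbi still
 * owns and unlink it from the shrinker list.
 */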
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}