xref: /openbmc/linux/fs/f2fs/shrinker.c (revision 4a3fad70)
1 /*
2  * f2fs shrinker support
3  *   the basic infra was copied from fs/ubifs/shrinker.c
4  *
5  * Copyright (c) 2015 Motorola Mobility
6  * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #include <linux/fs.h>
13 #include <linux/f2fs_fs.h>
14 
15 #include "f2fs.h"
16 #include "node.h"
17 
18 static LIST_HEAD(f2fs_list);
19 static DEFINE_SPINLOCK(f2fs_list_lock);
20 static unsigned int shrinker_run_no;
21 
22 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
23 {
24 	long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
25 
26 	return count > 0 ? count : 0;
27 }
28 
29 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
30 {
31 	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;
32 
33 	return count > 0 ? count : 0;
34 }
35 
36 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
37 {
38 	return atomic_read(&sbi->total_zombie_tree) +
39 				atomic_read(&sbi->total_ext_node);
40 }
41 
/*
 * f2fs_shrink_count - "count_objects" callback of the f2fs shrinker.
 *
 * Walks every mounted f2fs instance on the global f2fs_list and sums
 * its reclaimable objects: extent cache entries, clean NAT entries and
 * surplus free nids.  The result is only an estimate for the VM; no
 * objects are freed here.
 */
42 unsigned long f2fs_shrink_count(struct shrinker *shrink,
43 				struct shrink_control *sc)
44 {
45 	struct f2fs_sb_info *sbi;
46 	struct list_head *p;
47 	unsigned long count = 0;
48 
49 	spin_lock(&f2fs_list_lock);
50 	p = f2fs_list.next;
51 	while (p != &f2fs_list) {
52 		sbi = list_entry(p, struct f2fs_sb_info, s_list);
53 
54 		/* stop f2fs_put_super */
		/*
		 * If umount is already holding umount_mutex, skip this sbi
		 * rather than block the shrinker.
		 */
55 		if (!mutex_trylock(&sbi->umount_mutex)) {
56 			p = p->next;
57 			continue;
58 		}
		/*
		 * Drop the list lock while counting (the helpers may take
		 * other locks); holding umount_mutex keeps this sbi alive
		 * and on the list in the meantime.
		 */
59 		spin_unlock(&f2fs_list_lock);
60 
61 		/* count extent cache entries */
62 		count += __count_extent_cache(sbi);
63 
64 		/* count clean nat cache entries */
65 		count += __count_nat_entries(sbi);
66 
67 		/* count free nids cache entries */
68 		count += __count_free_nids(sbi);
69 
		/* re-take the list lock before advancing the cursor */
70 		spin_lock(&f2fs_list_lock);
71 		p = p->next;
72 		mutex_unlock(&sbi->umount_mutex);
73 	}
74 	spin_unlock(&f2fs_list_lock);
75 	return count;
76 }
77 
/*
 * f2fs_shrink_scan - "scan_objects" callback of the f2fs shrinker.
 *
 * Tries to free up to sc->nr_to_scan objects across all mounted f2fs
 * instances: roughly half of the quota from the extent cache first,
 * then clean NAT entries, then free nids, until the quota is met.
 * Returns the number of objects actually freed.
 *
 * A global, never-zero sequence number (shrinker_run_no) stamps each
 * sbi as it is serviced, so a single invocation visits each filesystem
 * at most once even though serviced entries are rotated to the list
 * tail (list_move_tail) to spread reclaim fairly across runs.
 */
78 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
79 				struct shrink_control *sc)
80 {
81 	unsigned long nr = sc->nr_to_scan;
82 	struct f2fs_sb_info *sbi;
83 	struct list_head *p;
84 	unsigned int run_no;
85 	unsigned long freed = 0;
86 
87 	spin_lock(&f2fs_list_lock);
	/* advance the run number, skipping 0 on wraparound */
88 	do {
89 		run_no = ++shrinker_run_no;
90 	} while (run_no == 0);
91 	p = f2fs_list.next;
92 	while (p != &f2fs_list) {
93 		sbi = list_entry(p, struct f2fs_sb_info, s_list);
94 
		/* already serviced in this run: full rotation done, stop */
95 		if (sbi->shrinker_run_no == run_no)
96 			break;
97 
98 		/* stop f2fs_put_super */
		/*
		 * If umount holds umount_mutex, skip this sbi rather than
		 * block the shrinker.
		 */
99 		if (!mutex_trylock(&sbi->umount_mutex)) {
100 			p = p->next;
101 			continue;
102 		}
		/*
		 * Drop the list lock while reclaiming; umount_mutex keeps
		 * this sbi alive and on the list in the meantime.
		 */
103 		spin_unlock(&f2fs_list_lock);
104 
105 		sbi->shrinker_run_no = run_no;
106 
107 		/* shrink extent cache entries */
108 		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
109 
110 		/* shrink clean nat cache entries */
111 		if (freed < nr)
112 			freed += try_to_free_nats(sbi, nr - freed);
113 
114 		/* shrink free nids cache entries */
115 		if (freed < nr)
116 			freed += try_to_free_nids(sbi, nr - freed);
117 
		/*
		 * Re-take the list lock, advance past this sbi, then move
		 * it to the tail so the next run starts elsewhere.
		 */
118 		spin_lock(&f2fs_list_lock);
119 		p = p->next;
120 		list_move_tail(&sbi->s_list, &f2fs_list);
121 		mutex_unlock(&sbi->umount_mutex);
122 		if (freed >= nr)
123 			break;
124 	}
125 	spin_unlock(&f2fs_list_lock);
126 	return freed;
127 }
128 
/*
 * Register @sbi with the global shrinker by appending it to f2fs_list.
 * Called during mount; f2fs_list_lock serializes the list update
 * against concurrent shrinker callbacks.
 */
129 void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
130 {
131 	spin_lock(&f2fs_list_lock);
132 	list_add_tail(&sbi->s_list, &f2fs_list);
133 	spin_unlock(&f2fs_list_lock);
134 }
135 
/*
 * Unregister @sbi from the global shrinker at umount time.
 *
 * Drains the whole extent cache first (its current count is passed as
 * the reclaim target), then removes the sbi from f2fs_list under the
 * list lock so shrinker callbacks can no longer see it.
 */
136 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
137 {
138 	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
139 
140 	spin_lock(&f2fs_list_lock);
141 	list_del(&sbi->s_list);
142 	spin_unlock(&f2fs_list_lock);
143 }
144