xref: /openbmc/linux/fs/f2fs/shrinker.c (revision bc5aa3a0)
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

/* all mounted f2fs instances, protected by f2fs_list_lock */
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
/* stamp for the current scan; sbis already stamped get skipped */
static unsigned int shrinker_run_no;

/* clean NAT cache entries are reclaimable; dirty ones must be flushed first */
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
}

/* only free nids above the MAX_FREE_NIDS watermark are worth reclaiming */
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	if (NM_I(sbi)->fcnt > MAX_FREE_NIDS)
		return NM_I(sbi)->fcnt - MAX_FREE_NIDS;
	return 0;
}

/* zombie extent trees (from evicted inodes) plus all cached extent nodes */
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}

unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

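/*
 * Note: the VM calls ->count_objects() (f2fs_shrink_count above) first to
 * size the reclaimable pool, then ->scan_objects() (f2fs_shrink_scan below)
 * with sc->nr_to_scan derived from that count and current memory pressure.
 */
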
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/* pick a non-zero stamp for this run, skipping 0 on wrap-around */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* this sbi was already stamped, so we have looped around */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries, at most half of the request */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate to the tail so the next run starts with another sbi */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

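/*
 * For reference, the two callbacks above are wired up from fs/f2fs/super.c.
 * A minimal sketch of the registration (error handling in init_f2fs_fs()
 * elided; kept under #if 0 since the real definitions live in super.c):
 */
#if 0
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

static int __init init_f2fs_fs(void)
{
	/* ... */
	return register_shrinker(&f2fs_shrinker_info);
}
#endif
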
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	/* drop all reclaimable extent cache entries before unhooking */
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}

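/*
 * Usage sketch: f2fs_join_shrinker() runs once the superblock is fully set
 * up (from f2fs_fill_super()), and f2fs_leave_shrinker() runs from
 * f2fs_put_super() with sbi->umount_mutex held, which is what the
 * mutex_trylock(&sbi->umount_mutex) calls above synchronize against.
 * Roughly (call sites abridged):
 */
#if 0
	/* in f2fs_fill_super(): */
	f2fs_join_shrinker(sbi);

	/* in f2fs_put_super(): */
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);
#endif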