xref: /openbmc/linux/fs/erofs/utils.c (revision 96d3e6f0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/pagevec.h>

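/*
 * Pages are kept in a simple LIFO pool chained through page_private().
 * Pop one spare page from @pagepool if available, otherwise fall back
 * to alloc_page() with the given gfp flags.
 */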
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
	struct page *page = *pagepool;

	if (page) {
		DBG_BUGON(page_ref_count(page) != 1);
		*pagepool = (struct page *)page_private(page);
	} else {
		page = alloc_page(gfp);
	}
	return page;
}

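/* return all pages remaining in the pool to the page allocator */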
void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		put_page(page);
	}
}

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

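/*
 * Try to grab a reference on a workgroup looked up in the XArray: wait
 * for it to be unfrozen, then bump its refcount with a cmpxchg loop.
 * Regrabbing a previously idle workgroup (old refcount == 1) removes it
 * from the reclaimable count.  Returns 0 on success, -1 otherwise.
 */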
static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
	int o;

repeat:
	o = erofs_wait_on_workgroup_freezed(grp);
	if (o <= 0)
		return -1;

	if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
		goto repeat;

	/* decrease the global shrink count, paired with erofs_workgroup_put() */
	if (o == 1)
		atomic_long_dec(&erofs_global_shrink_cnt);
	return 0;
}

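/*
 * Look up a workgroup by index under RCU protection and take a
 * reference on it.  If the reference cannot be grabbed (the workgroup
 * is going away), drop the RCU read lock and retry the lookup.
 */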
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = xa_load(&sbi->managed_pslots, index);
	if (grp) {
		if (erofs_workgroup_get(grp)) {
			/* prefer to relax the RCU read side before retrying */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}

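/*
 * Insert a newly allocated workgroup into the managed XArray, or return
 * the existing workgroup (with a reference held) if one with the same
 * index was inserted concurrently.  XArray errors are propagated as
 * ERR_PTR values.
 */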
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct erofs_workgroup *pre;

	/*
	 * Bump up the reference count before making this workgroup
	 * visible to others in the XArray; otherwise it could be freed
	 * by a concurrent lookup/put that is not serialized by xa_lock,
	 * leading to a use-after-free.
	 */
	atomic_inc(&grp->refcount);

repeat:
	xa_lock(&sbi->managed_pslots);
	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
			   NULL, grp, GFP_NOFS);
	if (pre) {
		if (xa_is_err(pre)) {
			pre = ERR_PTR(xa_err(pre));
		} else if (erofs_workgroup_get(pre)) {
			/* try to legitimize the workgroup already in the tree */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
			goto repeat;
		}
		atomic_dec(&grp->refcount);
		grp = pre;
	}
	xa_unlock(&sbi->managed_pslots);
	return grp;
}

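/*
 * Drop the global shrink count contributed by this workgroup and hand
 * it over to erofs_workgroup_free_rcu() for the actual freeing.
 */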
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}

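/*
 * Drop a reference.  When only the managed pslot still holds the
 * workgroup (refcount drops to 1) it becomes reclaimable, so bump the
 * global shrink count; when the last reference is gone, free it.
 */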
int erofs_workgroup_put(struct erofs_workgroup *grp)
{
	int count = atomic_dec_return(&grp->refcount);

	if (count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	else if (!count)
		__erofs_workgroup_free(grp);
	return count;
}

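/*
 * Try to reclaim a single workgroup: freeze it so no new references can
 * be grabbed, drop its cached pages and erase it from the XArray.
 * Called with sbi->managed_pslots xa_lock held.
 */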
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	/*
	 * If the managed cache is enabled, a workgroup's refcount
	 * could be negative (frozen).  In other words, there is no
	 * guarantee that all refcounts are positive.
	 */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/*
	 * Note that all cached pages should be detached before the
	 * workgroup is deleted from the XArray.  Otherwise, some cached
	 * pages could still be attached to the orphaned old workgroup
	 * when the new one with the same index becomes available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
		erofs_workgroup_unfreeze(grp, 1);
		return false;
	}

	/*
	 * It's impossible for the erase to fail after the workgroup is
	 * frozen; nevertheless, add a DBG_BUGON to catch unexpected
	 * race conditions early.
	 */
	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);

	/* the last refcount is held by its managed pslot, which was just erased */
	erofs_workgroup_unfreeze(grp, 0);
	__erofs_workgroup_free(grp);
	return true;
}

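/*
 * Walk the managed XArray and try to reclaim up to @nr_shrink
 * workgroups, temporarily releasing xa_lock after each successful
 * reclaim.  Returns the number of workgroups freed.
 */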
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	struct erofs_workgroup *grp;
	unsigned int freed = 0;
	unsigned long index;

	xa_lock(&sbi->managed_pslots);
	xa_for_each(&sbi->managed_pslots, index, grp) {
		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;
		xa_unlock(&sbi->managed_pslots);

		++freed;
		if (!--nr_shrink)
			return freed;
		xa_lock(&sbi->managed_pslots);
	}
	xa_unlock(&sbi->managed_pslots);
	return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

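/* make a newly mounted instance visible to the global shrinker */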
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

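/*
 * Called at umount time: reclaim all remaining workgroups and take this
 * instance off the shrinkable superblock list.
 */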
void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining workgroups in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

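/* report the number of reclaimable workgroups across all mounted instances */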
static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

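/*
 * Scan the mounted instances round-robin and reclaim up to
 * sc->nr_to_scan workgroups in total.  Each pass is tagged with a
 * non-zero run number so that a superblock already visited in this pass
 * stops the walk; busy (unmounting) instances are skipped via
 * mutex_trylock() and visited ones are rotated to the list tail.
 */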
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

int __init erofs_init_shrinker(void)
{
	return register_shrinker(&erofs_shrinker_info, "erofs-shrinker");
}

void erofs_exit_shrinker(void)
{
	unregister_shrinker(&erofs_shrinker_info);
}
#endif	/* !CONFIG_EROFS_FS_ZIP */