xref: /openbmc/linux/fs/fscache/cache.c (revision 1c45256e)
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache cache handling
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/export.h>
#include <linux/slab.h>
#include "internal.h"

static LIST_HEAD(fscache_caches);
DECLARE_RWSEM(fscache_addremove_sem);
EXPORT_SYMBOL(fscache_addremove_sem);
DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
EXPORT_SYMBOL(fscache_clearance_waiters);

static atomic_t fscache_cache_debug_id;

/*
 * Allocate a cache cookie.
 */
static struct fscache_cache *fscache_alloc_cache(const char *name)
{
	struct fscache_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache) {
		if (name) {
			cache->name = kstrdup(name, GFP_KERNEL);
			if (!cache->name) {
				kfree(cache);
				return NULL;
			}
		}
		refcount_set(&cache->ref, 1);
		INIT_LIST_HEAD(&cache->cache_link);
		cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
	}
	return cache;
}

/*
 * Get a reference on a cache cookie, but only if its refcount hasn't already
 * dropped to zero, and trace the new count.
 */
static bool fscache_get_cache_maybe(struct fscache_cache *cache,
				    enum fscache_cache_trace where)
{
	bool success;
	int ref;

	success = __refcount_inc_not_zero(&cache->ref, &ref);
	if (success)
		trace_fscache_cache(cache->debug_id, ref + 1, where);
	return success;
}

/*
 * Look up a cache cookie by name, or any available cache if @name is NULL.
 * If @is_cache is true, an existing nameless cache record may be claimed and
 * given this name.  If no suitable cache is found, a new record is allocated
 * and added to the list.
 */
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
{
	struct fscache_cache *candidate, *cache, *unnamed = NULL;

	/* firstly check for the existence of the cache under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
		if (!cache->name && !name &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
	}

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_r;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the cache does not exist - create a candidate */
	candidate = fscache_alloc_cache(name);
	if (!candidate)
		return ERR_PTR(-ENOMEM);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_w;
		if (!cache->name) {
			unnamed = cache;
			if (!name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	if (unnamed && is_cache &&
	    fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
		goto use_unnamed_cache;

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	list_add_tail(&candidate->cache_link, &fscache_caches);
	trace_fscache_cache(candidate->debug_id,
			    refcount_read(&candidate->ref),
			    fscache_cache_new_acquire);
	up_write(&fscache_addremove_sem);
	return candidate;

got_cache_r:
	up_read(&fscache_addremove_sem);
	return cache;
use_unnamed_cache:
	cache = unnamed;
	cache->name = candidate->name;
	candidate->name = NULL;
got_cache_w:
	up_write(&fscache_addremove_sem);
	kfree(candidate->name);
	kfree(candidate);
	return cache;
}

/**
 * fscache_acquire_cache - Acquire a cache-level cookie.
 * @name: The name of the cache.
 *
 * Get a cookie to represent an actual cache.  If a name is given and there is
 * a nameless cache record available, this will acquire that and set its name,
 * directing all the volumes using it to this cache.
 *
 * The cache will be switched over to the preparing state if not currently in
 * use, otherwise -EBUSY will be returned.
 */
struct fscache_cache *fscache_acquire_cache(const char *name)
{
	struct fscache_cache *cache;

	ASSERT(name);
	cache = fscache_lookup_cache(name, true);
	if (IS_ERR(cache))
		return cache;

	if (!fscache_set_cache_state_maybe(cache,
					   FSCACHE_CACHE_IS_NOT_PRESENT,
					   FSCACHE_CACHE_IS_PREPARING)) {
		pr_warn("Cache tag %s in use\n", name);
		fscache_put_cache(cache, fscache_cache_put_cache);
		return ERR_PTR(-EBUSY);
	}

	return cache;
}
EXPORT_SYMBOL(fscache_acquire_cache);
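
/*
 * Illustrative sketch: a cache backend (cachefiles, for instance) would
 * typically obtain its cache-level cookie during setup along these lines,
 * where "mycache" is a made-up cache name:
 *
 *	struct fscache_cache *cache;
 *
 *	cache = fscache_acquire_cache("mycache");
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 *
 * On success the cache is in the preparing state; the backend then readies
 * its backing store and declares the cache live with fscache_add_cache().
 */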

/**
 * fscache_put_cache - Release a cache-level cookie.
 * @cache: The cache cookie to be released
 * @where: An indication of where the release happened
 *
 * Release the caller's reference on a cache-level cookie.  The @where
 * indication should give information about the circumstances in which the call
 * occurs and will be logged through a tracepoint.
 */
void fscache_put_cache(struct fscache_cache *cache,
		       enum fscache_cache_trace where)
{
	unsigned int debug_id;
	bool zero;
	int ref;

	if (IS_ERR_OR_NULL(cache))
		return;

	debug_id = cache->debug_id;
	zero = __refcount_dec_and_test(&cache->ref, &ref);
	trace_fscache_cache(debug_id, ref - 1, where);

	if (zero) {
		down_write(&fscache_addremove_sem);
		list_del_init(&cache->cache_link);
		up_write(&fscache_addremove_sem);
		kfree(cache->name);
		kfree(cache);
	}
}

/**
 * fscache_relinquish_cache - Reset cache state and release cookie
 * @cache: The cache cookie to be released
 *
 * Reset the state of a cache and release the caller's reference on a cache
 * cookie.
 */
void fscache_relinquish_cache(struct fscache_cache *cache)
{
	enum fscache_cache_trace where =
		(cache->state == FSCACHE_CACHE_IS_PREPARING) ?
		fscache_cache_put_prep_failed :
		fscache_cache_put_relinquish;

	cache->ops = NULL;
	cache->cache_priv = NULL;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
	fscache_put_cache(cache, where);
}
EXPORT_SYMBOL(fscache_relinquish_cache);

/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The cache-level cookie representing the cache
 * @ops: Table of cache operations to use
 * @cache_priv: Private data for the cache record
 *
 * Add a cache to the system, making it available for network filesystems to
 * use.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      const struct fscache_cache_ops *ops,
		      void *cache_priv)
{
	int n_accesses;

	_enter("{%s,%s}", ops->name, cache->name);

	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);

	/* Get a ref on the cache cookie and keep its n_accesses counter raised
	 * by 1 so that it cannot drop to 0 (and trigger wakeups) until we come
	 * to withdraw caching services from it.
	 */
	n_accesses = atomic_inc_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_pin);

	down_write(&fscache_addremove_sem);

	cache->ops = ops;
	cache->cache_priv = cache_priv;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);

	up_write(&fscache_addremove_sem);
	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
	_leave(" = 0 [%s]", cache->name);
	return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
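
/*
 * Illustrative sketch: once its backing store is ready, a backend completes
 * the bring-up started with fscache_acquire_cache() by supplying an
 * operations table, roughly:
 *
 *	static const struct fscache_cache_ops my_cache_ops = {
 *		.name	= "mybackend",
 *		// other backend methods go here
 *	};
 *
 *	ret = fscache_add_cache(cache, &my_cache_ops, my_cache_priv);
 *
 * where my_cache_ops, "mybackend" and my_cache_priv are hypothetical names.
 * On success the cache state becomes FSCACHE_CACHE_IS_ACTIVE and volumes can
 * start using it.
 */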

/**
 * fscache_begin_cache_access - Pin a cache so it can be accessed
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing it, returning true if successful.  This works as follows:
 *
 *  (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
 *      then we return false to indicate access was not permitted.
 *
 *  (2) If the cache tests as live, then we increment the n_accesses count and
 *      then recheck the liveness, ending the access if it ceased to be live.
 *
 *  (3) When we end the access, we decrement n_accesses and wake up any
 *      waiters if it reaches 0.
 *
 *  (4) Whilst the cache is caching, n_accesses is kept artificially
 *      incremented to prevent wakeups from happening.
 *
 *  (5) When the cache is taken offline, the state is changed to prevent new
 *      accesses, n_accesses is decremented and we wait for n_accesses to
 *      become 0.
 */
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	if (!fscache_cache_is_live(cache))
		return false;

	n_accesses = atomic_inc_return(&cache->n_accesses);
	smp_mb__after_atomic(); /* Reread live flag after n_accesses */
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (!fscache_cache_is_live(cache)) {
		fscache_end_cache_access(cache, fscache_access_unlive);
		return false;
	}
	return true;
}

/**
 * fscache_end_cache_access - Unpin a cache at the end of an access.
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache after we've accessed it.  The @why indicator is merely
 * provided for tracing purposes.
 */
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (n_accesses == 0)
		wake_up_var(&cache->n_accesses);
}
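
/*
 * Illustrative sketch of the access-pinning pattern described above, where
 * my_access_reason stands in for a suitable enum fscache_access_trace value
 * and do_something_with() for the actual cache-level operation:
 *
 *	if (!fscache_begin_cache_access(cache, my_access_reason))
 *		return -ENOBUFS;	// the cache isn't live
 *	do_something_with(cache);
 *	fscache_end_cache_access(cache, my_access_reason);
 */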

/**
 * fscache_io_error - Note a cache I/O error
 * @cache: The record describing the cache
 *
 * Note that an I/O error occurred in a cache and that it should no longer be
 * used for anything.  This also reports the error into the kernel log.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_io_error(struct fscache_cache *cache)
{
	if (fscache_set_cache_state_maybe(cache,
					  FSCACHE_CACHE_IS_ACTIVE,
					  FSCACHE_CACHE_GOT_IOERROR))
		pr_err("Cache '%s' stopped due to I/O error\n",
		       cache->name);
}
EXPORT_SYMBOL(fscache_io_error);
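
/*
 * Illustrative sketch: a backend would call this when its backing store
 * fails, e.g. when a write to the backing filesystem returns an error:
 *
 *	if (backing_write_failed)	// hypothetical error condition
 *		fscache_io_error(cache);
 */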

/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The cache cookie
 *
 * Begin the process of withdrawing a cache from service.  This stops new
 * cache-level and volume-level accesses from taking place and waits for
 * currently ongoing cache-level accesses to end.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	int n_accesses;

	pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
		  cache->name, atomic_read(&cache->object_count));

	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);

	/* Allow wakeups on dec-to-0 */
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_unpin);

	wait_var_event(&cache->n_accesses,
		       atomic_read(&cache->n_accesses) == 0);
}
EXPORT_SYMBOL(fscache_withdraw_cache);
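
/*
 * Illustrative sketch: a backend typically tears a cache down by withdrawing
 * it and then relinquishing its cookie, roughly in this order:
 *
 *	fscache_withdraw_cache(cache);
 *	// ... withdraw the volumes and objects, free backend resources ...
 *	fscache_relinquish_cache(cache);
 *
 * fscache_relinquish_cache() clears the ops table, resets the state and drops
 * the reference obtained from fscache_acquire_cache().
 */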

#ifdef CONFIG_PROC_FS
static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";

/*
 * Generate a list of caches in /proc/fs/fscache/caches
 */
static int fscache_caches_seq_show(struct seq_file *m, void *v)
{
	struct fscache_cache *cache;

	if (v == &fscache_caches) {
		seq_puts(m,
			 "CACHE    REF   VOLS  OBJS  ACCES S NAME\n"
			 "======== ===== ===== ===== ===== = ===============\n"
			 );
		return 0;
	}

	cache = list_entry(v, struct fscache_cache, cache_link);
	seq_printf(m,
		   "%08x %5d %5d %5d %5d %c %s\n",
		   cache->debug_id,
		   refcount_read(&cache->ref),
		   atomic_read(&cache->n_volumes),
		   atomic_read(&cache->object_count),
		   atomic_read(&cache->n_accesses),
		   fscache_cache_states[cache->state],
		   cache->name ?: "-");
	return 0;
}
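
/*
 * Illustrative output (values made up for the example), one line per cache
 * under the header emitted above:
 *
 *	CACHE    REF   VOLS  OBJS  ACCES S NAME
 *	======== ===== ===== ===== ===== = ===============
 *	00000001     3     1    54     1 A mycache
 *
 * The state column is drawn from fscache_cache_states[] ("-PAEW"): not
 * present, preparing, active, I/O error or withdrawn respectively.
 */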

static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_addremove_sem)
{
	down_read(&fscache_addremove_sem);
	return seq_list_start_head(&fscache_caches, *_pos);
}

static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_caches, _pos);
}

static void fscache_caches_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_addremove_sem)
{
	up_read(&fscache_addremove_sem);
}

const struct seq_operations fscache_caches_seq_ops = {
	.start  = fscache_caches_seq_start,
	.next   = fscache_caches_seq_next,
	.stop   = fscache_caches_seq_stop,
	.show   = fscache_caches_seq_show,
};
#endif /* CONFIG_PROC_FS */
430