// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, but
 * key-value pairs are expected to be unique (we rely on this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as the key and provide a value that may
 * represent a block or inode number. That's why keys need not be unique
 * (hashes of different data may collide). However, the user-provided value
 * always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */

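/*
 * Illustrative sketch of the key-value semantics (not part of this file):
 * two entries may share a key as long as their values differ; inserting an
 * already existing key-value pair fails with -EBUSY. The cache pointer and
 * the numbers below are hypothetical.
 *
 *	mb_cache_entry_create(cache, GFP_KERNEL, 0xabcd, 100, true);	// 0
 *	mb_cache_entry_create(cache, GFP_KERNEL, 0xabcd, 200, true);	// 0
 *	mb_cache_entry_create(cache, GFP_KERNEL, 0xabcd, 100, true);	// -EBUSY
 */
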
struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @value: value of the entry
 * @reusable: is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. Returns -EBUSY
 * if an entry with the same key and value already exists in the cache,
 * 0 otherwise.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/*
	 * We create entry with two references. One reference is kept by the
	 * hash table, the other reference is used to protect us from
	 * mb_cache_entry_delete_or_get() until the entry is fully set up. This
	 * avoids nesting of cache->c_list_lock into hash table bit locks which
	 * is problematic for RT.
	 */
	atomic_set(&entry->e_refcnt, 2);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);
	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);
	mb_cache_entry_put(cache, entry);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

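/*
 * Illustrative caller sketch (not part of this file): after writing a block
 * worth deduplicating, a filesystem would typically insert its (hash, block)
 * pair with GFP_NOFS and treat -EBUSY as "already cached". The helper name
 * example_xattr_cache_insert() is hypothetical.
 *
 *	static void example_xattr_cache_insert(struct mb_cache *cache,
 *					       u32 hash, u64 block)
 *	{
 *		int error;
 *
 *		error = mb_cache_entry_create(cache, GFP_NOFS, hash, block,
 *					      true);
 *		if (error && error != -EBUSY)
 *			pr_debug("can't cache block %llu\n",
 *				 (unsigned long long)block);
 *	}
 */
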
void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, entry->e_key);
	hlist_bl_lock(head);
	hlist_bl_del(&entry->e_hash_list);
	hlist_bl_unlock(head);
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 * @entry: entry to work on
 *
 * Wait to be the last user of the entry. The entry is considered unused once
 * its refcount drops to 2: the reference held by the hash table plus the one
 * held by the caller.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain with the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of the entry races
 * with the search), finds the first reusable entry in the hash chain. The
 * function drops the reference to @entry and returns with a reference to the
 * found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);

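/*
 * Illustrative lookup sketch (not part of this file): walk all reusable
 * entries with a given hash, verify each candidate against the data being
 * deduplicated, and stop at the first real match. The helper
 * example_block_matches() and the convention that block 0 means "no match"
 * are hypothetical.
 *
 *	static u64 example_find_dup(struct mb_cache *cache, u32 hash)
 *	{
 *		struct mb_cache_entry *ce;
 *		u64 block = 0;
 *
 *		ce = mb_cache_entry_find_first(cache, hash);
 *		while (ce) {
 *			if (example_block_matches(ce->e_value)) {
 *				// Bias the LRU towards frequently reused entries
 *				mb_cache_entry_touch(cache, ce);
 *				block = ce->e_value;
 *				mb_cache_entry_put(cache, ce);
 *				break;
 *			}
 *			// Drops the reference to ce and takes one on the next entry
 *			ce = mb_cache_entry_find_next(cache, ce);
 *		}
 *		return block;
 *	}
 */
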
/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Returns the entry with an elevated refcount, or NULL if no entry with the
 * given key and value exists in the cache.
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

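/*
 * Illustrative sketch (not part of this file): check whether a particular
 * (hash, block) pair is still cached. A successful mb_cache_entry_get()
 * returns the entry with an elevated refcount, so the caller must drop it
 * with mb_cache_entry_put() when done. example_pair_is_cached() is a
 * hypothetical helper.
 *
 *	static bool example_pair_is_cached(struct mb_cache *cache, u32 hash,
 *					   u64 block)
 *	{
 *		struct mb_cache_entry *entry;
 *
 *		entry = mb_cache_entry_get(cache, hash, block);
 *		if (!entry)
 *			return false;
 *		mb_cache_entry_put(cache, entry);
 *		return true;
 *	}
 */
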
/*
 * mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove the entry from @cache with key @key and value @value. The removal
 * happens only if the entry is unused. The function returns NULL if the entry
 * was successfully removed or no such entry exists in the cache. Otherwise the
 * function grabs a reference to the entry that we failed to delete because it
 * still has users, and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value)
{
	struct mb_cache_entry *entry;

	entry = mb_cache_entry_get(cache, key, value);
	if (!entry)
		return NULL;

	/*
	 * Drop the ref we got from mb_cache_entry_get() and the initial hash
	 * ref if we are the last user
	 */
	if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
		return entry;

	spin_lock(&cache->c_list_lock);
	if (!list_empty(&entry->e_list))
		list_del_init(&entry->e_list);
	cache->c_entry_count--;
	spin_unlock(&cache->c_list_lock);
	__mb_cache_entry_free(cache, entry);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);

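/*
 * Illustrative removal sketch (not part of this file): when a filesystem
 * frees a block whose (hash, block) pair may be cached, it removes the pair
 * and, if somebody else still holds a reference, waits until it is the last
 * user before dropping its own reference. This mirrors the intended pairing
 * of mb_cache_entry_delete_or_get() with mb_cache_entry_wait_unused(). The
 * helper name example_forget_block() is hypothetical.
 *
 *	static void example_forget_block(struct mb_cache *cache, u32 hash,
 *					 u64 block)
 *	{
 *		struct mb_cache_entry *entry;
 *
 *		entry = mb_cache_entry_delete_or_get(cache, hash, block);
 *		if (entry) {
 *			mb_cache_entry_wait_unused(entry);
 *			mb_cache_entry_put(cache, entry);
 *		}
 *	}
 */
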
/*
 * mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks the entry as used to give it higher chances of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		/* Drop initial hash reference if there is no user */
		if (entry->e_referenced ||
		    atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		spin_unlock(&cache->c_list_lock);
		__mb_cache_entry_free(cache, entry);
		shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

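/*
 * Worked example of the reclaim thresholds (illustrative, assuming
 * bucket_bits == 10): mb_cache_create() sets c_max_entries to
 * (1 << 10) << 4 = 16384. Background reclaim is scheduled once the cache
 * reaches 16384 entries, and each worker run frees 16384 / SHRINK_DIVISOR =
 * 1024 of them; synchronous reclaim of SYNC_SHRINK_BATCH = 64 entries kicks
 * in once the cache grows past 2 * 16384 = 32768 entries.
 */
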
/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache with a hash table of 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in the cache and the cache itself. The caller must make
 * sure nobody (except the shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

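/*
 * Illustrative lifecycle sketch (not part of this file): a filesystem would
 * typically create one cache when its module loads and destroy it on unload.
 * The bucket_bits value of 10 and the example_init()/example_exit() names
 * are hypothetical.
 *
 *	static struct mb_cache *example_cache;
 *
 *	static int __init example_init(void)
 *	{
 *		example_cache = mb_cache_create(10);	// 1024 hash buckets
 *		if (!example_cache)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		mb_cache_destroy(example_cache);
 *	}
 */
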
static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");