// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as the key and provide a value that may
 * represent a block or inode number. That's why keys need not be unique (the
 * hash of different data may be the same). However, a user-provided value
 * always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
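
/*
 * Example (an illustrative sketch, not part of the API documentation): a
 * typical user creates a cache once, inserts key-value pairs, and looks
 * entries up by key. 'hash', 'blk_nr' and use() are hypothetical caller-side
 * names, and GFP_KERNEL is an assumption; filesystems would typically pass
 * GFP_NOFS instead:
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	struct mb_cache_entry *ce;
 *
 *	if (cache) {
 *		mb_cache_entry_create(cache, GFP_KERNEL, hash, blk_nr, true);
 *		ce = mb_cache_entry_find_first(cache, hash);
 *		if (ce) {
 *			use(ce->e_value);
 *			mb_cache_entry_put(cache, ce);
 *		}
 *		mb_cache_destroy(cache);
 *	}
 */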

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	/* List of entries for reclaim, roughly in LRU order */
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	/* Shrinker reclaiming entries under memory pressure */
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists in
 * the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2 * cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/*
	 * We create entry with two references. One reference is kept by the
	 * hash table, the other reference is used to protect us from
	 * mb_cache_entry_delete_or_get() until the entry is fully setup. This
	 * avoids nesting of cache->c_list_lock into hash table bit locks which
	 * is problematic for RT.
	 */
	atomic_set(&entry->e_refcnt, 2);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_flags = 0;
	if (reusable)
		set_bit(MBE_REUSABLE_B, &entry->e_flags);
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);
	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);
	/* Drop the extra setup reference now that the entry is fully visible */
	mb_cache_entry_put(cache, entry);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
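
/*
 * Illustrative sketch: a caller deduplicating blocks can treat -EBUSY as
 * "this key-value pair is already cached" rather than as a failure. The
 * variable names and the GFP_NOFS mask are assumptions for the example:
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, hash, blk_nr, true);
 *	if (err == -EBUSY)
 *		err = 0;
 */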

void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, entry->e_key);
	hlist_bl_lock(head);
	hlist_bl_del(&entry->e_hash_list);
	hlist_bl_unlock(head);
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 * @entry - entry to work on
 *
 * Wait until the only remaining references to the entry are ours and the
 * one held by the hash table.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);
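
/*
 * Illustrative sketch: a user that must make sure a key-value pair is truly
 * gone before reusing the value (as the ext* xattr code does when freeing a
 * block) can combine this with mb_cache_entry_delete_or_get() below; 'hash'
 * and 'blk_nr' are hypothetical caller-side variables:
 *
 *	entry = mb_cache_entry_delete_or_get(cache, hash, blk_nr);
 *	if (entry) {
 *		mb_cache_entry_wait_unused(entry);
 *		mb_cache_entry_put(cache, entry);
 *	}
 */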

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key &&
		    test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of an entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
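
/*
 * Illustrative sketch: together with mb_cache_entry_find_first() this forms
 * an iterator over all reusable entries with a given key. Since
 * mb_cache_entry_find_next() drops the reference to the entry passed in,
 * only the entry the loop stops at must be put by the caller
 * (candidate_matches() is a hypothetical predicate):
 *
 *	for (ce = mb_cache_entry_find_first(cache, hash); ce;
 *	     ce = mb_cache_entry_find_next(cache, ce)) {
 *		if (candidate_matches(ce))
 *			break;
 *	}
 *	if (ce)
 *		mb_cache_entry_put(cache, ce);
 */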

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/*
 * mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove the entry with key @key and value @value from cache @cache. The
 * removal happens only if the entry is unused. The function returns NULL in
 * case the entry was successfully removed or there's no such entry in the
 * cache. Otherwise the function grabs a reference to the entry that we failed
 * to delete because it still has users and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value)
{
	struct mb_cache_entry *entry;

	entry = mb_cache_entry_get(cache, key, value);
	if (!entry)
		return NULL;

	/*
	 * Drop the ref we got from mb_cache_entry_get() and the initial hash
	 * ref if we are the last user
	 */
	if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
		return entry;

	spin_lock(&cache->c_list_lock);
	if (!list_empty(&entry->e_list))
		list_del_init(&entry->e_list);
	cache->c_entry_count--;
	spin_unlock(&cache->c_list_lock);
	__mb_cache_entry_free(cache, entry);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);

/*
 * mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in the
 * cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	set_bit(MBE_REFERENCED_B, &entry->e_flags);
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		/* Drop initial hash reference if there is no user */
		if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
		    atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
			clear_bit(MBE_REFERENCED_B, &entry->e_flags);
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		spin_unlock(&cache->c_list_lock);
		__mb_cache_entry_free(cache, entry);
		shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache with a hash table of 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	/* Cap the cache at an average of 16 entries per hash bucket */
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
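
/*
 * Illustrative note: @bucket_bits trades memory for lookup speed. For
 * example, mb_cache_create(10) allocates 1024 hash buckets and, via the
 * "<< 4" above, allows up to 16384 entries before reclaim kicks in:
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *
 *	if (!cache)
 *		return -ENOMEM;
 */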

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in the cache and the cache itself. The caller must make
 * sure nobody (except the shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");