// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as a key and provide a value that may represent
 * a block or inode number. That's why keys need not be unique (hashes of
 * different data may collide). However, the user-provided value always
 * uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A
 * fixed-size hash table is used for fast key lookups.
 */
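
/*
 * Example usage (an illustrative sketch; the example function, the six
 * bucket bits and the key/value constants are arbitrary and not part of
 * the mbcache API):
 */
static int __maybe_unused mb_cache_example_lifecycle(void)
{
	struct mb_cache *cache;
	struct mb_cache_entry *entry;
	int err;

	cache = mb_cache_create(6);	/* hash table with 2^6 = 64 buckets */
	if (!cache)
		return -ENOMEM;

	/* Insert one key-value pair; no reference is returned to us. */
	err = mb_cache_entry_create(cache, GFP_KERNEL, 0x1234, 42, true);
	if (err)
		goto out;

	/* Look it up by key; a reference is grabbed and must be dropped. */
	entry = mb_cache_entry_find_first(cache, 0x1234);
	if (entry)
		mb_cache_entry_put(cache, entry);
out:
	mb_cache_destroy(cache);
	return err;
}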

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in the cache.
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @value: value of the entry
 * @reusable: is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists in
 * the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* Initial reference goes to the hash list (the LRU list grabs another below) */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
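
/*
 * Example (an illustrative sketch, not part of the API; the key and the
 * block-number values are made up): duplicate keys are allowed, duplicate
 * key-value pairs are not.
 */
static int __maybe_unused mb_cache_example_duplicates(struct mb_cache *cache)
{
	u32 key = 0xcafe;	/* stands in for a hash of block contents */
	int err;

	/* Two different values (e.g. block numbers) may share one key. */
	err = mb_cache_entry_create(cache, GFP_KERNEL, key, 100, true);
	if (err)
		return err;
	err = mb_cache_entry_create(cache, GFP_KERNEL, key, 200, true);
	if (err)
		return err;

	/* Re-inserting an existing key-value pair is refused. */
	err = mb_cache_entry_create(cache, GFP_KERNEL, key, 100, true);
	return err;	/* -EBUSY: the pair (key, 100) already exists */
}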

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
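
/*
 * Example search loop (an illustrative sketch; mb_cache_example_pick and
 * @skip_value are made up for the example). __entry_find() drops the
 * reference to the previous entry on each step, so only the entry we
 * return still carries a reference, which the caller must drop with
 * mb_cache_entry_put().
 */
static struct mb_cache_entry * __maybe_unused
mb_cache_example_pick(struct mb_cache *cache, u32 key, u64 skip_value)
{
	struct mb_cache_entry *entry;

	for (entry = mb_cache_entry_find_first(cache, key); entry;
	     entry = mb_cache_entry_find_next(cache, entry)) {
		/* A real caller would verify the candidate block here. */
		if (entry->e_value != skip_value)
			return entry;
	}
	return NULL;	/* no suitable entry; all references dropped */
}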

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache: cache we work with
 * @key: key
 * @value: value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);
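
/*
 * Example (an illustrative sketch; the helper is made up): look up one
 * specific key-value pair, mark it as recently used, and drop the
 * reference mb_cache_entry_get() grabbed.
 */
static bool __maybe_unused mb_cache_example_lookup(struct mb_cache *cache,
						   u32 key, u64 value)
{
	struct mb_cache_entry *entry;

	entry = mb_cache_entry_get(cache, key, value);
	if (!entry)
		return false;
	mb_cache_entry_touch(cache, entry);	/* boost its LRU survival */
	mb_cache_entry_put(cache, entry);
	return true;
}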

/*
 * mb_cache_entry_delete - remove a cache entry
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove an entry from @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);
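
/*
 * Example (an illustrative sketch; the helper name is made up): a
 * filesystem would call this when freeing the block behind a cached
 * value, so a stale block number is never handed out again for
 * deduplication. Deleting a pair that is not cached is a harmless no-op.
 */
static void __maybe_unused mb_cache_example_forget(struct mb_cache *cache,
						   u32 hash, u64 block)
{
	mb_cache_entry_delete(cache, hash, block);
}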

/*
 * mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks the entry as used, which gives it a higher chance of surviving
 * in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}
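
/*
 * Worked example of the reclaim thresholds (numbers only, assuming a cache
 * created with bucket_bits = 10): bucket_count = 2^10 = 1024, so
 * c_max_entries = 1024 << 4 = 16384. At 16384 entries the worker above is
 * scheduled and scans 16384 / SHRINK_DIVISOR = 1024 entries per run; at
 * 2 * 16384 = 32768 entries, mb_cache_entry_create() additionally reclaims
 * SYNC_SHRINK_BATCH = 64 entries synchronously.
 */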

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache whose hash table has 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
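
/*
 * Example setup/teardown (an illustrative sketch; the helper and the ten
 * bucket bits are arbitrary choices): a NULL return covers both allocation
 * failure and register_shrinker() failure, so no further cleanup is needed
 * and a caller might simply run without the cache.
 */
static void __maybe_unused mb_cache_example_mount_unmount(void)
{
	/* Typically created once per filesystem instance at mount time. */
	struct mb_cache *cache = mb_cache_create(10);

	if (!cache)
		return;	/* fall back to running without the cache */

	/* ... mount lifetime: create/find/delete entries ... */

	mb_cache_destroy(cache);	/* at unmount; frees all entries */
}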

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in the cache and the cache itself. The caller must make
 * sure nobody (except the shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");