xref: /openbmc/linux/fs/mbcache.c (revision f9a82c48)
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we rely on this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as the key and provide a value that may
 * represent a block or inode number. That is why keys need not be unique
 * (hashes of different data may collide). However, the user-provided value
 * always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A
 * fixed-size hash table is used for fast key lookups.
 */
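
/*
 * Illustrative usage sketch; this is not code taken from ext2/ext4 and
 * the names 'hash' and 'blocknr' are placeholders chosen for the example.
 * A filesystem keys entries by a hash of the data and stores, say, a
 * block number as the value, treating -EBUSY (an identical pair is
 * already cached) as harmless:
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	int err;
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, hash, blocknr, true);
 *	if (err && err != -EBUSY)
 *		goto failed;
 *	...
 *	mb_cache_entry_delete(cache, hash, blocknr);
 *	mb_cache_destroy(cache);
 */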

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @value: value of the entry
 * @reusable: is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists in
 * the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for the hash list; the LRU list grabs its own ref below */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

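/*
 * Final freeing of an entry, called from mb_cache_entry_put() once the
 * last reference to the entry has been dropped.
 */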
void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

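/*
 * Walk the hash chain for @key and return the first reusable entry,
 * starting either just after @entry (when @entry is still hashed) or
 * from the head of the chain. Grabs a reference to the returned entry
 * and drops the reference to @entry, if one was passed in.
 */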
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
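
/*
 * Illustrative sketch of iterating over all reusable entries with a
 * given key; the caller and the candidate_matches() helper below are
 * hypothetical, not ext4 code. Each step hands its reference over to
 * mb_cache_entry_find_next(), so only the entry the loop stops at has
 * to be put explicitly:
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_find_first(cache, key);
 *	while (ce) {
 *		if (candidate_matches(ce)) {
 *			mb_cache_entry_touch(cache, ce);
 *			break;
 *		}
 *		ce = mb_cache_entry_find_next(cache, ce);
 *	}
 *	if (ce)
 *		mb_cache_entry_put(cache, ce);
 */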

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Grabs a reference to the entry with key @key and value @value and returns
 * it, or returns NULL if no such entry exists in the cache.
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);
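
/*
 * Illustrative sketch (the surrounding caller code is hypothetical):
 * checking whether a particular (key, value) pair is still cached. The
 * reference grabbed by mb_cache_entry_get() must be dropped with
 * mb_cache_entry_put() once the caller is done with the entry:
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_get(cache, hash, blocknr);
 *	if (ce) {
 *		...
 *		mb_cache_entry_put(cache, ce);
 *	}
 */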

/* mb_cache_entry_delete - remove a cache entry
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove the entry from @cache that has key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/* mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in the
 * cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

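/* Shrinker callback: report how many cache entries could be reclaimed */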
static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

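/* Shrinker callback: reclaim up to sc->nr_to_scan entries from the LRU list */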
static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache with a hash table of 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in the cache and the cache itself. The caller must make
 * sure nobody (except the shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");