/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_LRU_CACHE_H
#define BTRFS_LRU_CACHE_H

#include <linux/maple_tree.h>
#include <linux/list.h>

/*
 * A cache entry. This is meant to be embedded in a structure of a user of
 * this module. Similar to how struct list_head and struct rb_node are used.
 *
 * Note: it should be embedded as the first element in a struct (offset 0), and
 * this module assumes it was allocated with kmalloc(), so it calls kfree() when
 * it needs to free an entry.
 */
struct btrfs_lru_cache_entry {
	struct list_head lru_list;
	u64 key;
	/*
	 * Optional generation associated with a key. Use 0 if not needed/used.
	 * Entries with the same key and different generations are stored in a
	 * linked list, so use this only for cases where there's a small number
	 * of different generations.
	 */
	u64 gen;
	/*
	 * The maple tree uses the unsigned long type for its keys, which is
	 * 32 bits on 32-bit systems and 64 bits on 64-bit systems. So if we
	 * want to use something like inode numbers as keys, which are always
	 * a u64, we have to deal with this in a special way - we store the key
	 * in the entry itself, as a u64, and the values inserted into the
	 * maple tree are linked lists of entries - so on a 64-bit system that
	 * list always has a single entry, while on 32-bit systems it may have
	 * more than one, with each entry having the same value for the lower
	 * 32 bits of its u64 key.
	 */
	struct list_head list;
};
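
/*
 * Illustrative sketch (not part of this header): a user structure embedding
 * the cache entry as its first member, at offset 0, as the note above
 * requires. The names "my_cache_entry" and "data" are hypothetical.
 *
 *	struct my_cache_entry {
 *		struct btrfs_lru_cache_entry entry;	// must be first (offset 0)
 *		u64 data;
 *	};
 *
 * With the entry at offset 0, the kfree() this module performs on the
 * embedded entry frees the whole user structure, which is why it must have
 * been allocated with kmalloc().
 */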

struct btrfs_lru_cache {
	struct list_head lru_list;
	struct maple_tree entries;
	/* Number of entries stored in the cache. */
	unsigned int size;
	/* Maximum number of entries the cache can have. */
	unsigned int max_size;
};

#define btrfs_lru_cache_for_each_entry_safe(cache, entry, tmp)		\
	list_for_each_entry_safe_reverse((entry), (tmp), &(cache)->lru_list, lru_list)
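
/*
 * Illustrative sketch: iterating all entries with the macro above, which
 * walks the LRU list in reverse and is safe against removal of the current
 * entry. "my_cache_entry" and should_drop() are hypothetical; the cast relies
 * on the entry being the first member of the user structure.
 *
 *	struct btrfs_lru_cache_entry *entry;
 *	struct btrfs_lru_cache_entry *tmp;
 *
 *	btrfs_lru_cache_for_each_entry_safe(&cache, entry, tmp) {
 *		struct my_cache_entry *e = (struct my_cache_entry *)entry;
 *
 *		if (should_drop(e))
 *			btrfs_lru_cache_remove(&cache, entry);
 *	}
 */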

static inline unsigned int btrfs_lru_cache_size(const struct btrfs_lru_cache *cache)
{
	return cache->size;
}

static inline struct btrfs_lru_cache_entry *btrfs_lru_cache_lru_entry(
		struct btrfs_lru_cache *cache)
{
	return list_first_entry_or_null(&cache->lru_list,
					struct btrfs_lru_cache_entry, lru_list);
}

void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size);
struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
						     u64 key, u64 gen);
int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
			  struct btrfs_lru_cache_entry *new_entry,
			  gfp_t gfp);
void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache,
			    struct btrfs_lru_cache_entry *entry);
void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache);
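
/*
 * Illustrative usage sketch. The entry type "my_cache_entry", the key "ino"
 * and the cache size of 64 are hypothetical; only the functions declared
 * above are real. On success, the cache owns the stored entry and will
 * kfree() it when it is removed, evicted or the cache is cleared; this sketch
 * assumes the caller still owns the entry if the store fails.
 *
 *	struct btrfs_lru_cache cache;
 *	struct btrfs_lru_cache_entry *found;
 *	struct my_cache_entry *new_entry;
 *	int ret;
 *
 *	btrfs_lru_cache_init(&cache, 64);
 *
 *	found = btrfs_lru_cache_lookup(&cache, ino, 0);
 *	if (!found) {
 *		new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
 *		if (!new_entry)
 *			return -ENOMEM;
 *		new_entry->entry.key = ino;
 *		new_entry->entry.gen = 0;
 *		ret = btrfs_lru_cache_store(&cache, &new_entry->entry, GFP_KERNEL);
 *		if (ret)
 *			kfree(new_entry);
 *	}
 *
 *	btrfs_lru_cache_clear(&cache);
 */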

#endif