// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/jhash.h>
#include <linux/random.h>

#define BLOOM_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)

struct bpf_bloom_filter {
	struct bpf_map map;
	u32 bitset_mask;
	u32 hash_seed;
	/* If the size of the values in the bloom filter is u32 aligned,
	 * then it is more performant to use jhash2 as the underlying hash
	 * function, else we use jhash. This tracks the number of u32s
	 * in a u32-aligned value size. If the value size is not u32 aligned,
	 * this will be 0.
	 */
	u32 aligned_u32_count;
	u32 nr_hash_funcs;
	unsigned long bitset[];
};

/* Compute the bit index for the hash function at position @index */
static u32 hash(struct bpf_bloom_filter *bloom, void *value,
		u32 value_size, u32 index)
{
	u32 h;

	if (bloom->aligned_u32_count)
		h = jhash2(value, bloom->aligned_u32_count,
			   bloom->hash_seed + index);
	else
		h = jhash(value, value_size, bloom->hash_seed + index);

	return h & bloom->bitset_mask;
}

/* Query the filter. Returns 0 if the value is possibly present and
 * -ENOENT if it is definitely absent: false positives are possible,
 * false negatives are not.
 */
static int bloom_map_peek_elem(struct bpf_map *map, void *value)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		if (!test_bit(h, bloom->bitset))
			return -ENOENT;
	}

	return 0;
}

/* Add a value to the filter by setting one bit per hash function */
static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	if (flags != BPF_ANY)
		return -EINVAL;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		set_bit(h, bloom->bitset);
	}

	return 0;
}

static int bloom_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static int bloom_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
	u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_bloom_filter *bloom;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	if (attr->key_size != 0 || attr->value_size == 0 ||
	    attr->max_entries == 0 ||
	    attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    /* The lower 4 bits of map_extra (0xF) specify the number
	     * of hash functions
	     */
	    (attr->map_extra & ~0xF))
		return ERR_PTR(-EINVAL);

	nr_hash_funcs = attr->map_extra;
	if (nr_hash_funcs == 0)
		/* Default to using 5 hash functions if unspecified */
		nr_hash_funcs = 5;

	/* For the bloom filter, the optimal bit array size that minimizes
	 * the false positive probability is n * k / ln(2), where n is the
	 * number of expected entries in the bloom filter and k is the
	 * number of hash functions. We use 7 / 5 to approximate 1 / ln(2).
	 *
	 * We round this up to the nearest power of two to enable more
	 * efficient hashing using a bitmask. The bitmask will be the bit
	 * array size - 1.
	 *
	 * If this overflows a u32, the bit array is capped at 2^32 bits
	 * (512 MiB of memory).
	 */
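	/* Worked example (illustrative numbers, not from the original
	 * source): with max_entries = 10000 and nr_hash_funcs = 5, the
	 * sizing below computes nr_bits = 10000 * 5 / 5 * 7 = 70000,
	 * which rounds up to 2^17 = 131072 bits, i.e. a 16 KiB bit
	 * array with bitset_mask = 0x1ffff.
	 */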
	if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||
	    check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) ||
	    nr_bits > (1UL << 31)) {
		/* The bit array size is 2^32 bits but to avoid overflowing
		 * the u32, we use U32_MAX, which will round up to the
		 * equivalent number of bytes
		 */
		bitset_bytes = BITS_TO_BYTES(U32_MAX);
		bitset_mask = U32_MAX;
	} else {
		if (nr_bits <= BITS_PER_LONG)
			nr_bits = BITS_PER_LONG;
		else
			nr_bits = roundup_pow_of_two(nr_bits);
		bitset_bytes = BITS_TO_BYTES(nr_bits);
		bitset_mask = nr_bits - 1;
	}

	bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
	bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);

	if (!bloom)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&bloom->map, attr);

	bloom->nr_hash_funcs = nr_hash_funcs;
	bloom->bitset_mask = bitset_mask;

	/* Check whether the value size is u32-aligned */
	if ((attr->value_size & (sizeof(u32) - 1)) == 0)
		bloom->aligned_u32_count =
			attr->value_size / sizeof(u32);

	if (!(attr->map_flags & BPF_F_ZERO_SEED))
		bloom->hash_seed = get_random_int();

	return &bloom->map;
}

static void bloom_map_free(struct bpf_map *map)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);

	bpf_map_area_free(bloom);
}

static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
{
	/* The eBPF program should use map_peek_elem instead */
	return ERR_PTR(-EINVAL);
}

static int bloom_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	/* The eBPF program should use map_push_elem instead */
	return -EINVAL;
}

static int bloom_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	/* Bloom filter maps are keyless */
	return btf_type_is_void(key_type) ? 0 : -EINVAL;
}

static int bpf_bloom_map_btf_id;
const struct bpf_map_ops bloom_filter_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bloom_map_alloc,
	.map_free = bloom_map_free,
	.map_push_elem = bloom_map_push_elem,
	.map_peek_elem = bloom_map_peek_elem,
	.map_pop_elem = bloom_map_pop_elem,
	.map_lookup_elem = bloom_map_lookup_elem,
	.map_update_elem = bloom_map_update_elem,
	.map_delete_elem = bloom_map_delete_elem,
	.map_check_btf = bloom_map_check_btf,
	.map_btf_name = "bpf_bloom_filter",
	.map_btf_id = &bpf_bloom_map_btf_id,
};
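
/* Usage sketch (illustrative, not part of the original file): from a BPF
 * program, this map is driven through the bpf_map_push_elem() and
 * bpf_map_peek_elem() helpers, which land in the ops table above.
 * Assuming a hypothetical map named "bloom" declared with
 * BPF_MAP_TYPE_BLOOM_FILTER, no key, a u32 value, and map_extra = 5:
 *
 *	u32 val = 42;
 *
 *	// Add the value; BPF_ANY is the only accepted flag.
 *	bpf_map_push_elem(&bloom, &val, BPF_ANY);
 *
 *	// 0 means "possibly present"; -ENOENT means "definitely absent".
 *	long ret = bpf_map_peek_elem(&bloom, &val);
 */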