// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#include <linux/bpf_mem_alloc.h>

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)

#define BATCH_OPS(_name)			\
	.map_lookup_batch =			\
	_name##_map_lookup_batch,		\
	.map_lookup_and_delete_batch =		\
	_name##_map_lookup_and_delete_batch,	\
	.map_update_batch =			\
	generic_map_update_batch,		\
	.map_delete_batch =			\
	generic_map_delete_batch
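
/* For example, BATCH_OPS(htab) expands to:
 *   .map_lookup_batch = htab_map_lookup_batch,
 *   .map_lookup_and_delete_batch = htab_map_lookup_and_delete_batch,
 *   .map_update_batch = generic_map_update_batch,
 *   .map_delete_batch = generic_map_delete_batch
 */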

/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context including perf, kprobes and
 * tracing. As there are almost no limits on where perf, kprobes and
 * tracing can be invoked from, the lock operations need to be protected
 * against deadlocks. Deadlocks can be caused by recursion and by an
 * invocation in the lock held section when functions which acquire this
 * lock are invoked from sys_bpf(). BPF recursion is prevented by
 * incrementing the per CPU variable bpf_prog_active, which prevents BPF
 * programs attached to perf events, kprobes and tracing from being
 * invoked before the prior invocation from one of these contexts has
 * completed. sys_bpf() uses the same mechanism by pinning the task to the
 * current CPU and incrementing the recursion protection across the map
 * operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure the deterministic behaviour these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
 * it was only safe to use a raw spinlock for a preallocated hash map on an
 * RT kernel, because there was no memory allocation within the lock held
 * sections. However, now that the hash map has been fully converted to use
 * bpf_mem_alloc, memory allocation for a non-preallocated hash map no
 * longer blocks, so it is safe to always use a raw spinlock for the
 * bucket lock.
 */
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t raw_lock;
};

#define HASHTAB_MAP_LOCK_COUNT 8
#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)

struct bpf_htab {
	struct bpf_map map;
	struct bpf_mem_alloc ma;
	struct bpf_mem_alloc pcpu_ma;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	/* The number of elements in a non-preallocated hashtable is kept
	 * in either pcount or count.
	 */
	struct percpu_counter pcount;
	atomic_t count;
	bool use_percpu_counter;
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
	u32 hashrnd;
	struct lock_class_key lockdep_key;
	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct pcpu_freelist_node fnode;
				struct htab_elem *batch_flink;
			};
		};
	};
	union {
		/* pointer to per-cpu pointer */
		void *ptr_to_pptr;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[] __aligned(8);
};
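
/* Element layout sketch for a regular (non-percpu) map:
 *
 *   struct htab_elem | round_up(key_size, 8) key bytes | round_up(value_size, 8) value bytes
 *
 * For percpu maps the value area instead holds a single pointer to the
 * per-CPU storage, accessed via htab_elem_set_ptr()/htab_elem_get_ptr()
 * below (see the elem_size computation in htab_map_alloc()).
 */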

static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static void htab_init_buckets(struct bpf_htab *htab)
{
	unsigned int i;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].raw_lock);
		lockdep_set_class(&htab->buckets[i].raw_lock,
				  &htab->lockdep_key);
		cond_resched();
	}
}

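/* htab_lock_bucket() returns -EBUSY when this CPU is already inside a
 * locked section of the same lock group of this map, so a BPF program
 * re-entering the map code (e.g. from a tracing program) cannot deadlock
 * on the bucket lock. Index sketch:
 *   hash & min(HASHTAB_MAP_LOCK_MASK, n_buckets - 1)
 * e.g. with n_buckets >= 8, hash 0x2f uses per-CPU counter 0x2f & 7 == 7.
 */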
static inline int htab_lock_bucket(const struct bpf_htab *htab,
				   struct bucket *b, u32 hash,
				   unsigned long *pflags)
{
	unsigned long flags;

	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);

	preempt_disable();
	local_irq_save(flags);
	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
		__this_cpu_dec(*(htab->map_locked[hash]));
		local_irq_restore(flags);
		preempt_enable();
		return -EBUSY;
	}

	raw_spin_lock(&b->raw_lock);
	*pflags = flags;

	return 0;
}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
				      struct bucket *b, u32 hash,
				      unsigned long flags)
{
	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
	raw_spin_unlock(&b->raw_lock);
	__this_cpu_dec(*(htab->map_locked[hash]));
	local_irq_restore(flags);
	preempt_enable();
}

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
	       htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	       htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
}

static bool htab_has_extra_elems(struct bpf_htab *htab)
{
	return !htab_is_percpu(htab) && !htab_is_lru(htab);
}

static void htab_free_prealloced_timers(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
		cond_resched();
	}
}

static void htab_free_prealloced_fields(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (IS_ERR_OR_NULL(htab->map.record))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();
	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		if (htab_is_percpu(htab)) {
			void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
			int cpu;

			for_each_possible_cpu(cpu) {
				bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
				cond_resched();
			}
		} else {
			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
			cond_resched();
		}
		cond_resched();
	}
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 * (bucket_lock). If both locks need to be acquired together, the lock
 * order is always lru_lock -> bucket_lock and this only happens in
 * bpf_lru_list.c logic. For example, certain code paths of
 * bpf_lru_pop_free(), which is called by prealloc_lru_pop(), acquire
 * lru_lock first and then bucket_lock.
 *
 * In hashtab.c, to avoid deadlock, lock acquisition of
 * bucket_lock followed by lru_lock is not allowed. In such cases,
 * bucket_lock needs to be released first before acquiring lru_lock.
 */
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		bpf_map_inc_elem_count(&htab->map);
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
					    GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

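/* Stash one spare element per CPU. When an update replaces an existing
 * key, alloc_htab_elem() hands out this CPU's spare and stashes the old
 * element in its place, avoiding a freelist pop/push in the common
 * update path.
 */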
static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
				    GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus() elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (zero_seed && !capable(CAP_SYS_ADMIN))
		/* Guard against local DoS, and discourage production use. */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
	    sizeof(struct htab_elem))
		/* if key_size + value_size is bigger, the user space won't be
		 * able to access the elements via bpf syscall. This check
		 * also makes sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}

static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	lockdep_register_key(&htab->lockdep_key);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* Ensure each CPU's LRU list has >= 1 element.
		 * While we are at it, make each LRU list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
	 * into UB on 32-bit arches, so check that first
	 */
	err = -E2BIG;
	if (htab->map.max_entries > 1UL << 31)
		goto free_htab;

	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* check for u32 overflow */
	if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	err = bpf_map_init_elem_count(&htab->map);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_elem_count;

	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
							   sizeof(int),
							   sizeof(int),
							   GFP_USER);
		if (!htab->map_locked[i])
			goto free_map_locked;
	}

	if (htab->map.map_flags & BPF_F_ZERO_SEED)
		htab->hashrnd = 0;
	else
		htab->hashrnd = get_random_u32();

	htab_init_buckets(htab);

	/* compute_batch_value() computes batch value as num_online_cpus() * 2
	 * and __percpu_counter_compare() needs
	 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
	 * for percpu_counter to be faster than atomic_t. In practice the average bpf
	 * hash map size is 10k, which means that a system with 64 cpus will fill
	 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
	 * define our own batch count as 32, so a 10k hash map can be filled up to 80%:
	 * 10k - 8k > 32 _batch_ * 64 _cpus_
	 * and __percpu_counter_compare() will still be fast. At that point hash map
	 * collisions will dominate its performance anyway. Assume that hash map filled
	 * to 50+% isn't going to be O(1) and use the following formula to choose
	 * between percpu_counter and atomic_t.
	 */
#define PERCPU_COUNTER_BATCH 32
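	/* Worked example of the check below: for max_entries = 10000 on a
	 * 64-CPU system, 10000 / 2 = 5000 > 64 * 32 = 2048, so the percpu
	 * counter is used; a 1000-entry map would use the plain atomic_t.
	 */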
	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
		htab->use_percpu_counter = true;

	if (htab->use_percpu_counter) {
		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
		if (err)
			goto free_map_locked;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_map_locked;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	} else {
		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
		if (err)
			goto free_map_locked;
		if (percpu) {
			err = bpf_mem_alloc_init(&htab->pcpu_ma,
						 round_up(htab->map.value_size, 8), true);
			if (err)
				goto free_map_locked;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_map_locked:
	if (htab->use_percpu_counter)
		percpu_counter_destroy(&htab->pcount);
	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
		free_percpu(htab->map_locked[i]);
	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);
free_elem_count:
	bpf_map_free_elem_count(&htab->map);
free_htab:
	lockdep_unregister_key(&htab->lockdep_key);
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}

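/* E.g. an 8-byte key takes the word-wise jhash2() path with two u32
 * words, while a 6-byte key falls back to the byte-wise jhash().
 */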
static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
	if (likely(key_len % 4 == 0))
		return jhash2(key, key_len / 4, hashrnd);
	return jhash(key, key_len, hashrnd);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while the linked list is being walked. The nulls value terminating
 * each list encodes the bucket index, so if a walk ends on a nulls value
 * that doesn't match (hash & (n_buckets - 1)), an element was freed and
 * reused in another bucket mid-walk and the lookup must be retried.
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
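/* The generated sequence is roughly:
 *   call __htab_map_lookup_elem
 *   if r0 == 0 goto +1
 *   r0 += offsetof(struct htab_elem, key) + round_up(map->key_size, 8)
 * so on a hit the program receives a pointer to the value area directly.
 */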
static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
							void *key, const bool mark)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		if (mark)
			bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, true);
}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, false);
}

static int htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void check_and_free_fields(struct bpf_htab *htab,
				  struct htab_elem *elem)
{
	if (htab_is_percpu(htab)) {
		void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
		int cpu;

		for_each_possible_cpu(cpu)
			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
	} else {
		void *map_value = elem->key + round_up(htab->map.key_size, 8);

		bpf_obj_free_fields(htab->map.record, map_value);
	}
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;
	int ret;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
	if (ret)
		return false;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			check_and_free_fields(htab, l);
			bpf_map_dec_elem_count(&htab->map);
			break;
		}

	htab_unlock_bucket(htab, b, tgt_l->hash, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if there is a next elem in this hash list, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	check_and_free_fields(htab, l);

	migrate_disable();
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
	bpf_mem_cache_free(&htab->ma, l);
	migrate_enable();
}

static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;
	void *ptr;

	if (map->ops->map_fd_put_ptr) {
		ptr = fd_htab_map_get_ptr(map, l);
		map->ops->map_fd_put_ptr(map, ptr, true);
	}
}

static bool is_map_full(struct bpf_htab *htab)
{
	if (htab->use_percpu_counter)
		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
						PERCPU_COUNTER_BATCH) >= 0;
	return atomic_read(&htab->count) >= htab->map.max_entries;
}

static void inc_elem_count(struct bpf_htab *htab)
{
	bpf_map_inc_elem_count(&htab->map);

	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
	else
		atomic_inc(&htab->count);
}

static void dec_elem_count(struct bpf_htab *htab)
{
	bpf_map_dec_elem_count(&htab->map);

	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
	else
		atomic_dec(&htab->count);
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	htab_put_fd_value(htab, l);

	if (htab_is_prealloc(htab)) {
		bpf_map_dec_elem_count(&htab->map);
		check_and_free_fields(htab, l);
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		dec_elem_count(htab);
		htab_elem_free(htab, l);
	}
}

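/* When onallcpus is set (the sys_bpf() path), @value is a flat buffer
 * holding one value per possible CPU at a round_up(value_size, 8) stride,
 * e.g. for value_size == 12 and 4 possible CPUs:
 *   [cpu0: 12B + 4B pad][cpu1: 12B + 4B pad][cpu2: ...][cpu3: ...]
 */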
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
			off += size;
		}
	}
}

static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	/* When not setting the initial value on all CPUs, zero-fill element
	 * values for the other CPUs. Otherwise, the BPF program has no way
	 * to ensure known initial values for CPUs other than the current one
	 * (onallcpus is always false when coming from a BPF prog).
	 */
	if (!onallcpus) {
		int current_cpu = raw_smp_processor_id();
		int cpu;

		for_each_possible_cpu(cpu) {
			if (cpu == current_cpu)
				copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
			else /* Since elem is preallocated, we cannot touch special fields */
				zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
		}
	} else {
		pcpu_copy_value(htab, pptr, value, onallcpus);
	}
}

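/* For BPF_MAP_TYPE_HASH_OF_MAPS the stored value is a struct bpf_map
 * pointer (8 bytes on 64-bit) while the user-visible value_size is 4
 * (a map fd/id), hence the round_up of the copy size in alloc_htab_elem().
 */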
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab->map.value_size;
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = __pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
			bpf_map_inc_elem_count(&htab->map);
		}
	} else {
		if (is_map_full(htab))
			if (!old_elem)
				/* when the map is full and update() is
				 * replacing an old element, it's ok to
				 * allocate, since the old element will be
				 * freed immediately.
				 * Otherwise return an error
				 */
				return ERR_PTR(-E2BIG);
		inc_elem_count(htab);
		l_new = bpf_mem_cache_alloc(&htab->ma);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
			if (!pptr) {
				bpf_mem_cache_free(&htab->ma, l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
			l_new->ptr_to_pptr = pptr;
			pptr = *(void **)pptr;
		}

		pcpu_init_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else if (fd_htab_map_needs_adjust(htab)) {
		size = round_up(size, 8);
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	} else {
		copy_map_value(&htab->map,
			       l_new->key + round_up(key_size, 8),
			       value);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	dec_elem_count(htab);
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	void *old_map_ptr;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	if (unlikely(map_flags & BPF_F_LOCK)) {
		if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
			return -EINVAL;
		/* find an element without taking the bucket lock */
		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
					      htab->n_buckets);
		ret = check_flags(htab, l_old, map_flags);
		if (ret)
			return ret;
		if (l_old) {
			/* grab the element lock and update value in place */
			copy_map_value_locked(map,
					      l_old->key + round_up(key_size, 8),
					      value, false);
			return 0;
		}
		/* fall through, grab the bucket lock and lookup again.
		 * 99.9% chance that the element won't be found,
		 * but second lookup under lock has to be done.
		 */
	}

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
		/* first lookup without the bucket lock didn't find the element,
		 * but second lookup with the bucket lock found it.
		 * This case is highly unlikely, but has to be dealt with:
		 * grab the element lock in addition to the bucket lock
		 * and update element in place
		 */
		copy_map_value_locked(map,
				      l_old->key + round_up(key_size, 8),
				      value, false);
		ret = 0;
		goto err;
	}

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);

		/* l_old has already been stashed in htab->extra_elems, free
		 * its special fields before it is available for reuse. Also
		 * save the old map pointer in htab of maps before unlock
		 * and release it after unlock.
		 */
		old_map_ptr = NULL;
		if (htab_is_prealloc(htab)) {
			if (map->ops->map_fd_put_ptr)
				old_map_ptr = fd_htab_map_get_ptr(map, l_old);
			check_and_free_fields(htab, l_old);
		}
	}
	htab_unlock_bucket(htab, b, hash, flags);
	if (l_old) {
		if (old_map_ptr)
			map->ops->map_fd_put_ptr(map, old_map_ptr, true);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	return 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
{
	check_and_free_fields(htab, elem);
	bpf_map_dec_elem_count(&htab->map);
	bpf_lru_push_free(&htab->lru, &elem->lru_node);
}

static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	copy_map_value(&htab->map,
		       l_new->key + round_up(map->key_size, 8), value);

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		goto err_lock_bucket;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	htab_unlock_bucket(htab, b, hash, flags);

err_lock_bucket:
	if (ret)
		htab_lru_push_free(htab, l_new);
	else if (l_old)
		htab_lru_push_free(htab, l_old);

	return ret;
}

static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 map_flags,
					  bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					      void *value, u64 map_flags,
					      bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		goto err_lock_bucket;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
err_lock_bucket:
	if (l_new) {
		bpf_map_dec_elem_count(&htab->map);
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	}
	return ret;
}

static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					    void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static long htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);
	if (l)
		hlist_nulls_del_rcu(&l->hash_node);
	else
		ret = -ENOENT;

	htab_unlock_bucket(htab, b, hash, flags);

	if (l)
		free_htab_elem(htab, l);
	return ret;
}

static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l)
		hlist_nulls_del_rcu(&l->hash_node);
	else
		ret = -ENOENT;

	htab_unlock_bucket(htab, b, hash, flags);
	if (l)
		htab_lru_push_free(htab, l);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	/* It's called from a worker thread, so disable migration here,
	 * since bpf_mem_cache_free() relies on that.
	 */
	migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
	migrate_enable();
}

static void htab_free_malloced_timers(struct bpf_htab *htab)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry(l, n, head, hash_node) {
			/* We only free timers on uref dropping to zero */
			bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

static void htab_map_free_timers(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* We only free timers on uref dropping to zero */
	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
		return;
	if (!htab_is_prealloc(htab))
		htab_free_malloced_timers(htab);
	else
		htab_free_prealloced_timers(htab);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int i;

	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
	 * There is no need to synchronize_rcu() here to protect map elements.
	 */

	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
	 * underneath and is responsible for waiting for callbacks to finish
	 * during bpf_mem_alloc_destroy().
	 */
	if (!htab_is_prealloc(htab)) {
		delete_all_elements(htab);
	} else {
		htab_free_prealloced_fields(htab);
		prealloc_destroy(htab);
	}

	bpf_map_free_elem_count(map);
	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);
	if (htab->use_percpu_counter)
		percpu_counter_destroy(&htab->pcount);
	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
		free_percpu(htab->map_locked[i]);
	lockdep_unregister_key(&htab->lockdep_key);
	bpf_map_area_free(htab);
}

static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
				   struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = htab_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": ");
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					     void *value, bool is_lru_map,
					     bool is_percpu, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	unsigned long bflags;
	struct htab_elem *l;
	u32 hash, key_size;
	struct bucket *b;
	int ret;

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &bflags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);
	if (!l) {
		ret = -ENOENT;
	} else {
		if (is_percpu) {
			u32 roundup_value_size = round_up(map->value_size, 8);
			void __percpu *pptr;
			int off = 0, cpu;

			pptr = htab_elem_get_ptr(l, key_size);
			for_each_possible_cpu(cpu) {
				copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(&htab->map, value + off);
				off += roundup_value_size;
			}
		} else {
			u32 roundup_key_size = round_up(map->key_size, 8);

			if (flags & BPF_F_LOCK)
				copy_map_value_locked(map, value, l->key +
						      roundup_key_size,
						      true);
			else
				copy_map_value(map, value, l->key +
					       roundup_key_size);
			/* Zeroing special fields in the temp buffer */
			check_and_init_map_value(map, value);
		}

		hlist_nulls_del_rcu(&l->hash_node);
		if (!is_lru_map)
			free_htab_elem(htab, l);
	}

	htab_unlock_bucket(htab, b, hash, bflags);

	if (is_lru_map && l)
		htab_lru_push_free(htab, l);

	return ret;
}

static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
						 flags);
}

static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						  void *key, void *value,
						  u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
						 flags);
}

static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					       void *value, u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
						 flags);
}

static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						      void *key, void *value,
						      u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
						 flags);
}

static int
__htab_map_lookup_and_delete_batch(struct bpf_map *map,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr,
				   bool do_delete, bool is_lru_map,
				   bool is_percpu)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	u32 batch, max_count, size, bucket_size, map_id;
	struct htab_elem *node_to_free = NULL;
	u64 elem_map_flags, map_flags;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags = 0;
	bool locked = false;
	struct htab_elem *l;
	struct bucket *b;
	int ret = 0;

	elem_map_flags = attr->batch.elem_flags;
	if ((elem_map_flags & ~BPF_F_LOCK) ||
	    ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	map_flags = attr->batch.flags;
	if (map_flags)
		return -EINVAL;

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	batch = 0;
	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
		return -EFAULT;

	if (batch >= htab->n_buckets)
		return -ENOENT;

	key_size = htab->map.key_size;
	roundup_key_size = round_up(htab->map.key_size, 8);
	value_size = htab->map.value_size;
	size = round_up(value_size, 8);
	if (is_percpu)
		value_size = size * num_possible_cpus();
	total = 0;
	/* while experimenting with hash tables with sizes ranging from 10 to
	 * 1000, it was observed that a bucket can have up to 5 entries.
	 */
	bucket_size = 5;
1757
1758 alloc:
1759 /* We cannot do copy_from_user or copy_to_user inside
1760 * the rcu_read_lock. Allocate enough space here.
1761 */
1762 keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1763 values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1764 if (!keys || !values) {
1765 ret = -ENOMEM;
1766 goto after_loop;
1767 }
1768
1769 again:
1770 bpf_disable_instrumentation();
1771 rcu_read_lock();
1772 again_nocopy:
1773 dst_key = keys;
1774 dst_val = values;
1775 b = &htab->buckets[batch];
1776 head = &b->head;
1777 /* do not grab the lock unless need it (bucket_cnt > 0). */
1778 if (locked) {
1779 ret = htab_lock_bucket(htab, b, batch, &flags);
1780 if (ret) {
1781 rcu_read_unlock();
1782 bpf_enable_instrumentation();
1783 goto after_loop;
1784 }
1785 }
1786
1787 bucket_cnt = 0;
1788 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1789 bucket_cnt++;
1790
1791 if (bucket_cnt && !locked) {
1792 locked = true;
1793 goto again_nocopy;
1794 }
1795
1796 if (bucket_cnt > (max_count - total)) {
1797 if (total == 0)
1798 ret = -ENOSPC;
1799 /* Note that since bucket_cnt > 0 here, it is implicit
1800 * that the locked was grabbed, so release it.
1801 */
1802 htab_unlock_bucket(htab, b, batch, flags);
1803 rcu_read_unlock();
1804 bpf_enable_instrumentation();
1805 goto after_loop;
1806 }
1807
1808 if (bucket_cnt > bucket_size) {
1809 bucket_size = bucket_cnt;
1810 /* Note that since bucket_cnt > 0 here, it is implicit
1811 * that the locked was grabbed, so release it.
1812 */
1813 htab_unlock_bucket(htab, b, batch, flags);
1814 rcu_read_unlock();
1815 bpf_enable_instrumentation();
1816 kvfree(keys);
1817 kvfree(values);
1818 goto alloc;
1819 }
1820
1821 /* Next block is only safe to run if you have grabbed the lock */
1822 if (!locked)
1823 goto next_batch;
1824
1825 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1826 memcpy(dst_key, l->key, key_size);
1827
1828 if (is_percpu) {
1829 int off = 0, cpu;
1830 void __percpu *pptr;
1831
1832 pptr = htab_elem_get_ptr(l, map->key_size);
1833 for_each_possible_cpu(cpu) {
1834 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
1835 check_and_init_map_value(&htab->map, dst_val + off);
1836 off += size;
1837 }
1838 } else {
1839 value = l->key + roundup_key_size;
1840 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1841 struct bpf_map **inner_map = value;
1842
1843 /* Actual value is the id of the inner map */
1844 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1845 value = &map_id;
1846 }
1847
1848 if (elem_map_flags & BPF_F_LOCK)
1849 copy_map_value_locked(map, dst_val, value,
1850 true);
1851 else
1852 copy_map_value(map, dst_val, value);
1853 /* Zeroing special fields in the temp buffer */
1854 check_and_init_map_value(map, dst_val);
1855 }
1856 if (do_delete) {
1857 hlist_nulls_del_rcu(&l->hash_node);
1858
1859 /* bpf_lru_push_free() will acquire lru_lock, which
1860 * may cause deadlock. See comments in function
1861 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1862 * after releasing the bucket lock.
1863 *
1864 * For htab of maps, htab_put_fd_value() in
1865 * free_htab_elem() may acquire a spinlock with bucket
1866 * lock being held and it violates the lock rule, so
1867 * invoke free_htab_elem() after unlock as well.
1868 */
1869 l->batch_flink = node_to_free;
1870 node_to_free = l;
1871 }
1872 dst_key += key_size;
1873 dst_val += value_size;
1874 }
1875
1876 htab_unlock_bucket(htab, b, batch, flags);
1877 locked = false;
1878
1879 while (node_to_free) {
1880 l = node_to_free;
1881 node_to_free = node_to_free->batch_flink;
1882 if (is_lru_map)
1883 htab_lru_push_free(htab, l);
1884 else
1885 free_htab_elem(htab, l);
1886 }
1887
1888 next_batch:
1889 /* If we are not copying data, we can go to next bucket and avoid
1890 * unlocking the rcu.
1891 */
1892 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1893 batch++;
1894 goto again_nocopy;
1895 }
1896
1897 rcu_read_unlock();
1898 bpf_enable_instrumentation();
1899 if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1900 key_size * bucket_cnt) ||
1901 copy_to_user(uvalues + total * value_size, values,
1902 value_size * bucket_cnt))) {
1903 ret = -EFAULT;
1904 goto after_loop;
1905 }
1906
1907 total += bucket_cnt;
1908 batch++;
1909 if (batch >= htab->n_buckets) {
1910 ret = -ENOENT;
1911 goto after_loop;
1912 }
1913 goto again;
1914
1915 after_loop:
1916 if (ret == -EFAULT)
1917 goto out;
1918
1919 /* copy # of entries and next batch */
1920 ubatch = u64_to_user_ptr(attr->batch.out_batch);
1921 if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1922 put_user(total, &uattr->batch.count))
1923 ret = -EFAULT;
1924
1925 out:
1926 kvfree(keys);
1927 kvfree(values);
1928 return ret;
1929 }
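
/* A minimal userspace sketch of driving the batch interface above through
 * libbpf's bpf_map_lookup_batch() wrapper. This is illustrative and not
 * part of this file; 'fd' and the u32 key/value sizes are assumptions:
 *
 *	__u32 in_batch = 0, out_batch = 0, count;
 *	__u32 keys[64], vals[64];
 *	void *in = NULL;
 *	int err;
 *
 *	do {
 *		count = 64;
 *		err = bpf_map_lookup_batch(fd, in, &out_batch, keys, vals,
 *					   &count, NULL);
 *		// 'count' now holds the number of elements copied out and
 *		// 'out_batch' the position to resume from.
 *		in_batch = out_batch;
 *		in = &in_batch;
 *	} while (!err);
 *	// The walk is complete when the call fails with ENOENT.
 */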

static int
htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, true);
}

static int
htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, true);
}

static int
htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
		      union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, false);
}

static int
htab_map_lookup_and_delete_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, false);
}

static int
htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, true);
}

static int
htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					    const union bpf_attr *attr,
					    union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, true);
}

static int
htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, false);
}

static int
htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
				     const union bpf_attr *attr,
				     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, false);
}

struct bpf_iter_seq_hash_map_info {
	struct bpf_map *map;
	struct bpf_htab *htab;
	void *percpu_value_buf; /* non-NULL means percpu hash */
	u32 bucket_id;
	u32 skip_elems;
};
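
/*
 * bucket_id and skip_elems record the iterator position so that the walk
 * can resume after each seq_file buffer flush: skip_elems counts how many
 * elements of the current bucket have already been emitted. If the bucket
 * is modified between stops, elements may be missed or seen twice, as the
 * comment in bpf_hash_map_seq_find_next() notes.
 */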

static struct htab_elem *
bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
			   struct htab_elem *prev_elem)
{
	const struct bpf_htab *htab = info->htab;
	u32 skip_elems = info->skip_elems;
	u32 bucket_id = info->bucket_id;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	struct bucket *b;
	u32 i, count;

	if (bucket_id >= htab->n_buckets)
		return NULL;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		/* if there was no update/deletion on this bucket, prev_elem
		 * should still be valid and we won't skip elements.
		 */
		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
		if (elem)
			return elem;

		/* not found, unlock and go to the next bucket */
		b = &htab->buckets[bucket_id++];
		rcu_read_unlock();
		skip_elems = 0;
	}

	for (i = bucket_id; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();

		count = 0;
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			if (count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return elem;
			}
			count++;
		}

		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	struct htab_elem *elem;

	elem = bpf_hash_map_seq_find_next(info, NULL);
	if (!elem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return elem;
}

static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_hash_map_seq_find_next(info, v);
}

static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
{
	struct bpf_iter_seq_hash_map_info *info = seq->private;
	u32 roundup_key_size, roundup_value_size;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	int ret = 0, off = 0, cpu;
	struct bpf_prog *prog;
	void __percpu *pptr;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, elem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (elem) {
			roundup_key_size = round_up(map->key_size, 8);
			ctx.key = elem->key;
			if (!info->percpu_value_buf) {
				ctx.value = elem->key + roundup_key_size;
			} else {
				roundup_value_size = round_up(map->value_size, 8);
				pptr = htab_elem_get_ptr(elem, map->key_size);
				for_each_possible_cpu(cpu) {
					copy_map_value_long(map, info->percpu_value_buf + off,
							    per_cpu_ptr(pptr, cpu));
					check_and_init_map_value(map, info->percpu_value_buf + off);
					off += roundup_value_size;
				}
				ctx.value = info->percpu_value_buf;
			}
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_hash_map_seq_show(seq, v);
}

static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
{
	/* A NULL element tells the bpf prog that the iteration is complete;
	 * otherwise drop the RCU read lock taken by seq_find_next().
	 */
	if (!v)
		(void)__bpf_hash_map_seq_show(seq, NULL);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_hash_map(void *priv_data,
				  struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	seq_info->htab = container_of(map, struct bpf_htab, map);
	return 0;
}

static void bpf_iter_fini_hash_map(void *priv_data)
{
	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_hash_map_seq_ops = {
	.start	= bpf_hash_map_seq_start,
	.next	= bpf_hash_map_seq_next,
	.stop	= bpf_hash_map_seq_stop,
	.show	= bpf_hash_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_hash_map_seq_ops,
	.init_seq_private	= bpf_iter_init_hash_map,
	.fini_seq_private	= bpf_iter_fini_hash_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
};
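
/* A minimal sketch of a BPF iterator program consuming the seq_file
 * machinery above (illustrative, not part of this file; it assumes a map
 * with u32 keys and values and the usual libbpf headers):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key, *val = ctx->value;
 *
 *		// key/value are NULL on the post-iteration call made from
 *		// bpf_hash_map_seq_stop().
 *		if (!key || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %u\n", *key, *val);
 *		return 0;
 *	}
 */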

static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	struct htab_elem *elem;
	u32 roundup_key_size;
	int i, num_elems = 0;
	void __percpu *pptr;
	struct bucket *b;
	void *key, *val;
	bool is_percpu;
	u64 ret = 0;

	if (flags != 0)
		return -EINVAL;

	is_percpu = htab_is_percpu(htab);

	roundup_key_size = round_up(map->key_size, 8);
	/* Disable migration so that the percpu value prepared here is the
	 * same one the bpf program would see with bpf_map_lookup_elem().
	 */
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		b = &htab->buckets[i];
		rcu_read_lock();
		head = &b->head;
		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
			key = elem->key;
			if (is_percpu) {
				/* current cpu value for percpu map */
				pptr = htab_elem_get_ptr(elem, map->key_size);
				val = this_cpu_ptr(pptr);
			} else {
				val = elem->key + roundup_key_size;
			}
			num_elems++;
			ret = callback_fn((u64)(long)map, (u64)(long)key,
					  (u64)(long)val, (u64)(long)callback_ctx, 0);
			/* return value: 0 - continue, 1 - stop and return */
			if (ret) {
				rcu_read_unlock();
				goto out;
			}
		}
		rcu_read_unlock();
	}
out:
	if (is_percpu)
		migrate_enable();
	return num_elems;
}
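
/* A minimal sketch of the BPF program side of the for_each callback above
 * (illustrative, not part of this file; 'htab' and the u32 types are
 * assumptions for the example):
 *
 *	static long count_elems(struct bpf_map *map, __u32 *key, __u32 *val,
 *				void *ctx)
 *	{
 *		(*(__u32 *)ctx)++;
 *		return 0;	// 0 - continue, 1 - stop the walk
 *	}
 *
 *	__u32 cnt = 0;
 *	bpf_for_each_map_elem(&htab, count_elems, &cnt, 0);
 */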

static u64 htab_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 value_size = round_up(htab->map.value_size, 8);
	bool prealloc = htab_is_prealloc(htab);
	bool percpu = htab_is_percpu(htab);
	bool lru = htab_is_lru(htab);
	u64 num_entries;
	u64 usage = sizeof(struct bpf_htab);

	usage += sizeof(struct bucket) * htab->n_buckets;
	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
	if (prealloc) {
		num_entries = map->max_entries;
		if (htab_has_extra_elems(htab))
			num_entries += num_possible_cpus();

		usage += htab->elem_size * num_entries;

		if (percpu)
			usage += value_size * num_possible_cpus() * num_entries;
		else if (!lru)
			usage += sizeof(struct htab_elem *) * num_possible_cpus();
	} else {
#define LLIST_NODE_SZ sizeof(struct llist_node)

		num_entries = htab->use_percpu_counter ?
				      percpu_counter_sum(&htab->pcount) :
				      atomic_read(&htab->count);
		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
		if (percpu) {
			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
			usage += value_size * num_possible_cpus() * num_entries;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_release_uref = htab_map_free_timers,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l)
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark LRU map elements here, in order to not mess up
	 * the eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
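
/* A minimal userspace sketch of the layout bpf_percpu_hash_copy() fills in
 * (illustrative, not part of this file): the value buffer passed to
 * bpf_map_lookup_elem() must hold round_up(value_size, 8) bytes for every
 * possible CPU, e.g. for u64 values:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));
 *
 *	err = bpf_map_lookup_elem(map_fd, &key, vals);
 *	// On success, vals[i] holds the value of CPU i.
 */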

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}
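
/* For reference, the function above renders a pinned percpu hash element
 * in bpffs roughly as follows (illustrative output for a two-CPU system
 * with integer keys and values; the exact rendering depends on the map's
 * BTF types):
 *
 *	1: {
 *		cpu0: 5
 *		cpu1: 7
 *	}
 */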

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_hash_elem,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab_lru_percpu),
	.map_btf_id = &htab_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(map, ptr, false);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(map, ptr, false);

	return ret;
}
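
/* A minimal userspace sketch of populating a hash-of-maps (illustrative,
 * not part of this file; 'inner_fd' is an assumed fd of a previously
 * created inner map). The outer value written via the syscall is an inner
 * map fd; syscall-side lookups return the inner map id instead:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
 *	int outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
 *				      sizeof(__u32), sizeof(__u32), 8, &opts);
 *
 *	__u32 key = 1;
 *	bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */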

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	/* if (ret == NULL) skip the dereference and return NULL */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	/* advance past the key to the stored inner map pointer */
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	/* load the inner map pointer itself */
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = htab_map_mem_usage,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
};