xref: /openbmc/linux/kernel/bpf/hashtab.c (revision c43622d6)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#include <linux/bpf_mem_alloc.h>

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)

#define BATCH_OPS(_name)			\
	.map_lookup_batch =			\
	_name##_map_lookup_batch,		\
	.map_lookup_and_delete_batch =		\
	_name##_map_lookup_and_delete_batch,	\
	.map_update_batch =			\
	generic_map_update_batch,		\
	.map_delete_batch =			\
	generic_map_delete_batch

/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context including perf, kprobes and
 * tracing. As there are almost no limits on where perf, kprobes and
 * tracing can be invoked from, the lock operations need to be protected
 * against deadlocks. Deadlocks can be caused by recursion and by an
 * invocation in the lock held section when functions which acquire this
 * lock are invoked from sys_bpf(). BPF recursion is prevented by
 * incrementing the per CPU variable bpf_prog_active, which prevents BPF
 * programs attached to perf events, kprobes and tracing from being
 * invoked before the prior invocation from one of these contexts has
 * completed. sys_bpf() uses the same mechanism by pinning the task to the
 * current CPU and incrementing the recursion protection across the map
 * operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure deterministic behaviour these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
 * it was only safe to use a raw spinlock for a preallocated hash map on an
 * RT kernel, because there was no memory allocation within the lock held
 * sections. However, now that the hash map has been fully converted to use
 * bpf_mem_alloc, memory allocation for a non-preallocated hash map is
 * non-synchronous, so it is safe to always use a raw spinlock for the
 * bucket lock.
 */
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t raw_lock;
};

#define HASHTAB_MAP_LOCK_COUNT 8
#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)

struct bpf_htab {
	struct bpf_map map;
	struct bpf_mem_alloc ma;
	struct bpf_mem_alloc pcpu_ma;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	/* the number of elements in a non-preallocated hashtable is kept
	 * in either pcount or count
	 */
	struct percpu_counter pcount;
	atomic_t count;
	bool use_percpu_counter;
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
	u32 hashrnd;
	struct lock_class_key lockdep_key;
	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct pcpu_freelist_node fnode;
				struct htab_elem *batch_flink;
			};
		};
	};
	union {
		/* pointer to per-cpu pointer */
		void *ptr_to_pptr;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[] __aligned(8);
};

static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static void htab_init_buckets(struct bpf_htab *htab)
{
	unsigned int i;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].raw_lock);
		lockdep_set_class(&htab->buckets[i].raw_lock,
				  &htab->lockdep_key);
		cond_resched();
	}
}

static inline int htab_lock_bucket(const struct bpf_htab *htab,
				   struct bucket *b, u32 hash,
				   unsigned long *pflags)
{
	unsigned long flags;

	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);

	preempt_disable();
	local_irq_save(flags);
	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
		__this_cpu_dec(*(htab->map_locked[hash]));
		local_irq_restore(flags);
		preempt_enable();
		return -EBUSY;
	}

	raw_spin_lock(&b->raw_lock);
	*pflags = flags;

	return 0;
}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
				      struct bucket *b, u32 hash,
				      unsigned long flags)
{
	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
	raw_spin_unlock(&b->raw_lock);
	__this_cpu_dec(*(htab->map_locked[hash]));
	local_irq_restore(flags);
	preempt_enable();
}
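
/*
 * Usage sketch for the two helpers above (illustrative only, not part of
 * the original source): every writer in this file follows the same
 * pattern and must bail out when htab_lock_bucket() reports same-CPU
 * recursion:
 *
 *	struct bucket *b = __select_bucket(htab, hash);
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = htab_lock_bucket(htab, b, hash, &flags);
 *	if (ret)
 *		return ret;	/* -EBUSY: re-entered on this CPU */
 *	/* ... mutate b->head under the raw spinlock ... */
 *	htab_unlock_bucket(htab, b, hash, flags);
 */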
182 
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}
207 
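/*
 * Element layout sketch (illustrative): key and value are stored inline
 * behind struct htab_elem, which is what the accessors above and below
 * rely on:
 *
 *	+------------------+-----------------+--------------------------+
 *	| struct htab_elem | key bytes       | value / per-cpu pointer  |
 *	+------------------+-----------------+--------------------------+
 *
 * A per-cpu map stores its __percpu pointer right after the raw key
 * (l->key + key_size), while regular and fd maps place the value at the
 * 8-byte aligned offset l->key + round_up(key_size, 8).
 */
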
static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
}

static bool htab_has_extra_elems(struct bpf_htab *htab)
{
	return !htab_is_percpu(htab) && !htab_is_lru(htab);
}

static void htab_free_prealloced_timers(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
		cond_resched();
	}
}

static void htab_free_prealloced_fields(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (IS_ERR_OR_NULL(htab->map.record))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();
	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		if (htab_is_percpu(htab)) {
			void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
			int cpu;

			for_each_possible_cpu(cpu) {
				bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
				cond_resched();
			}
		} else {
			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
			cond_resched();
		}
		cond_resched();
	}
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 * (bucket_lock). If both locks need to be acquired together, the lock
 * order is always lru_lock -> bucket_lock and this only happens in
 * bpf_lru_list.c logic. For example, certain code path of
 * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
 * will acquire lru_lock first followed by acquiring bucket_lock.
 *
 * In hashtab.c, to avoid deadlock, lock acquisition of
 * bucket_lock followed by lru_lock is not allowed. In such cases,
 * bucket_lock needs to be released first before acquiring lru_lock.
 */
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		bpf_map_inc_elem_count(&htab->map);
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}
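
/*
 * Hedged example of the ordering rule above, taken from
 * htab_lru_map_delete_elem() later in this file: the element is unlinked
 * under bucket_lock, the bucket lock is dropped, and only then is
 * htab_lru_push_free() called, which may take lru_lock internally:
 *
 *	htab_lock_bucket(htab, b, hash, &flags);
 *	l = lookup_elem_raw(head, hash, key, key_size);
 *	if (l)
 *		hlist_nulls_del_rcu(&l->hash_node);
 *	htab_unlock_bucket(htab, b, hash, flags);
 *	if (l)
 *		htab_lru_push_free(htab, l);
 */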
316 
static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
					    GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
				    GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (zero_seed && !capable(CAP_SYS_ADMIN))
		/* Guard against local DoS, and discourage production use. */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
	   sizeof(struct htab_elem))
		/* if key_size + value_size is bigger, the user space won't be
		 * able to access the elements via bpf syscall. This check
		 * also makes sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}
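
/*
 * Userspace view of these checks (a hedged sketch; bpf_map_create() and
 * LIBBPF_OPTS() are libbpf conveniences and the map name is made up).
 * A request violating any rule in htab_map_alloc_check() fails with the
 * errno seen above, e.g. EPERM for BPF_F_ZERO_SEED without CAP_SYS_ADMIN:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_NO_PREALLOC);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_htab",
 *				sizeof(__u32), sizeof(__u64), 1024, &opts);
 */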
467 
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	lockdep_register_key(&htab->lockdep_key);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* ensure each CPU's lru list has >=1 element.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
	 * into UB on 32-bit arches, so check that first
	 */
	err = -E2BIG;
	if (htab->map.max_entries > 1UL << 31)
		goto free_htab;

	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* check for u32 overflow */
	if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	err = bpf_map_init_elem_count(&htab->map);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_elem_count;

	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
							   sizeof(int),
							   sizeof(int),
							   GFP_USER);
		if (!htab->map_locked[i])
			goto free_map_locked;
	}

	if (htab->map.map_flags & BPF_F_ZERO_SEED)
		htab->hashrnd = 0;
	else
		htab->hashrnd = get_random_u32();

	htab_init_buckets(htab);

/* compute_batch_value() computes batch value as num_online_cpus() * 2
 * and __percpu_counter_compare() needs
 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
 * for percpu_counter to be faster than atomic_t. In practice the average bpf
 * hash map size is 10k, which means that a system with 64 cpus will fill
 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
 * define our own batch count as 32, so that a 10k hash map can be filled up
 * to 80%:
 * 10k - 8k > 32 _batch_ * 64 _cpus_
 * and __percpu_counter_compare() will still be fast. At that point hash map
 * collisions will dominate its performance anyway. Assume that a hash map
 * filled to 50+% isn't going to be O(1) and use the following formula to
 * choose between percpu_counter and atomic_t.
 */
#define PERCPU_COUNTER_BATCH 32
	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
		htab->use_percpu_counter = true;

	if (htab->use_percpu_counter) {
		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
		if (err)
			goto free_map_locked;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_map_locked;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	} else {
		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
		if (err)
			goto free_map_locked;
		if (percpu) {
			err = bpf_mem_alloc_init(&htab->pcpu_ma,
						 round_up(htab->map.value_size, 8), true);
			if (err)
				goto free_map_locked;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_map_locked:
	if (htab->use_percpu_counter)
		percpu_counter_destroy(&htab->pcount);
	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
		free_percpu(htab->map_locked[i]);
	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);
free_elem_count:
	bpf_map_free_elem_count(&htab->map);
free_htab:
	lockdep_unregister_key(&htab->lockdep_key);
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
	if (likely(key_len % 4 == 0))
		return jhash2(key, key_len / 4, hashrnd);
	return jhash(key, key_len, hashrnd);
}
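
/*
 * Worked example (illustrative): a map with key_size == 8 hashes two u32
 * words via jhash2(), while key_size == 6 falls back to the byte-wise
 * jhash(). With BPF_F_ZERO_SEED, hashrnd is 0, which makes bucket
 * placement reproducible across boots (useful for tests):
 *
 *	u64 key = 42;
 *	u32 hash = htab_map_hash(&key, sizeof(key), htab->hashrnd);
 *	struct bucket *b = __select_bucket(htab, hash);
 */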
625 
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the link list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}
static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
							void *key, const bool mark)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		if (mark)
			bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, true);
}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, false);
}

static int htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void check_and_free_fields(struct bpf_htab *htab,
				  struct htab_elem *elem)
{
	if (htab_is_percpu(htab)) {
		void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
		int cpu;

		for_each_possible_cpu(cpu)
			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
	} else {
		void *map_value = elem->key + round_up(htab->map.key_size, 8);

		bpf_obj_free_fields(htab->map.record, map_value);
	}
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;
	int ret;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
	if (ret)
		return false;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			check_and_free_fields(htab, l);
			bpf_map_dec_elem_count(&htab->map);
			break;
		}

	htab_unlock_bucket(htab, b, tgt_l->hash, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
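
/*
 * Typical userspace iteration driving this callback (hedged libbpf
 * sketch; 'fd' is a map fd and the __u32 key type is an assumption).
 * A NULL key yields the first key, -ENOENT ends the walk:
 *
 *	__u32 key, next_key;
 *	void *prev = NULL;
 *
 *	while (bpf_map_get_next_key(fd, prev, &next_key) == 0) {
 *		/* process next_key ... */
 *		key = next_key;
 *		prev = &key;
 *	}
 *
 * If the passed key was deleted meanwhile, the code above falls back to
 * find_first_elem with i == 0, i.e. the walk restarts from the first
 * bucket and may revisit elements under concurrent deletions.
 */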
891 
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	check_and_free_fields(htab, l);
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
	bpf_mem_cache_free(&htab->ma, l);
}

static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;
	void *ptr;

	if (map->ops->map_fd_put_ptr) {
		ptr = fd_htab_map_get_ptr(map, l);
		map->ops->map_fd_put_ptr(map, ptr, true);
	}
}

static bool is_map_full(struct bpf_htab *htab)
{
	if (htab->use_percpu_counter)
		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
						PERCPU_COUNTER_BATCH) >= 0;
	return atomic_read(&htab->count) >= htab->map.max_entries;
}

static void inc_elem_count(struct bpf_htab *htab)
{
	bpf_map_inc_elem_count(&htab->map);

	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
	else
		atomic_inc(&htab->count);
}

static void dec_elem_count(struct bpf_htab *htab)
{
	bpf_map_dec_elem_count(&htab->map);

	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
	else
		atomic_dec(&htab->count);
}


static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	htab_put_fd_value(htab, l);

	if (htab_is_prealloc(htab)) {
		bpf_map_dec_elem_count(&htab->map);
		check_and_free_fields(htab, l);
		__pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		dec_elem_count(htab);
		htab_elem_free(htab, l);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
			off += size;
		}
	}
}

static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	/* When not setting the initial value on all cpus, zero-fill element
	 * values for other cpus. Otherwise, bpf program has no way to ensure
	 * known initial values for cpus other than current one
	 * (onallcpus=false always when coming from bpf prog).
	 */
	if (!onallcpus) {
		int current_cpu = raw_smp_processor_id();
		int cpu;

		for_each_possible_cpu(cpu) {
			if (cpu == current_cpu)
				copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
			else /* Since elem is preallocated, we cannot touch special fields */
				zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
		}
	} else {
		pcpu_copy_value(htab, pptr, value, onallcpus);
	}
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab->map.value_size;
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			htab_put_fd_value(htab, old_elem);
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = __pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
			bpf_map_inc_elem_count(&htab->map);
		}
	} else {
		if (is_map_full(htab))
			if (!old_elem)
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				return ERR_PTR(-E2BIG);
		inc_elem_count(htab);
		l_new = bpf_mem_cache_alloc(&htab->ma);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
			if (!pptr) {
				bpf_mem_cache_free(&htab->ma, l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
			l_new->ptr_to_pptr = pptr;
			pptr = *(void **)pptr;
		}

		pcpu_init_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else if (fd_htab_map_needs_adjust(htab)) {
		size = round_up(size, 8);
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	} else {
		copy_map_value(&htab->map,
			       l_new->key + round_up(key_size, 8),
			       value);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	dec_elem_count(htab);
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
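
/*
 * Flag semantics enforced above, seen from the caller's side
 * (illustrative; 'fd', 'key' and 'val' are hypothetical):
 *
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);     // insert or update
 *	bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST); // -EEXIST if present
 *	bpf_map_update_elem(fd, &key, &val, BPF_EXIST);   // -ENOENT if absent
 *
 * BPF_F_LOCK may be or'ed in when the value embeds a struct bpf_spin_lock;
 * check_flags() masks it out before comparing against the update mode.
 */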
1094 
1095 /* Called from syscall or from eBPF program */
htab_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags)1096 static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1097 				 u64 map_flags)
1098 {
1099 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1100 	struct htab_elem *l_new = NULL, *l_old;
1101 	struct hlist_nulls_head *head;
1102 	unsigned long flags;
1103 	struct bucket *b;
1104 	u32 key_size, hash;
1105 	int ret;
1106 
1107 	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1108 		/* unknown flags */
1109 		return -EINVAL;
1110 
1111 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1112 		     !rcu_read_lock_bh_held());
1113 
1114 	key_size = map->key_size;
1115 
1116 	hash = htab_map_hash(key, key_size, htab->hashrnd);
1117 
1118 	b = __select_bucket(htab, hash);
1119 	head = &b->head;
1120 
1121 	if (unlikely(map_flags & BPF_F_LOCK)) {
1122 		if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1123 			return -EINVAL;
1124 		/* find an element without taking the bucket lock */
1125 		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1126 					      htab->n_buckets);
1127 		ret = check_flags(htab, l_old, map_flags);
1128 		if (ret)
1129 			return ret;
1130 		if (l_old) {
1131 			/* grab the element lock and update value in place */
1132 			copy_map_value_locked(map,
1133 					      l_old->key + round_up(key_size, 8),
1134 					      value, false);
1135 			return 0;
1136 		}
1137 		/* fall through, grab the bucket lock and lookup again.
1138 		 * 99.9% chance that the element won't be found,
1139 		 * but second lookup under lock has to be done.
1140 		 */
1141 	}
1142 
1143 	ret = htab_lock_bucket(htab, b, hash, &flags);
1144 	if (ret)
1145 		return ret;
1146 
1147 	l_old = lookup_elem_raw(head, hash, key, key_size);
1148 
1149 	ret = check_flags(htab, l_old, map_flags);
1150 	if (ret)
1151 		goto err;
1152 
1153 	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1154 		/* first lookup without the bucket lock didn't find the element,
1155 		 * but second lookup with the bucket lock found it.
1156 		 * This case is highly unlikely, but has to be dealt with:
1157 		 * grab the element lock in addition to the bucket lock
1158 		 * and update element in place
1159 		 */
1160 		copy_map_value_locked(map,
1161 				      l_old->key + round_up(key_size, 8),
1162 				      value, false);
1163 		ret = 0;
1164 		goto err;
1165 	}
1166 
1167 	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1168 				l_old);
1169 	if (IS_ERR(l_new)) {
1170 		/* all pre-allocated elements are in use or memory exhausted */
1171 		ret = PTR_ERR(l_new);
1172 		goto err;
1173 	}
1174 
1175 	/* add new element to the head of the list, so that
1176 	 * concurrent search will find it before old elem
1177 	 */
1178 	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1179 	if (l_old) {
1180 		hlist_nulls_del_rcu(&l_old->hash_node);
1181 		if (!htab_is_prealloc(htab))
1182 			free_htab_elem(htab, l_old);
1183 		else
1184 			check_and_free_fields(htab, l_old);
1185 	}
1186 	ret = 0;
1187 err:
1188 	htab_unlock_bucket(htab, b, hash, flags);
1189 	return ret;
1190 }
1191 
htab_lru_push_free(struct bpf_htab * htab,struct htab_elem * elem)1192 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1193 {
1194 	check_and_free_fields(htab, elem);
1195 	bpf_map_dec_elem_count(&htab->map);
1196 	bpf_lru_push_free(&htab->lru, &elem->lru_node);
1197 }
1198 
htab_lru_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags)1199 static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1200 				     u64 map_flags)
1201 {
1202 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1203 	struct htab_elem *l_new, *l_old = NULL;
1204 	struct hlist_nulls_head *head;
1205 	unsigned long flags;
1206 	struct bucket *b;
1207 	u32 key_size, hash;
1208 	int ret;
1209 
1210 	if (unlikely(map_flags > BPF_EXIST))
1211 		/* unknown flags */
1212 		return -EINVAL;
1213 
1214 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1215 		     !rcu_read_lock_bh_held());
1216 
1217 	key_size = map->key_size;
1218 
1219 	hash = htab_map_hash(key, key_size, htab->hashrnd);
1220 
1221 	b = __select_bucket(htab, hash);
1222 	head = &b->head;
1223 
1224 	/* For LRU, we need to alloc before taking bucket's
1225 	 * spinlock because getting free nodes from LRU may need
1226 	 * to remove older elements from htab and this removal
1227 	 * operation will need a bucket lock.
1228 	 */
1229 	l_new = prealloc_lru_pop(htab, key, hash);
1230 	if (!l_new)
1231 		return -ENOMEM;
1232 	copy_map_value(&htab->map,
1233 		       l_new->key + round_up(map->key_size, 8), value);
1234 
1235 	ret = htab_lock_bucket(htab, b, hash, &flags);
1236 	if (ret)
1237 		goto err_lock_bucket;
1238 
1239 	l_old = lookup_elem_raw(head, hash, key, key_size);
1240 
1241 	ret = check_flags(htab, l_old, map_flags);
1242 	if (ret)
1243 		goto err;
1244 
1245 	/* add new element to the head of the list, so that
1246 	 * concurrent search will find it before old elem
1247 	 */
1248 	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1249 	if (l_old) {
1250 		bpf_lru_node_set_ref(&l_new->lru_node);
1251 		hlist_nulls_del_rcu(&l_old->hash_node);
1252 	}
1253 	ret = 0;
1254 
1255 err:
1256 	htab_unlock_bucket(htab, b, hash, flags);
1257 
1258 err_lock_bucket:
1259 	if (ret)
1260 		htab_lru_push_free(htab, l_new);
1261 	else if (l_old)
1262 		htab_lru_push_free(htab, l_old);
1263 
1264 	return ret;
1265 }
1266 
__htab_percpu_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags,bool onallcpus)1267 static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1268 					  void *value, u64 map_flags,
1269 					  bool onallcpus)
1270 {
1271 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1272 	struct htab_elem *l_new = NULL, *l_old;
1273 	struct hlist_nulls_head *head;
1274 	unsigned long flags;
1275 	struct bucket *b;
1276 	u32 key_size, hash;
1277 	int ret;
1278 
1279 	if (unlikely(map_flags > BPF_EXIST))
1280 		/* unknown flags */
1281 		return -EINVAL;
1282 
1283 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1284 		     !rcu_read_lock_bh_held());
1285 
1286 	key_size = map->key_size;
1287 
1288 	hash = htab_map_hash(key, key_size, htab->hashrnd);
1289 
1290 	b = __select_bucket(htab, hash);
1291 	head = &b->head;
1292 
1293 	ret = htab_lock_bucket(htab, b, hash, &flags);
1294 	if (ret)
1295 		return ret;
1296 
1297 	l_old = lookup_elem_raw(head, hash, key, key_size);
1298 
1299 	ret = check_flags(htab, l_old, map_flags);
1300 	if (ret)
1301 		goto err;
1302 
1303 	if (l_old) {
1304 		/* per-cpu hash map can update value in-place */
1305 		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1306 				value, onallcpus);
1307 	} else {
1308 		l_new = alloc_htab_elem(htab, key, value, key_size,
1309 					hash, true, onallcpus, NULL);
1310 		if (IS_ERR(l_new)) {
1311 			ret = PTR_ERR(l_new);
1312 			goto err;
1313 		}
1314 		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1315 	}
1316 	ret = 0;
1317 err:
1318 	htab_unlock_bucket(htab, b, hash, flags);
1319 	return ret;
1320 }
1321 
__htab_lru_percpu_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags,bool onallcpus)1322 static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1323 					      void *value, u64 map_flags,
1324 					      bool onallcpus)
1325 {
1326 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1327 	struct htab_elem *l_new = NULL, *l_old;
1328 	struct hlist_nulls_head *head;
1329 	unsigned long flags;
1330 	struct bucket *b;
1331 	u32 key_size, hash;
1332 	int ret;
1333 
1334 	if (unlikely(map_flags > BPF_EXIST))
1335 		/* unknown flags */
1336 		return -EINVAL;
1337 
1338 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1339 		     !rcu_read_lock_bh_held());
1340 
1341 	key_size = map->key_size;
1342 
1343 	hash = htab_map_hash(key, key_size, htab->hashrnd);
1344 
1345 	b = __select_bucket(htab, hash);
1346 	head = &b->head;
1347 
1348 	/* For LRU, we need to alloc before taking bucket's
1349 	 * spinlock because LRU's elem alloc may need
1350 	 * to remove older elem from htab and this removal
1351 	 * operation will need a bucket lock.
1352 	 */
1353 	if (map_flags != BPF_EXIST) {
1354 		l_new = prealloc_lru_pop(htab, key, hash);
1355 		if (!l_new)
1356 			return -ENOMEM;
1357 	}
1358 
1359 	ret = htab_lock_bucket(htab, b, hash, &flags);
1360 	if (ret)
1361 		goto err_lock_bucket;
1362 
1363 	l_old = lookup_elem_raw(head, hash, key, key_size);
1364 
1365 	ret = check_flags(htab, l_old, map_flags);
1366 	if (ret)
1367 		goto err;
1368 
1369 	if (l_old) {
1370 		bpf_lru_node_set_ref(&l_old->lru_node);
1371 
1372 		/* per-cpu hash map can update value in-place */
1373 		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1374 				value, onallcpus);
1375 	} else {
1376 		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1377 				value, onallcpus);
1378 		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1379 		l_new = NULL;
1380 	}
1381 	ret = 0;
1382 err:
1383 	htab_unlock_bucket(htab, b, hash, flags);
1384 err_lock_bucket:
1385 	if (l_new) {
1386 		bpf_map_dec_elem_count(&htab->map);
1387 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1388 	}
1389 	return ret;
1390 }
1391 
htab_percpu_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags)1392 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1393 					void *value, u64 map_flags)
1394 {
1395 	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1396 }
1397 
htab_lru_percpu_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags)1398 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1399 					    void *value, u64 map_flags)
1400 {
1401 	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1402 						 false);
1403 }
1404 
1405 /* Called from syscall or from eBPF program */
htab_map_delete_elem(struct bpf_map * map,void * key)1406 static long htab_map_delete_elem(struct bpf_map *map, void *key)
1407 {
1408 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1409 	struct hlist_nulls_head *head;
1410 	struct bucket *b;
1411 	struct htab_elem *l;
1412 	unsigned long flags;
1413 	u32 hash, key_size;
1414 	int ret;
1415 
1416 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1417 		     !rcu_read_lock_bh_held());
1418 
1419 	key_size = map->key_size;
1420 
1421 	hash = htab_map_hash(key, key_size, htab->hashrnd);
1422 	b = __select_bucket(htab, hash);
1423 	head = &b->head;
1424 
1425 	ret = htab_lock_bucket(htab, b, hash, &flags);
1426 	if (ret)
1427 		return ret;
1428 
1429 	l = lookup_elem_raw(head, hash, key, key_size);
1430 
1431 	if (l) {
1432 		hlist_nulls_del_rcu(&l->hash_node);
1433 		free_htab_elem(htab, l);
1434 	} else {
1435 		ret = -ENOENT;
1436 	}
1437 
1438 	htab_unlock_bucket(htab, b, hash, flags);
1439 	return ret;
1440 }
1441 
htab_lru_map_delete_elem(struct bpf_map * map,void * key)1442 static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1443 {
1444 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1445 	struct hlist_nulls_head *head;
1446 	struct bucket *b;
1447 	struct htab_elem *l;
1448 	unsigned long flags;
1449 	u32 hash, key_size;
1450 	int ret;
1451 
1452 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1453 		     !rcu_read_lock_bh_held());
1454 
1455 	key_size = map->key_size;
1456 
1457 	hash = htab_map_hash(key, key_size, htab->hashrnd);
1458 	b = __select_bucket(htab, hash);
1459 	head = &b->head;
1460 
1461 	ret = htab_lock_bucket(htab, b, hash, &flags);
1462 	if (ret)
1463 		return ret;
1464 
1465 	l = lookup_elem_raw(head, hash, key, key_size);
1466 
1467 	if (l)
1468 		hlist_nulls_del_rcu(&l->hash_node);
1469 	else
1470 		ret = -ENOENT;
1471 
1472 	htab_unlock_bucket(htab, b, hash, flags);
1473 	if (l)
1474 		htab_lru_push_free(htab, l);
1475 	return ret;
1476 }
1477 
delete_all_elements(struct bpf_htab * htab)1478 static void delete_all_elements(struct bpf_htab *htab)
1479 {
1480 	int i;
1481 
1482 	/* It's called from a worker thread, so disable migration here,
1483 	 * since bpf_mem_cache_free() relies on that.
1484 	 */
1485 	migrate_disable();
1486 	for (i = 0; i < htab->n_buckets; i++) {
1487 		struct hlist_nulls_head *head = select_bucket(htab, i);
1488 		struct hlist_nulls_node *n;
1489 		struct htab_elem *l;
1490 
1491 		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1492 			hlist_nulls_del_rcu(&l->hash_node);
1493 			htab_elem_free(htab, l);
1494 		}
1495 	}
1496 	migrate_enable();
1497 }
1498 
htab_free_malloced_timers(struct bpf_htab * htab)1499 static void htab_free_malloced_timers(struct bpf_htab *htab)
1500 {
1501 	int i;
1502 
1503 	rcu_read_lock();
1504 	for (i = 0; i < htab->n_buckets; i++) {
1505 		struct hlist_nulls_head *head = select_bucket(htab, i);
1506 		struct hlist_nulls_node *n;
1507 		struct htab_elem *l;
1508 
1509 		hlist_nulls_for_each_entry(l, n, head, hash_node) {
1510 			/* We only free timer on uref dropping to zero */
1511 			bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
1512 		}
1513 		cond_resched_rcu();
1514 	}
1515 	rcu_read_unlock();
1516 }
1517 
htab_map_free_timers(struct bpf_map * map)1518 static void htab_map_free_timers(struct bpf_map *map)
1519 {
1520 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1521 
1522 	/* We only free timer on uref dropping to zero */
1523 	if (!btf_record_has_field(htab->map.record, BPF_TIMER))
1524 		return;
1525 	if (!htab_is_prealloc(htab))
1526 		htab_free_malloced_timers(htab);
1527 	else
1528 		htab_free_prealloced_timers(htab);
1529 }
1530 
1531 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
htab_map_free(struct bpf_map * map)1532 static void htab_map_free(struct bpf_map *map)
1533 {
1534 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1535 	int i;
1536 
1537 	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1538 	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
1539 	 * There is no need to synchronize_rcu() here to protect map elements.
1540 	 */
1541 
1542 	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
1543 	 * underneath and is reponsible for waiting for callbacks to finish
1544 	 * during bpf_mem_alloc_destroy().
1545 	 */
1546 	if (!htab_is_prealloc(htab)) {
1547 		delete_all_elements(htab);
1548 	} else {
1549 		htab_free_prealloced_fields(htab);
1550 		prealloc_destroy(htab);
1551 	}
1552 
1553 	bpf_map_free_elem_count(map);
1554 	free_percpu(htab->extra_elems);
1555 	bpf_map_area_free(htab->buckets);
1556 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
1557 	bpf_mem_alloc_destroy(&htab->ma);
1558 	if (htab->use_percpu_counter)
1559 		percpu_counter_destroy(&htab->pcount);
1560 	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1561 		free_percpu(htab->map_locked[i]);
1562 	lockdep_unregister_key(&htab->lockdep_key);
1563 	bpf_map_area_free(htab);
1564 }
1565 
htab_map_seq_show_elem(struct bpf_map * map,void * key,struct seq_file * m)1566 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1567 				   struct seq_file *m)
1568 {
1569 	void *value;
1570 
1571 	rcu_read_lock();
1572 
1573 	value = htab_map_lookup_elem(map, key);
1574 	if (!value) {
1575 		rcu_read_unlock();
1576 		return;
1577 	}
1578 
1579 	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1580 	seq_puts(m, ": ");
1581 	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1582 	seq_puts(m, "\n");
1583 
1584 	rcu_read_unlock();
1585 }
1586 
__htab_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,bool is_lru_map,bool is_percpu,u64 flags)1587 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1588 					     void *value, bool is_lru_map,
1589 					     bool is_percpu, u64 flags)
1590 {
1591 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1592 	struct hlist_nulls_head *head;
1593 	unsigned long bflags;
1594 	struct htab_elem *l;
1595 	u32 hash, key_size;
1596 	struct bucket *b;
1597 	int ret;
1598 
1599 	key_size = map->key_size;
1600 
1601 	hash = htab_map_hash(key, key_size, htab->hashrnd);
1602 	b = __select_bucket(htab, hash);
1603 	head = &b->head;
1604 
1605 	ret = htab_lock_bucket(htab, b, hash, &bflags);
1606 	if (ret)
1607 		return ret;
1608 
1609 	l = lookup_elem_raw(head, hash, key, key_size);
1610 	if (!l) {
1611 		ret = -ENOENT;
1612 	} else {
1613 		if (is_percpu) {
1614 			u32 roundup_value_size = round_up(map->value_size, 8);
1615 			void __percpu *pptr;
1616 			int off = 0, cpu;
1617 
1618 			pptr = htab_elem_get_ptr(l, key_size);
1619 			for_each_possible_cpu(cpu) {
1620 				copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
1621 				check_and_init_map_value(&htab->map, value + off);
1622 				off += roundup_value_size;
1623 			}
1624 		} else {
1625 			u32 roundup_key_size = round_up(map->key_size, 8);
1626 
1627 			if (flags & BPF_F_LOCK)
1628 				copy_map_value_locked(map, value, l->key +
1629 						      roundup_key_size,
1630 						      true);
1631 			else
1632 				copy_map_value(map, value, l->key +
1633 					       roundup_key_size);
1634 			/* Zeroing special fields in the temp buffer */
1635 			check_and_init_map_value(map, value);
1636 		}
1637 
1638 		hlist_nulls_del_rcu(&l->hash_node);
1639 		if (!is_lru_map)
1640 			free_htab_elem(htab, l);
1641 	}
1642 
1643 	htab_unlock_bucket(htab, b, hash, bflags);
1644 
1645 	if (is_lru_map && l)
1646 		htab_lru_push_free(htab, l);
1647 
1648 	return ret;
1649 }
1650 
htab_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags)1651 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1652 					   void *value, u64 flags)
1653 {
1654 	return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1655 						 flags);
1656 }
1657 
htab_percpu_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags)1658 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1659 						  void *key, void *value,
1660 						  u64 flags)
1661 {
1662 	return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1663 						 flags);
1664 }
1665 
htab_lru_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags)1666 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1667 					       void *value, u64 flags)
1668 {
1669 	return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1670 						 flags);
1671 }
1672 
htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags)1673 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1674 						      void *key, void *value,
1675 						      u64 flags)
1676 {
1677 	return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1678 						 flags);
1679 }
1680 
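/* Common implementation of the {lookup, lookup_and_delete} batch commands.
 * Buckets are processed one at a time: each bucket is snapshotted into
 * kernel buffers under the bucket lock, and the snapshot is copied to user
 * space only after the lock and the RCU read section have been dropped.
 * If a bucket holds more entries than the buffers can take, the buffers
 * are reallocated and the same bucket is retried.
 */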
1681 static int
1682 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1683 				   const union bpf_attr *attr,
1684 				   union bpf_attr __user *uattr,
1685 				   bool do_delete, bool is_lru_map,
1686 				   bool is_percpu)
1687 {
1688 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1689 	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1690 	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1691 	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1692 	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1693 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1694 	u32 batch, max_count, size, bucket_size, map_id;
1695 	struct htab_elem *node_to_free = NULL;
1696 	u64 elem_map_flags, map_flags;
1697 	struct hlist_nulls_head *head;
1698 	struct hlist_nulls_node *n;
1699 	unsigned long flags = 0;
1700 	bool locked = false;
1701 	struct htab_elem *l;
1702 	struct bucket *b;
1703 	int ret = 0;
1704 
1705 	elem_map_flags = attr->batch.elem_flags;
1706 	if ((elem_map_flags & ~BPF_F_LOCK) ||
1707 	    ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1708 		return -EINVAL;
1709 
1710 	map_flags = attr->batch.flags;
1711 	if (map_flags)
1712 		return -EINVAL;
1713 
1714 	max_count = attr->batch.count;
1715 	if (!max_count)
1716 		return 0;
1717 
1718 	if (put_user(0, &uattr->batch.count))
1719 		return -EFAULT;
1720 
1721 	batch = 0;
1722 	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1723 		return -EFAULT;
1724 
1725 	if (batch >= htab->n_buckets)
1726 		return -ENOENT;
1727 
1728 	key_size = htab->map.key_size;
1729 	roundup_key_size = round_up(htab->map.key_size, 8);
1730 	value_size = htab->map.value_size;
1731 	size = round_up(value_size, 8);
1732 	if (is_percpu)
1733 		value_size = size * num_possible_cpus();
1734 	total = 0;
1735 	/* while experimenting with hash tables with sizes ranging from 10 to
1736 	 * 1000, it was observed that a bucket can have up to 5 entries.
1737 	 */
1738 	bucket_size = 5;
1739 
1740 alloc:
1741 	/* We cannot do copy_from_user or copy_to_user inside
1742 	 * the rcu_read_lock. Allocate enough space here.
1743 	 */
1744 	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1745 	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1746 	if (!keys || !values) {
1747 		ret = -ENOMEM;
1748 		goto after_loop;
1749 	}
1750 
1751 again:
1752 	bpf_disable_instrumentation();
1753 	rcu_read_lock();
1754 again_nocopy:
1755 	dst_key = keys;
1756 	dst_val = values;
1757 	b = &htab->buckets[batch];
1758 	head = &b->head;
1759 	/* do not grab the lock unless we need it (bucket_cnt > 0). */
1760 	if (locked) {
1761 		ret = htab_lock_bucket(htab, b, batch, &flags);
1762 		if (ret) {
1763 			rcu_read_unlock();
1764 			bpf_enable_instrumentation();
1765 			goto after_loop;
1766 		}
1767 	}
1768 
1769 	bucket_cnt = 0;
1770 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1771 		bucket_cnt++;
1772 
1773 	if (bucket_cnt && !locked) {
1774 		locked = true;
1775 		goto again_nocopy;
1776 	}
1777 
1778 	if (bucket_cnt > (max_count - total)) {
1779 		if (total == 0)
1780 			ret = -ENOSPC;
1781 		/* Note that since bucket_cnt > 0 here, it is implicit
1782 		 * that the lock was grabbed, so release it.
1783 		 */
1784 		htab_unlock_bucket(htab, b, batch, flags);
1785 		rcu_read_unlock();
1786 		bpf_enable_instrumentation();
1787 		goto after_loop;
1788 	}
1789 
1790 	if (bucket_cnt > bucket_size) {
1791 		bucket_size = bucket_cnt;
1792 		/* Note that since bucket_cnt > 0 here, it is implicit
1793 		 * that the lock was grabbed, so release it.
1794 		 */
1795 		htab_unlock_bucket(htab, b, batch, flags);
1796 		rcu_read_unlock();
1797 		bpf_enable_instrumentation();
1798 		kvfree(keys);
1799 		kvfree(values);
1800 		goto alloc;
1801 	}
1802 
1803 	/* The next block is only safe to run if the bucket lock is held */
1804 	if (!locked)
1805 		goto next_batch;
1806 
1807 	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1808 		memcpy(dst_key, l->key, key_size);
1809 
1810 		if (is_percpu) {
1811 			int off = 0, cpu;
1812 			void __percpu *pptr;
1813 
1814 			pptr = htab_elem_get_ptr(l, map->key_size);
1815 			for_each_possible_cpu(cpu) {
1816 				copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
1817 				check_and_init_map_value(&htab->map, dst_val + off);
1818 				off += size;
1819 			}
1820 		} else {
1821 			value = l->key + roundup_key_size;
1822 			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1823 				struct bpf_map **inner_map = value;
1824 
1825 				/* Actual value is the id of the inner map */
1826 				map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1827 				value = &map_id;
1828 			}
1829 
1830 			if (elem_map_flags & BPF_F_LOCK)
1831 				copy_map_value_locked(map, dst_val, value,
1832 						      true);
1833 			else
1834 				copy_map_value(map, dst_val, value);
1835 			/* Zeroing special fields in the temp buffer */
1836 			check_and_init_map_value(map, dst_val);
1837 		}
1838 		if (do_delete) {
1839 			hlist_nulls_del_rcu(&l->hash_node);
1840 
1841 			/* bpf_lru_push_free() will acquire lru_lock, which
1842 			 * may cause deadlock. See comments in function
1843 			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1844 			 * after releasing the bucket lock.
1845 			 */
1846 			if (is_lru_map) {
1847 				l->batch_flink = node_to_free;
1848 				node_to_free = l;
1849 			} else {
1850 				free_htab_elem(htab, l);
1851 			}
1852 		}
1853 		dst_key += key_size;
1854 		dst_val += value_size;
1855 	}
1856 
1857 	htab_unlock_bucket(htab, b, batch, flags);
1858 	locked = false;
1859 
1860 	while (node_to_free) {
1861 		l = node_to_free;
1862 		node_to_free = node_to_free->batch_flink;
1863 		htab_lru_push_free(htab, l);
1864 	}
1865 
1866 next_batch:
1867 	/* If we are not copying data, we can go to the next bucket and avoid
1868 	 * dropping the RCU read lock.
1869 	 */
1870 	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1871 		batch++;
1872 		goto again_nocopy;
1873 	}
1874 
1875 	rcu_read_unlock();
1876 	bpf_enable_instrumentation();
1877 	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1878 	    key_size * bucket_cnt) ||
1879 	    copy_to_user(uvalues + total * value_size, values,
1880 	    value_size * bucket_cnt))) {
1881 		ret = -EFAULT;
1882 		goto after_loop;
1883 	}
1884 
1885 	total += bucket_cnt;
1886 	batch++;
1887 	if (batch >= htab->n_buckets) {
1888 		ret = -ENOENT;
1889 		goto after_loop;
1890 	}
1891 	goto again;
1892 
1893 after_loop:
1894 	if (ret == -EFAULT)
1895 		goto out;
1896 
1897 	/* copy # of entries and next batch */
1898 	ubatch = u64_to_user_ptr(attr->batch.out_batch);
1899 	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1900 	    put_user(total, &uattr->batch.count))
1901 		ret = -EFAULT;
1902 
1903 out:
1904 	kvfree(keys);
1905 	kvfree(values);
1906 	return ret;
1907 }
1908 
1909 static int
1910 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1911 			     union bpf_attr __user *uattr)
1912 {
1913 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1914 						  false, true);
1915 }
1916 
1917 static int
1918 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1919 					const union bpf_attr *attr,
1920 					union bpf_attr __user *uattr)
1921 {
1922 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1923 						  false, true);
1924 }
1925 
1926 static int
1927 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1928 		      union bpf_attr __user *uattr)
1929 {
1930 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1931 						  false, false);
1932 }
1933 
1934 static int
1935 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1936 				 const union bpf_attr *attr,
1937 				 union bpf_attr __user *uattr)
1938 {
1939 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1940 						  false, false);
1941 }
1942 
1943 static int
1944 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1945 				 const union bpf_attr *attr,
1946 				 union bpf_attr __user *uattr)
1947 {
1948 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1949 						  true, true);
1950 }
1951 
1952 static int
1953 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1954 					    const union bpf_attr *attr,
1955 					    union bpf_attr __user *uattr)
1956 {
1957 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1958 						  true, true);
1959 }
1960 
1961 static int
1962 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1963 			  union bpf_attr __user *uattr)
1964 {
1965 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1966 						  true, false);
1967 }
1968 
1969 static int
1970 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1971 				     const union bpf_attr *attr,
1972 				     union bpf_attr __user *uattr)
1973 {
1974 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1975 						  true, false);
1976 }
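
/*
 * Hypothetical userspace sketch (not part of this file): draining a hash
 * map through the batch interface implemented above, via libbpf's
 * bpf_map_lookup_and_delete_batch().  The key/value sizes and the buffer
 * capacity of 128 are illustrative assumptions; error handling is
 * abbreviated.
 *
 *	__u32 out_batch, count;
 *	void *in_batch = NULL;	// NULL in_batch starts from the first bucket
 *	__u32 keys[128];	// assumes 4-byte keys
 *	__u64 vals[128];	// assumes 8-byte values
 *	LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *	int err;
 *
 *	do {
 *		count = 128;
 *		err = bpf_map_lookup_and_delete_batch(map_fd, in_batch,
 *						      &out_batch, keys, vals,
 *						      &count, &opts);
 *		// 'count' entries were returned; -ENOENT signals that the
 *		// last bucket has been reached.
 *		in_batch = &out_batch;
 *	} while (!err);
 */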
1977 
1978 struct bpf_iter_seq_hash_map_info {
1979 	struct bpf_map *map;
1980 	struct bpf_htab *htab;
1981 	void *percpu_value_buf; // non-NULL means percpu hash
1982 	u32 bucket_id;
1983 	u32 skip_elems;
1984 };
1985 
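/* Find the next element for the map iterator.  Iteration resumes from
 * info->bucket_id/info->skip_elems; once a bucket with a usable element
 * is found, the RCU read lock is left held so the element stays valid
 * until the next seq_file operation.
 */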
1986 static struct htab_elem *
1987 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
1988 			   struct htab_elem *prev_elem)
1989 {
1990 	const struct bpf_htab *htab = info->htab;
1991 	u32 skip_elems = info->skip_elems;
1992 	u32 bucket_id = info->bucket_id;
1993 	struct hlist_nulls_head *head;
1994 	struct hlist_nulls_node *n;
1995 	struct htab_elem *elem;
1996 	struct bucket *b;
1997 	u32 i, count;
1998 
1999 	if (bucket_id >= htab->n_buckets)
2000 		return NULL;
2001 
2002 	/* try to find next elem in the same bucket */
2003 	if (prev_elem) {
2004 		/* no update/deletion on this bucket, prev_elem should still be valid
2005 		 * and we won't skip elements.
2006 		 */
2007 		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
2008 		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
2009 		if (elem)
2010 			return elem;
2011 
2012 		/* not found, unlock and go to the next bucket */
2013 		b = &htab->buckets[bucket_id++];
2014 		rcu_read_unlock();
2015 		skip_elems = 0;
2016 	}
2017 
2018 	for (i = bucket_id; i < htab->n_buckets; i++) {
2019 		b = &htab->buckets[i];
2020 		rcu_read_lock();
2021 
2022 		count = 0;
2023 		head = &b->head;
2024 		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2025 			if (count >= skip_elems) {
2026 				info->bucket_id = i;
2027 				info->skip_elems = count;
2028 				return elem;
2029 			}
2030 			count++;
2031 		}
2032 
2033 		rcu_read_unlock();
2034 		skip_elems = 0;
2035 	}
2036 
2037 	info->bucket_id = i;
2038 	info->skip_elems = 0;
2039 	return NULL;
2040 }
2041 
2042 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2043 {
2044 	struct bpf_iter_seq_hash_map_info *info = seq->private;
2045 	struct htab_elem *elem;
2046 
2047 	elem = bpf_hash_map_seq_find_next(info, NULL);
2048 	if (!elem)
2049 		return NULL;
2050 
2051 	if (*pos == 0)
2052 		++*pos;
2053 	return elem;
2054 }
2055 
2056 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2057 {
2058 	struct bpf_iter_seq_hash_map_info *info = seq->private;
2059 
2060 	++*pos;
2061 	++info->skip_elems;
2062 	return bpf_hash_map_seq_find_next(info, v);
2063 }
2064 
2065 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2066 {
2067 	struct bpf_iter_seq_hash_map_info *info = seq->private;
2068 	u32 roundup_key_size, roundup_value_size;
2069 	struct bpf_iter__bpf_map_elem ctx = {};
2070 	struct bpf_map *map = info->map;
2071 	struct bpf_iter_meta meta;
2072 	int ret = 0, off = 0, cpu;
2073 	struct bpf_prog *prog;
2074 	void __percpu *pptr;
2075 
2076 	meta.seq = seq;
2077 	prog = bpf_iter_get_info(&meta, elem == NULL);
2078 	if (prog) {
2079 		ctx.meta = &meta;
2080 		ctx.map = info->map;
2081 		if (elem) {
2082 			roundup_key_size = round_up(map->key_size, 8);
2083 			ctx.key = elem->key;
2084 			if (!info->percpu_value_buf) {
2085 				ctx.value = elem->key + roundup_key_size;
2086 			} else {
2087 				roundup_value_size = round_up(map->value_size, 8);
2088 				pptr = htab_elem_get_ptr(elem, map->key_size);
2089 				for_each_possible_cpu(cpu) {
2090 					copy_map_value_long(map, info->percpu_value_buf + off,
2091 							    per_cpu_ptr(pptr, cpu));
2092 					check_and_init_map_value(map, info->percpu_value_buf + off);
2093 					off += roundup_value_size;
2094 				}
2095 				ctx.value = info->percpu_value_buf;
2096 			}
2097 		}
2098 		ret = bpf_iter_run_prog(prog, &ctx);
2099 	}
2100 
2101 	return ret;
2102 }
2103 
2104 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2105 {
2106 	return __bpf_hash_map_seq_show(seq, v);
2107 }
2108 
2109 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2110 {
2111 	if (!v)
2112 		(void)__bpf_hash_map_seq_show(seq, NULL);
2113 	else
2114 		rcu_read_unlock();
2115 }
2116 
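/* For percpu maps, preallocate a scratch buffer big enough for the value
 * of every possible CPU, so that __bpf_hash_map_seq_show() can hand the
 * iterator program one flat copy.
 */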
2117 static int bpf_iter_init_hash_map(void *priv_data,
2118 				  struct bpf_iter_aux_info *aux)
2119 {
2120 	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2121 	struct bpf_map *map = aux->map;
2122 	void *value_buf;
2123 	u32 buf_size;
2124 
2125 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2126 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2127 		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2128 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2129 		if (!value_buf)
2130 			return -ENOMEM;
2131 
2132 		seq_info->percpu_value_buf = value_buf;
2133 	}
2134 
2135 	bpf_map_inc_with_uref(map);
2136 	seq_info->map = map;
2137 	seq_info->htab = container_of(map, struct bpf_htab, map);
2138 	return 0;
2139 }
2140 
2141 static void bpf_iter_fini_hash_map(void *priv_data)
2142 {
2143 	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2144 
2145 	bpf_map_put_with_uref(seq_info->map);
2146 	kfree(seq_info->percpu_value_buf);
2147 }
2148 
2149 static const struct seq_operations bpf_hash_map_seq_ops = {
2150 	.start	= bpf_hash_map_seq_start,
2151 	.next	= bpf_hash_map_seq_next,
2152 	.stop	= bpf_hash_map_seq_stop,
2153 	.show	= bpf_hash_map_seq_show,
2154 };
2155 
2156 static const struct bpf_iter_seq_info iter_seq_info = {
2157 	.seq_ops		= &bpf_hash_map_seq_ops,
2158 	.init_seq_private	= bpf_iter_init_hash_map,
2159 	.fini_seq_private	= bpf_iter_fini_hash_map,
2160 	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
2161 };
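
/*
 * Hypothetical sketch (not part of this file): a minimal BPF iterator
 * program driven by the seq_file machinery registered above.  Assumes
 * vmlinux.h plus libbpf's bpf_helpers.h/bpf_tracing.h for SEC() and
 * BPF_SEQ_PRINTF(), and a map with 4-byte keys and 8-byte values.
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		// ctx->key/ctx->value are NULL on the post-iteration call
 *		if (key && val)
 *			BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 */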
2162 
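/* Implementation of the bpf_for_each_map_elem() helper for hash maps:
 * walk every bucket under RCU and invoke callback_fn(map, key, value,
 * callback_ctx) for each element until the callback returns non-zero.
 * Returns the number of elements visited.
 */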
2163 static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2164 				   void *callback_ctx, u64 flags)
2165 {
2166 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2167 	struct hlist_nulls_head *head;
2168 	struct hlist_nulls_node *n;
2169 	struct htab_elem *elem;
2170 	u32 roundup_key_size;
2171 	int i, num_elems = 0;
2172 	void __percpu *pptr;
2173 	struct bucket *b;
2174 	void *key, *val;
2175 	bool is_percpu;
2176 	u64 ret = 0;
2177 
2178 	if (flags != 0)
2179 		return -EINVAL;
2180 
2181 	is_percpu = htab_is_percpu(htab);
2182 
2183 	roundup_key_size = round_up(map->key_size, 8);
2184 	/* disable migration so the percpu value prepared here will be the
2185 	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
2186 	 */
2187 	if (is_percpu)
2188 		migrate_disable();
2189 	for (i = 0; i < htab->n_buckets; i++) {
2190 		b = &htab->buckets[i];
2191 		rcu_read_lock();
2192 		head = &b->head;
2193 		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2194 			key = elem->key;
2195 			if (is_percpu) {
2196 				/* current cpu value for percpu map */
2197 				pptr = htab_elem_get_ptr(elem, map->key_size);
2198 				val = this_cpu_ptr(pptr);
2199 			} else {
2200 				val = elem->key + roundup_key_size;
2201 			}
2202 			num_elems++;
2203 			ret = callback_fn((u64)(long)map, (u64)(long)key,
2204 					  (u64)(long)val, (u64)(long)callback_ctx, 0);
2205 			/* return value: 0 - continue, 1 - stop and return */
2206 			if (ret) {
2207 				rcu_read_unlock();
2208 				goto out;
2209 			}
2210 		}
2211 		rcu_read_unlock();
2212 	}
2213 out:
2214 	if (is_percpu)
2215 		migrate_enable();
2216 	return num_elems;
2217 }
2218 
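/* Rough accounting of the kernel memory held by the map: the bucket
 * array, the per-CPU lock counters, and the elements themselves (actual
 * element count for non-preallocated maps, max_entries for preallocated
 * ones).
 */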
2219 static u64 htab_map_mem_usage(const struct bpf_map *map)
2220 {
2221 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2222 	u32 value_size = round_up(htab->map.value_size, 8);
2223 	bool prealloc = htab_is_prealloc(htab);
2224 	bool percpu = htab_is_percpu(htab);
2225 	bool lru = htab_is_lru(htab);
2226 	u64 num_entries;
2227 	u64 usage = sizeof(struct bpf_htab);
2228 
2229 	usage += sizeof(struct bucket) * htab->n_buckets;
2230 	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
2231 	if (prealloc) {
2232 		num_entries = map->max_entries;
2233 		if (htab_has_extra_elems(htab))
2234 			num_entries += num_possible_cpus();
2235 
2236 		usage += htab->elem_size * num_entries;
2237 
2238 		if (percpu)
2239 			usage += value_size * num_possible_cpus() * num_entries;
2240 		else if (!lru)
2241 			usage += sizeof(struct htab_elem *) * num_possible_cpus();
2242 	} else {
2243 #define LLIST_NODE_SZ sizeof(struct llist_node)
2244 
2245 		num_entries = htab->use_percpu_counter ?
2246 					  percpu_counter_sum(&htab->pcount) :
2247 					  atomic_read(&htab->count);
2248 		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
2249 		if (percpu) {
2250 			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
2251 			usage += value_size * num_possible_cpus() * num_entries;
2252 		}
2253 	}
2254 	return usage;
2255 }
2256 
2257 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
2258 const struct bpf_map_ops htab_map_ops = {
2259 	.map_meta_equal = bpf_map_meta_equal,
2260 	.map_alloc_check = htab_map_alloc_check,
2261 	.map_alloc = htab_map_alloc,
2262 	.map_free = htab_map_free,
2263 	.map_get_next_key = htab_map_get_next_key,
2264 	.map_release_uref = htab_map_free_timers,
2265 	.map_lookup_elem = htab_map_lookup_elem,
2266 	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2267 	.map_update_elem = htab_map_update_elem,
2268 	.map_delete_elem = htab_map_delete_elem,
2269 	.map_gen_lookup = htab_map_gen_lookup,
2270 	.map_seq_show_elem = htab_map_seq_show_elem,
2271 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2272 	.map_for_each_callback = bpf_for_each_hash_elem,
2273 	.map_mem_usage = htab_map_mem_usage,
2274 	BATCH_OPS(htab),
2275 	.map_btf_id = &htab_map_btf_ids[0],
2276 	.iter_seq_info = &iter_seq_info,
2277 };
2278 
2279 const struct bpf_map_ops htab_lru_map_ops = {
2280 	.map_meta_equal = bpf_map_meta_equal,
2281 	.map_alloc_check = htab_map_alloc_check,
2282 	.map_alloc = htab_map_alloc,
2283 	.map_free = htab_map_free,
2284 	.map_get_next_key = htab_map_get_next_key,
2285 	.map_release_uref = htab_map_free_timers,
2286 	.map_lookup_elem = htab_lru_map_lookup_elem,
2287 	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2288 	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2289 	.map_update_elem = htab_lru_map_update_elem,
2290 	.map_delete_elem = htab_lru_map_delete_elem,
2291 	.map_gen_lookup = htab_lru_map_gen_lookup,
2292 	.map_seq_show_elem = htab_map_seq_show_elem,
2293 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2294 	.map_for_each_callback = bpf_for_each_hash_elem,
2295 	.map_mem_usage = htab_map_mem_usage,
2296 	BATCH_OPS(htab_lru),
2297 	.map_btf_id = &htab_map_btf_ids[0],
2298 	.iter_seq_info = &iter_seq_info,
2299 };
2300 
2301 /* Called from eBPF program */
2302 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2303 {
2304 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2305 
2306 	if (l)
2307 		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2308 	else
2309 		return NULL;
2310 }
2311 
2312 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2313 {
2314 	struct htab_elem *l;
2315 
2316 	if (cpu >= nr_cpu_ids)
2317 		return NULL;
2318 
2319 	l = __htab_map_lookup_elem(map, key);
2320 	if (l)
2321 		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2322 	else
2323 		return NULL;
2324 }
2325 
2326 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2327 {
2328 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2329 
2330 	if (l) {
2331 		bpf_lru_node_set_ref(&l->lru_node);
2332 		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2333 	}
2334 
2335 	return NULL;
2336 }
2337 
2338 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2339 {
2340 	struct htab_elem *l;
2341 
2342 	if (cpu >= nr_cpu_ids)
2343 		return NULL;
2344 
2345 	l = __htab_map_lookup_elem(map, key);
2346 	if (l) {
2347 		bpf_lru_node_set_ref(&l->lru_node);
2348 		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2349 	}
2350 
2351 	return NULL;
2352 }
2353 
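/* Called from the syscall path: copy the value seen by every possible CPU
 * into the supplied buffer, packed back to back at 8-byte-aligned offsets.
 */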
2354 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2355 {
2356 	struct htab_elem *l;
2357 	void __percpu *pptr;
2358 	int ret = -ENOENT;
2359 	int cpu, off = 0;
2360 	u32 size;
2361 
2362 	/* per_cpu areas are zero-filled and bpf programs can only
2363 	 * access 'value_size' of them, so copying rounded areas
2364 	 * will not leak any kernel data
2365 	 */
2366 	size = round_up(map->value_size, 8);
2367 	rcu_read_lock();
2368 	l = __htab_map_lookup_elem(map, key);
2369 	if (!l)
2370 		goto out;
2371 	/* We do not mark LRU map element here in order to not mess up
2372 	 * eviction heuristics when user space does a map walk.
2373 	 */
2374 	pptr = htab_elem_get_ptr(l, map->key_size);
2375 	for_each_possible_cpu(cpu) {
2376 		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
2377 		check_and_init_map_value(map, value + off);
2378 		off += size;
2379 	}
2380 	ret = 0;
2381 out:
2382 	rcu_read_unlock();
2383 	return ret;
2384 }
2385 
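/* Called from the syscall path: unlike updates from BPF programs, this
 * writes the new value on all possible CPUs (onallcpus == true).
 */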
2386 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2387 			   u64 map_flags)
2388 {
2389 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2390 	int ret;
2391 
2392 	rcu_read_lock();
2393 	if (htab_is_lru(htab))
2394 		ret = __htab_lru_percpu_map_update_elem(map, key, value,
2395 							map_flags, true);
2396 	else
2397 		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2398 						    true);
2399 	rcu_read_unlock();
2400 
2401 	return ret;
2402 }
2403 
2404 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2405 					  struct seq_file *m)
2406 {
2407 	struct htab_elem *l;
2408 	void __percpu *pptr;
2409 	int cpu;
2410 
2411 	rcu_read_lock();
2412 
2413 	l = __htab_map_lookup_elem(map, key);
2414 	if (!l) {
2415 		rcu_read_unlock();
2416 		return;
2417 	}
2418 
2419 	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2420 	seq_puts(m, ": {\n");
2421 	pptr = htab_elem_get_ptr(l, map->key_size);
2422 	for_each_possible_cpu(cpu) {
2423 		seq_printf(m, "\tcpu%d: ", cpu);
2424 		btf_type_seq_show(map->btf, map->btf_value_type_id,
2425 				  per_cpu_ptr(pptr, cpu), m);
2426 		seq_puts(m, "\n");
2427 	}
2428 	seq_puts(m, "}\n");
2429 
2430 	rcu_read_unlock();
2431 }
2432 
2433 const struct bpf_map_ops htab_percpu_map_ops = {
2434 	.map_meta_equal = bpf_map_meta_equal,
2435 	.map_alloc_check = htab_map_alloc_check,
2436 	.map_alloc = htab_map_alloc,
2437 	.map_free = htab_map_free,
2438 	.map_get_next_key = htab_map_get_next_key,
2439 	.map_lookup_elem = htab_percpu_map_lookup_elem,
2440 	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2441 	.map_update_elem = htab_percpu_map_update_elem,
2442 	.map_delete_elem = htab_map_delete_elem,
2443 	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2444 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2445 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2446 	.map_for_each_callback = bpf_for_each_hash_elem,
2447 	.map_mem_usage = htab_map_mem_usage,
2448 	BATCH_OPS(htab_percpu),
2449 	.map_btf_id = &htab_map_btf_ids[0],
2450 	.iter_seq_info = &iter_seq_info,
2451 };
2452 
2453 const struct bpf_map_ops htab_lru_percpu_map_ops = {
2454 	.map_meta_equal = bpf_map_meta_equal,
2455 	.map_alloc_check = htab_map_alloc_check,
2456 	.map_alloc = htab_map_alloc,
2457 	.map_free = htab_map_free,
2458 	.map_get_next_key = htab_map_get_next_key,
2459 	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2460 	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2461 	.map_update_elem = htab_lru_percpu_map_update_elem,
2462 	.map_delete_elem = htab_lru_map_delete_elem,
2463 	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2464 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2465 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2466 	.map_for_each_callback = bpf_for_each_hash_elem,
2467 	.map_mem_usage = htab_map_mem_usage,
2468 	BATCH_OPS(htab_lru_percpu),
2469 	.map_btf_id = &htab_map_btf_ids[0],
2470 	.iter_seq_info = &iter_seq_info,
2471 };
2472 
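/* fd-based maps (hash of maps) store a pointer internally while user
 * space reads and writes the element as a 32-bit fd/id, so the declared
 * value size must be exactly sizeof(u32).
 */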
2473 static int fd_htab_map_alloc_check(union bpf_attr *attr)
2474 {
2475 	if (attr->value_size != sizeof(u32))
2476 		return -EINVAL;
2477 	return htab_map_alloc_check(attr);
2478 }
2479 
2480 static void fd_htab_map_free(struct bpf_map *map)
2481 {
2482 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2483 	struct hlist_nulls_node *n;
2484 	struct hlist_nulls_head *head;
2485 	struct htab_elem *l;
2486 	int i;
2487 
2488 	for (i = 0; i < htab->n_buckets; i++) {
2489 		head = select_bucket(htab, i);
2490 
2491 		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2492 			void *ptr = fd_htab_map_get_ptr(map, l);
2493 
2494 			map->ops->map_fd_put_ptr(map, ptr, false);
2495 		}
2496 	}
2497 
2498 	htab_map_free(map);
2499 }
2500 
2501 /* only called from syscall */
2502 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2503 {
2504 	void **ptr;
2505 	int ret = 0;
2506 
2507 	if (!map->ops->map_fd_sys_lookup_elem)
2508 		return -ENOTSUPP;
2509 
2510 	rcu_read_lock();
2511 	ptr = htab_map_lookup_elem(map, key);
2512 	if (ptr)
2513 		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2514 	else
2515 		ret = -ENOENT;
2516 	rcu_read_unlock();
2517 
2518 	return ret;
2519 }
2520 
2521 /* only called from syscall */
2522 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2523 				void *key, void *value, u64 map_flags)
2524 {
2525 	void *ptr;
2526 	int ret;
2527 	u32 ufd = *(u32 *)value;
2528 
2529 	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2530 	if (IS_ERR(ptr))
2531 		return PTR_ERR(ptr);
2532 
2533 	ret = htab_map_update_elem(map, key, &ptr, map_flags);
2534 	if (ret)
2535 		map->ops->map_fd_put_ptr(map, ptr, false);
2536 
2537 	return ret;
2538 }
2539 
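/* A hash-of-maps keeps template metadata of its inner map so lookups can
 * be type-checked; the template is taken from inner_map_fd at allocation
 * time and freed together with the outer map in htab_of_map_free().
 */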
2540 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2541 {
2542 	struct bpf_map *map, *inner_map_meta;
2543 
2544 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2545 	if (IS_ERR(inner_map_meta))
2546 		return inner_map_meta;
2547 
2548 	map = htab_map_alloc(attr);
2549 	if (IS_ERR(map)) {
2550 		bpf_map_meta_free(inner_map_meta);
2551 		return map;
2552 	}
2553 
2554 	map->inner_map_meta = inner_map_meta;
2555 
2556 	return map;
2557 }
2558 
2559 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2560 {
2561 	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
2562 
2563 	if (!inner_map)
2564 		return NULL;
2565 
2566 	return READ_ONCE(*inner_map);
2567 }
2568 
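/* Inline the lookup for hash-of-maps: call __htab_map_lookup_elem() and,
 * if an element was found, advance past the 8-byte-aligned key to the
 * value area and load the inner map pointer stored there.
 */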
2569 static int htab_of_map_gen_lookup(struct bpf_map *map,
2570 				  struct bpf_insn *insn_buf)
2571 {
2572 	struct bpf_insn *insn = insn_buf;
2573 	const int ret = BPF_REG_0;
2574 
2575 	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2576 		     (void *(*)(struct bpf_map *map, void *key))NULL));
2577 	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2578 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2579 	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2580 				offsetof(struct htab_elem, key) +
2581 				round_up(map->key_size, 8));
2582 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2583 
2584 	return insn - insn_buf;
2585 }
2586 
2587 static void htab_of_map_free(struct bpf_map *map)
2588 {
2589 	bpf_map_meta_free(map->inner_map_meta);
2590 	fd_htab_map_free(map);
2591 }
2592 
2593 const struct bpf_map_ops htab_of_maps_map_ops = {
2594 	.map_alloc_check = fd_htab_map_alloc_check,
2595 	.map_alloc = htab_of_map_alloc,
2596 	.map_free = htab_of_map_free,
2597 	.map_get_next_key = htab_map_get_next_key,
2598 	.map_lookup_elem = htab_of_map_lookup_elem,
2599 	.map_delete_elem = htab_map_delete_elem,
2600 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
2601 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
2602 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2603 	.map_gen_lookup = htab_of_map_gen_lookup,
2604 	.map_check_btf = map_check_no_btf,
2605 	.map_mem_usage = htab_map_mem_usage,
2606 	BATCH_OPS(htab),
2607 	.map_btf_id = &htab_map_btf_ids[0],
2608 };
2609