// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#include <linux/bpf_mem_alloc.h>

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)

#define BATCH_OPS(_name)			\
	.map_lookup_batch =			\
	_name##_map_lookup_batch,		\
	.map_lookup_and_delete_batch =		\
	_name##_map_lookup_and_delete_batch,	\
	.map_update_batch =			\
	generic_map_update_batch,		\
	.map_delete_batch =			\
	generic_map_delete_batch
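/* For example (illustrative expansion, not part of the original source),
 * BATCH_OPS(htab) pastes the prefix onto the per-map batch callbacks:
 *
 *	.map_lookup_batch = htab_map_lookup_batch,
 *	.map_lookup_and_delete_batch = htab_map_lookup_and_delete_batch,
 *	.map_update_batch = generic_map_update_batch,
 *	.map_delete_batch = generic_map_delete_batch
 */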

/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context including perf, kprobes and
 * tracing. As there are almost no limits on where perf, kprobes and
 * tracing can be invoked from, the lock operations need to be protected
 * against deadlocks. Deadlocks can be caused by recursion and by an
 * invocation in the lock held section when functions which acquire this
 * lock are invoked from sys_bpf(). BPF recursion is prevented by
 * incrementing the per CPU variable bpf_prog_active, which prevents BPF
 * programs attached to perf events, kprobes and tracing from being
 * invoked before the prior invocation from one of these contexts has
 * completed. sys_bpf() uses the same mechanism by pinning the task to the
 * current CPU and incrementing the recursion protection across the map
 * operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure deterministic behaviour these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. These mechanisms require preallocated maps,
 * so there is no need to invoke memory allocations within the lock held
 * sections.
 *
 * BPF maps which need dynamic allocation are only used from (forced)
 * thread context on RT and can therefore use regular spinlocks, which in
 * turn allows memory allocations to be invoked from the lock held section.
 *
 * On a non-RT kernel this distinction is neither possible nor required.
 * spinlock maps to raw_spinlock and the extra code is optimized out by the
 * compiler.
 */
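/* Illustrative sketch (not from the original file) of the per-CPU
 * recursion guard that htab_lock_bucket() below implements: a per-CPU
 * counter is incremented before taking the bucket lock, so a nested
 * attempt on the same CPU bails out instead of deadlocking:
 *
 *	if (__this_cpu_inc_return(*(htab->map_locked[hash])) != 1) {
 *		// a BPF program interrupted us while this bucket subset
 *		// was already locked on this CPU; back off
 *		__this_cpu_dec(*(htab->map_locked[hash]));
 *		return -EBUSY;
 *	}
 *	raw_spin_lock_irqsave(&b->raw_lock, flags);
 */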
struct bucket {
	struct hlist_nulls_head head;
	union {
		raw_spinlock_t raw_lock;
		spinlock_t     lock;
	};
};

#define HASHTAB_MAP_LOCK_COUNT 8
#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)

struct bpf_htab {
	struct bpf_map map;
	struct bpf_mem_alloc ma;
	struct bpf_mem_alloc pcpu_ma;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	/* the number of elements in a non-preallocated hashtable is kept
	 * in either pcount or count
	 */
	struct percpu_counter pcount;
	atomic_t count;
	bool use_percpu_counter;
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
	u32 hashrnd;
	struct lock_class_key lockdep_key;
	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct pcpu_freelist_node fnode;
				struct htab_elem *batch_flink;
			};
		};
	};
	union {
		/* pointer to per-cpu pointer */
		void *ptr_to_pptr;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[] __aligned(8);
};
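/* Element memory layout (an illustrative diagram, derived from the
 * elem_size computation in htab_map_alloc() below):
 *
 *	+------------------+----------------------+-------------------------+
 *	| struct htab_elem | key, rounded up to   | value (or, for per-cpu  |
 *	|                  | 8 bytes              | maps, a pointer to the  |
 *	|                  |                      | per-cpu value storage)  |
 *	+------------------+----------------------+-------------------------+
 */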

static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
{
	return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
}

static void htab_init_buckets(struct bpf_htab *htab)
{
	unsigned int i;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		if (htab_use_raw_lock(htab)) {
			raw_spin_lock_init(&htab->buckets[i].raw_lock);
			lockdep_set_class(&htab->buckets[i].raw_lock,
					  &htab->lockdep_key);
		} else {
			spin_lock_init(&htab->buckets[i].lock);
			lockdep_set_class(&htab->buckets[i].lock,
					  &htab->lockdep_key);
		}
		cond_resched();
	}
}

static inline int htab_lock_bucket(const struct bpf_htab *htab,
				   struct bucket *b, u32 hash,
				   unsigned long *pflags)
{
	unsigned long flags;
	bool use_raw_lock;

	hash = hash & HASHTAB_MAP_LOCK_MASK;

	use_raw_lock = htab_use_raw_lock(htab);
	if (use_raw_lock)
		preempt_disable();
	else
		migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
		__this_cpu_dec(*(htab->map_locked[hash]));
		if (use_raw_lock)
			preempt_enable();
		else
			migrate_enable();
		return -EBUSY;
	}

	if (use_raw_lock)
		raw_spin_lock_irqsave(&b->raw_lock, flags);
	else
		spin_lock_irqsave(&b->lock, flags);
	*pflags = flags;

	return 0;
}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
				      struct bucket *b, u32 hash,
				      unsigned long flags)
{
	bool use_raw_lock = htab_use_raw_lock(htab);

	hash = hash & HASHTAB_MAP_LOCK_MASK;
	if (use_raw_lock)
		raw_spin_unlock_irqrestore(&b->raw_lock, flags);
	else
		spin_unlock_irqrestore(&b->lock, flags);
	__this_cpu_dec(*(htab->map_locked[hash]));
	if (use_raw_lock)
		preempt_enable();
	else
		migrate_enable();
}

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
}

static bool htab_has_extra_elems(struct bpf_htab *htab)
{
	return !htab_is_percpu(htab) && !htab_is_lru(htab);
}

static void htab_free_prealloced_timers(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (!map_value_has_timer(&htab->map))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		bpf_timer_cancel_and_free(elem->key +
					  round_up(htab->map.key_size, 8) +
					  htab->map.timer_off);
		cond_resched();
	}
}

static void htab_free_prealloced_kptrs(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int i;

	if (!map_value_has_kptrs(&htab->map))
		return;
	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	for (i = 0; i < num_entries; i++) {
		struct htab_elem *elem;

		elem = get_htab_elem(htab, i);
		bpf_map_free_kptrs(&htab->map, elem->key + round_up(htab->map.key_size, 8));
		cond_resched();
	}
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 * (bucket_lock). If both locks need to be acquired together, the lock
 * order is always lru_lock -> bucket_lock and this only happens in
 * bpf_lru_list.c logic. For example, certain code paths of
 * bpf_lru_pop_free(), which is called by the function prealloc_lru_pop(),
 * will acquire lru_lock first followed by acquiring bucket_lock.
 *
 * In hashtab.c, to avoid deadlock, acquiring bucket_lock followed by
 * lru_lock is not allowed. In such cases, bucket_lock needs to be
 * released first before acquiring lru_lock.
 */
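/* Illustrative ordering (a sketch, not part of the original source):
 *
 *	allowed:    take lru_lock, then take bucket_lock (bpf_lru_list.c)
 *	forbidden:  take bucket_lock, then take lru_lock (would deadlock
 *	            against the path above)
 *
 * This is why the LRU update paths below pop a free node (which may take
 * lru_lock) before htab_lock_bucket() is called.
 */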
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (htab_has_extra_elems(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
					    GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
				    GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus() elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !bpf_capable())
		/* The LRU implementation is much more complicated than
		 * other maps. Hence, limit it to CAP_BPF.
		 */
		return -EPERM;

	if (zero_seed && !capable(CAP_SYS_ADMIN))
		/* Guard against local DoS, and discourage production use. */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* Check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use the map as a set
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
	   sizeof(struct htab_elem))
		/* if key_size + value_size is bigger, the user space won't be
		 * able to access the elements via the bpf syscall. This check
		 * also makes sure that elem_size doesn't overflow and that
		 * it's kmalloc-able later in htab_map_update_elem()
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	lockdep_register_key(&htab->lockdep_key);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* Ensure each CPU's LRU list has >= 1 element.
		 * Since we are at it, make each LRU list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	err = -E2BIG;
	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
							   sizeof(int),
							   sizeof(int),
							   GFP_USER);
		if (!htab->map_locked[i])
			goto free_map_locked;
	}

	if (htab->map.map_flags & BPF_F_ZERO_SEED)
		htab->hashrnd = 0;
	else
		htab->hashrnd = get_random_int();

	htab_init_buckets(htab);

/* compute_batch_value() computes the batch value as num_online_cpus() * 2
 * and __percpu_counter_compare() needs
 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
 * for percpu_counter to be faster than atomic_t. In practice the average bpf
 * hash map size is 10k, which means that a system with 64 cpus will fill the
 * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore,
 * define our own batch count as 32; then a 10k hash map can be filled up to 80%:
 * 10k - 8k > 32 _batch_ * 64 _cpus_
 * and __percpu_counter_compare() will still be fast. At that point hash map
 * collisions will dominate its performance anyway. Assume that a hash map filled
 * to 50+% isn't going to be O(1) and use the following formula to choose
 * between percpu_counter and atomic_t.
 */
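/* Worked example (illustrative numbers): with 64 online CPUs, the condition
 * below picks the percpu_counter only when max_entries / 2 > 64 * 32 = 2048,
 * i.e. for maps with more than 4096 entries.
 */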
#define PERCPU_COUNTER_BATCH 32
	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
		htab->use_percpu_counter = true;

	if (htab->use_percpu_counter) {
		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
		if (err)
			goto free_map_locked;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_map_locked;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	} else {
		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
		if (err)
			goto free_map_locked;
		if (percpu) {
			err = bpf_mem_alloc_init(&htab->pcpu_ma,
						 round_up(htab->map.value_size, 8), true);
			if (err)
				goto free_map_locked;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_map_locked:
	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
		free_percpu(htab->map_locked[i]);
	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);
free_htab:
	lockdep_unregister_key(&htab->lockdep_key);
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
	return jhash(key, key_len, hashrnd);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with the bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* Can be called without the bucket lock. It will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list was being walked.
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
							void *key, const bool mark)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		if (mark)
			bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, true);
}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, false);
}

static int htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void check_and_free_fields(struct bpf_htab *htab,
				  struct htab_elem *elem)
{
	void *map_value = elem->key + round_up(htab->map.key_size, 8);

	if (map_value_has_timer(&htab->map))
		bpf_timer_cancel_and_free(map_value + htab->map.timer_off);
	if (map_value_has_kptrs(&htab->map))
		bpf_map_free_kptrs(&htab->map, map_value);
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;
	int ret;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
	if (ret)
		return false;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			check_and_free_fields(htab, l);
			break;
		}

	htab_unlock_bucket(htab, b, tgt_l->hash, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if the next elem in this hash list is not NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
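/* Illustrative user-space iteration driven by the function above (a
 * hypothetical sketch using libbpf's bpf_map_get_next_key(); the first
 * call passes a NULL key to fetch the first element):
 *
 *	__u32 cur, next;
 *	void *key = NULL;
 *
 *	while (!bpf_map_get_next_key(map_fd, key, &next)) {
 *		// ... look up / process 'next' ...
 *		cur = next;
 *		key = &cur;
 *	}
 */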

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
	check_and_free_fields(htab, l);
	bpf_mem_cache_free(&htab->ma, l);
}

static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;
	void *ptr;

	if (map->ops->map_fd_put_ptr) {
		ptr = fd_htab_map_get_ptr(map, l);
		map->ops->map_fd_put_ptr(ptr);
	}
}

static bool is_map_full(struct bpf_htab *htab)
{
	if (htab->use_percpu_counter)
		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
						PERCPU_COUNTER_BATCH) >= 0;
	return atomic_read(&htab->count) >= htab->map.max_entries;
}

static void inc_elem_count(struct bpf_htab *htab)
{
	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
	else
		atomic_inc(&htab->count);
}

static void dec_elem_count(struct bpf_htab *htab)
{
	if (htab->use_percpu_counter)
		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
	else
		atomic_dec(&htab->count);
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	htab_put_fd_value(htab, l);

	if (htab_is_prealloc(htab)) {
		check_and_free_fields(htab, l);
		__pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		dec_elem_count(htab);
		htab_elem_free(htab, l);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	/* When not setting the initial value on all cpus, zero-fill element
	 * values for other cpus. Otherwise, the BPF program has no way to
	 * ensure known initial values for cpus other than the current one
	 * (onallcpus is always false when coming from a BPF prog).
	 */
	if (!onallcpus) {
		u32 size = round_up(htab->map.value_size, 8);
		int current_cpu = raw_smp_processor_id();
		int cpu;

		for_each_possible_cpu(cpu) {
			if (cpu == current_cpu)
				bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
						size);
			else
				memset(per_cpu_ptr(pptr, cpu), 0, size);
		}
	} else {
		pcpu_copy_value(htab, pptr, value, onallcpus);
	}
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab->map.value_size;
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			htab_put_fd_value(htab, old_elem);
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = __pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (is_map_full(htab))
			if (!old_elem)
				/* when the map is full and update() is
				 * replacing an old element, it's ok to
				 * allocate, since the old element will be
				 * freed immediately. Otherwise return an error
				 */
				return ERR_PTR(-E2BIG);
		inc_elem_count(htab);
		l_new = bpf_mem_cache_alloc(&htab->ma);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
		check_and_init_map_value(&htab->map,
					 l_new->key + round_up(key_size, 8));
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
			if (!pptr) {
				bpf_mem_cache_free(&htab->ma, l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
			l_new->ptr_to_pptr = pptr;
			pptr = *(void **)pptr;
		}

		pcpu_init_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else if (fd_htab_map_needs_adjust(htab)) {
		size = round_up(size, 8);
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	} else {
		copy_map_value(&htab->map,
			       l_new->key + round_up(key_size, 8),
			       value);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	dec_elem_count(htab);
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
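/* Illustrative user-space view of the flags checked above (a hypothetical
 * sketch using libbpf's bpf_map_update_elem()):
 *
 *	__u32 key = 1, val = 42;
 *
 *	bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST); // -EEXIST if key present
 *	bpf_map_update_elem(fd, &key, &val, BPF_EXIST);   // -ENOENT if key absent
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);     // create or replace
 */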

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	if (unlikely(map_flags & BPF_F_LOCK)) {
		if (unlikely(!map_value_has_spin_lock(map)))
			return -EINVAL;
		/* find an element without taking the bucket lock */
		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
					      htab->n_buckets);
		ret = check_flags(htab, l_old, map_flags);
		if (ret)
			return ret;
		if (l_old) {
			/* grab the element lock and update value in place */
			copy_map_value_locked(map,
					      l_old->key + round_up(key_size, 8),
					      value, false);
			return 0;
		}
		/* Fall through, grab the bucket lock and look up again.
		 * There is a 99.9% chance that the element won't be found,
		 * but the second lookup under the lock has to be done.
		 */
	}

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
		/* The first lookup without the bucket lock didn't find the
		 * element, but the second lookup with the bucket lock found
		 * it. This case is highly unlikely, but has to be dealt with:
		 * grab the element lock in addition to the bucket lock
		 * and update the element in place.
		 */
		copy_map_value_locked(map,
				      l_old->key + round_up(key_size, 8),
				      value, false);
		ret = 0;
		goto err;
	}

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
		else
			check_and_free_fields(htab, l_old);
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
{
	check_and_free_fields(htab, elem);
	bpf_lru_push_free(&htab->lru, &elem->lru_node);
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking the bucket's
	 * spinlock because getting free nodes from the LRU may need
	 * to remove older elements from the htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	copy_map_value(&htab->map,
		       l_new->key + round_up(map->key_size, 8), value);

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	htab_unlock_bucket(htab, b, hash, flags);

	if (ret)
		htab_lru_push_free(htab, l_new);
	else if (l_old)
		htab_lru_push_free(htab, l_old);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking the bucket's
	 * spinlock because the LRU's elem alloc may need
	 * to remove older elements from the htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, hash, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
	} else {
		ret = -ENOENT;
	}

	htab_unlock_bucket(htab, b, hash, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l)
		hlist_nulls_del_rcu(&l->hash_node);
	else
		ret = -ENOENT;

	htab_unlock_bucket(htab, b, hash, flags);
	if (l)
		htab_lru_push_free(htab, l);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	/* It's called from a worker thread, so disable migration here,
	 * since bpf_mem_cache_free() relies on that.
	 */
	migrate_disable();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
	migrate_enable();
}

static void htab_free_malloced_timers(struct bpf_htab *htab)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry(l, n, head, hash_node) {
			/* We don't reset or free kptr on uref dropping to zero,
			 * hence just free the timer.
			 */
			bpf_timer_cancel_and_free(l->key +
						  round_up(htab->map.key_size, 8) +
						  htab->map.timer_off);
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

static void htab_map_free_timers(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* We don't reset or free kptr on uref dropping to zero. */
	if (!map_value_has_timer(&htab->map))
		return;
	if (!htab_is_prealloc(htab))
		htab_free_malloced_timers(htab);
	else
		htab_free_prealloced_timers(htab);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int i;

	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
	 * There is no need to synchronize_rcu() here to protect map elements.
	 */

	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
	 * underneath and is responsible for waiting for callbacks to finish
	 * during bpf_mem_alloc_destroy().
	 */
	if (!htab_is_prealloc(htab)) {
		delete_all_elements(htab);
	} else {
		htab_free_prealloced_kptrs(htab);
		prealloc_destroy(htab);
	}

	bpf_map_free_kptr_off_tab(map);
	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	bpf_mem_alloc_destroy(&htab->pcpu_ma);
	bpf_mem_alloc_destroy(&htab->ma);
	if (htab->use_percpu_counter)
		percpu_counter_destroy(&htab->pcount);
	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
		free_percpu(htab->map_locked[i]);
	lockdep_unregister_key(&htab->lockdep_key);
	bpf_map_area_free(htab);
}

static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
				   struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = htab_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": ");
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					     void *value, bool is_lru_map,
					     bool is_percpu, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	unsigned long bflags;
	struct htab_elem *l;
	u32 hash, key_size;
	struct bucket *b;
	int ret;

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	ret = htab_lock_bucket(htab, b, hash, &bflags);
	if (ret)
		return ret;

	l = lookup_elem_raw(head, hash, key, key_size);
	if (!l) {
		ret = -ENOENT;
	} else {
		if (is_percpu) {
			u32 roundup_value_size = round_up(map->value_size, 8);
			void __percpu *pptr;
			int off = 0, cpu;

			pptr = htab_elem_get_ptr(l, key_size);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(value + off,
						per_cpu_ptr(pptr, cpu),
						roundup_value_size);
				off += roundup_value_size;
			}
		} else {
			u32 roundup_key_size = round_up(map->key_size, 8);

			if (flags & BPF_F_LOCK)
				copy_map_value_locked(map, value, l->key +
						      roundup_key_size,
						      true);
			else
				copy_map_value(map, value, l->key +
					       roundup_key_size);
			check_and_init_map_value(map, value);
		}

		hlist_nulls_del_rcu(&l->hash_node);
		if (!is_lru_map)
			free_htab_elem(htab, l);
	}

	htab_unlock_bucket(htab, b, hash, bflags);

	if (is_lru_map && l)
		htab_lru_push_free(htab, l);

	return ret;
}

static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
						 flags);
}

static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						  void *key, void *value,
						  u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
						 flags);
}

static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					       void *value, u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
						 flags);
}

static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						      void *key, void *value,
						      u64 flags)
{
	return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
						 flags);
}

1688 static int
1689 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1690 				   const union bpf_attr *attr,
1691 				   union bpf_attr __user *uattr,
1692 				   bool do_delete, bool is_lru_map,
1693 				   bool is_percpu)
1694 {
1695 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1696 	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1697 	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1698 	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1699 	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1700 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1701 	u32 batch, max_count, size, bucket_size, map_id;
1702 	struct htab_elem *node_to_free = NULL;
1703 	u64 elem_map_flags, map_flags;
1704 	struct hlist_nulls_head *head;
1705 	struct hlist_nulls_node *n;
1706 	unsigned long flags = 0;
1707 	bool locked = false;
1708 	struct htab_elem *l;
1709 	struct bucket *b;
1710 	int ret = 0;
1711 
1712 	elem_map_flags = attr->batch.elem_flags;
1713 	if ((elem_map_flags & ~BPF_F_LOCK) ||
1714 	    ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
1715 		return -EINVAL;
1716 
1717 	map_flags = attr->batch.flags;
1718 	if (map_flags)
1719 		return -EINVAL;
1720 
1721 	max_count = attr->batch.count;
1722 	if (!max_count)
1723 		return 0;
1724 
1725 	if (put_user(0, &uattr->batch.count))
1726 		return -EFAULT;
1727 
1728 	batch = 0;
1729 	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1730 		return -EFAULT;
1731 
1732 	if (batch >= htab->n_buckets)
1733 		return -ENOENT;
1734 
1735 	key_size = htab->map.key_size;
1736 	roundup_key_size = round_up(htab->map.key_size, 8);
1737 	value_size = htab->map.value_size;
1738 	size = round_up(value_size, 8);
1739 	if (is_percpu)
1740 		value_size = size * num_possible_cpus();
1741 	total = 0;
1742 	/* while experimenting with hash tables with sizes ranging from 10 to
1743 	 * 1000, it was observed that a bucket can have up to 5 entries.
1744 	 */
1745 	bucket_size = 5;
1746 
1747 alloc:
1748 	/* We cannot do copy_from_user or copy_to_user inside
1749 	 * the rcu_read_lock. Allocate enough space here.
1750 	 */
1751 	keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1752 	values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1753 	if (!keys || !values) {
1754 		ret = -ENOMEM;
1755 		goto after_loop;
1756 	}
1757 
1758 again:
1759 	bpf_disable_instrumentation();
1760 	rcu_read_lock();
1761 again_nocopy:
1762 	dst_key = keys;
1763 	dst_val = values;
1764 	b = &htab->buckets[batch];
1765 	head = &b->head;
1766 	/* do not grab the lock unless need it (bucket_cnt > 0). */
1767 	if (locked) {
1768 		ret = htab_lock_bucket(htab, b, batch, &flags);
1769 		if (ret) {
1770 			rcu_read_unlock();
1771 			bpf_enable_instrumentation();
1772 			goto after_loop;
1773 		}
1774 	}
1775 
1776 	bucket_cnt = 0;
1777 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1778 		bucket_cnt++;
1779 
1780 	if (bucket_cnt && !locked) {
1781 		locked = true;
1782 		goto again_nocopy;
1783 	}
1784 
1785 	if (bucket_cnt > (max_count - total)) {
1786 		if (total == 0)
1787 			ret = -ENOSPC;
1788 		/* Note that since bucket_cnt > 0 here, it is implicit
1789 		 * that the locked was grabbed, so release it.
1790 		 */
1791 		htab_unlock_bucket(htab, b, batch, flags);
1792 		rcu_read_unlock();
1793 		bpf_enable_instrumentation();
1794 		goto after_loop;
1795 	}
1796 
1797 	if (bucket_cnt > bucket_size) {
1798 		bucket_size = bucket_cnt;
1799 		/* Note that since bucket_cnt > 0 here, it is implicit
1800 		 * that the locked was grabbed, so release it.
1801 		 */
1802 		htab_unlock_bucket(htab, b, batch, flags);
1803 		rcu_read_unlock();
1804 		bpf_enable_instrumentation();
1805 		kvfree(keys);
1806 		kvfree(values);
1807 		goto alloc;
1808 	}
1809 
1810 	/* Next block is only safe to run if you have grabbed the lock */
1811 	if (!locked)
1812 		goto next_batch;
1813 
1814 	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1815 		memcpy(dst_key, l->key, key_size);
1816 
1817 		if (is_percpu) {
1818 			int off = 0, cpu;
1819 			void __percpu *pptr;
1820 
1821 			pptr = htab_elem_get_ptr(l, map->key_size);
1822 			for_each_possible_cpu(cpu) {
1823 				bpf_long_memcpy(dst_val + off,
1824 						per_cpu_ptr(pptr, cpu), size);
1825 				off += size;
1826 			}
1827 		} else {
1828 			value = l->key + roundup_key_size;
1829 			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1830 				struct bpf_map **inner_map = value;
1831 
1832 				/* Actual value is the id of the inner map */
1833 				map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1834 				value = &map_id;
1835 			}
1836 
1837 			if (elem_map_flags & BPF_F_LOCK)
1838 				copy_map_value_locked(map, dst_val, value,
1839 						      true);
1840 			else
1841 				copy_map_value(map, dst_val, value);
1842 			check_and_init_map_value(map, dst_val);
1843 		}
1844 		if (do_delete) {
1845 			hlist_nulls_del_rcu(&l->hash_node);
1846 
1847 			/* bpf_lru_push_free() will acquire lru_lock, which
1848 			 * may cause deadlock. See comments in function
1849 			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1850 			 * after releasing the bucket lock.
1851 			 */
1852 			if (is_lru_map) {
1853 				l->batch_flink = node_to_free;
1854 				node_to_free = l;
1855 			} else {
1856 				free_htab_elem(htab, l);
1857 			}
1858 		}
1859 		dst_key += key_size;
1860 		dst_val += value_size;
1861 	}
1862 
1863 	htab_unlock_bucket(htab, b, batch, flags);
1864 	locked = false;
1865 
1866 	while (node_to_free) {
1867 		l = node_to_free;
1868 		node_to_free = node_to_free->batch_flink;
1869 		htab_lru_push_free(htab, l);
1870 	}
1871 
1872 next_batch:
1873 	/* If we are not copying data, we can go to the next bucket and
1874 	 * avoid dropping the RCU read lock.
1875 	 */
1876 	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1877 		batch++;
1878 		goto again_nocopy;
1879 	}
1880 
1881 	rcu_read_unlock();
1882 	bpf_enable_instrumentation();
1883 	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1884 	    key_size * bucket_cnt) ||
1885 	    copy_to_user(uvalues + total * value_size, values,
1886 	    value_size * bucket_cnt))) {
1887 		ret = -EFAULT;
1888 		goto after_loop;
1889 	}
1890 
1891 	total += bucket_cnt;
1892 	batch++;
1893 	if (batch >= htab->n_buckets) {
1894 		ret = -ENOENT;
1895 		goto after_loop;
1896 	}
1897 	goto again;
1898 
1899 after_loop:
1900 	if (ret == -EFAULT)
1901 		goto out;
1902 
1903 	/* copy # of entries and next batch */
1904 	ubatch = u64_to_user_ptr(attr->batch.out_batch);
1905 	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1906 	    put_user(total, &uattr->batch.count))
1907 		ret = -EFAULT;
1908 
1909 out:
1910 	kvfree(keys);
1911 	kvfree(values);
1912 	return ret;
1913 }
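
/*
 * User-space sketch (an assumption, not part of this file; consume() is
 * a placeholder for caller code): driving the batch path above through
 * libbpf. -ENOENT means all buckets have been walked; 'count' still
 * reports the entries copied by the final call.
 *
 *	__u32 in = 0, out = 0, count;
 *	void *in_batch = NULL;
 *	int err;
 *
 *	do {
 *		count = 128;
 *		err = bpf_map_lookup_batch(map_fd, in_batch, &out,
 *					   keys, values, &count, NULL);
 *		consume(keys, values, count);
 *		in = out;
 *		in_batch = &in;
 *	} while (!err);
 */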
1914 
1915 static int
1916 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1917 			     union bpf_attr __user *uattr)
1918 {
1919 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1920 						  false, true);
1921 }
1922 
1923 static int
1924 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1925 					const union bpf_attr *attr,
1926 					union bpf_attr __user *uattr)
1927 {
1928 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1929 						  false, true);
1930 }
1931 
1932 static int
1933 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1934 		      union bpf_attr __user *uattr)
1935 {
1936 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1937 						  false, false);
1938 }
1939 
1940 static int
1941 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1942 				 const union bpf_attr *attr,
1943 				 union bpf_attr __user *uattr)
1944 {
1945 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1946 						  false, false);
1947 }
1948 
1949 static int
1950 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1951 				 const union bpf_attr *attr,
1952 				 union bpf_attr __user *uattr)
1953 {
1954 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1955 						  true, true);
1956 }
1957 
1958 static int
1959 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1960 					    const union bpf_attr *attr,
1961 					    union bpf_attr __user *uattr)
1962 {
1963 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1964 						  true, true);
1965 }
1966 
1967 static int
1968 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1969 			  union bpf_attr __user *uattr)
1970 {
1971 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1972 						  true, false);
1973 }
1974 
1975 static int
1976 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1977 				     const union bpf_attr *attr,
1978 				     union bpf_attr __user *uattr)
1979 {
1980 	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1981 						  true, false);
1982 }
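
/*
 * The eight wrappers above differ only in the (do_delete, is_lru_map,
 * is_percpu) triple handed to __htab_map_lookup_and_delete_batch():
 *
 *	htab_map_lookup_batch				false false false
 *	htab_map_lookup_and_delete_batch		true  false false
 *	htab_percpu_map_lookup_batch			false false true
 *	htab_percpu_map_lookup_and_delete_batch		true  false true
 *	htab_lru_map_lookup_batch			false true  false
 *	htab_lru_map_lookup_and_delete_batch		true  true  false
 *	htab_lru_percpu_map_lookup_batch		false true  true
 *	htab_lru_percpu_map_lookup_and_delete_batch	true  true  true
 */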
1983 
1984 struct bpf_iter_seq_hash_map_info {
1985 	struct bpf_map *map;
1986 	struct bpf_htab *htab;
1987 	void *percpu_value_buf; /* non-NULL means percpu hash */
1988 	u32 bucket_id;
1989 	u32 skip_elems;
1990 };
1991 
1992 static struct htab_elem *
1993 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
1994 			   struct htab_elem *prev_elem)
1995 {
1996 	const struct bpf_htab *htab = info->htab;
1997 	u32 skip_elems = info->skip_elems;
1998 	u32 bucket_id = info->bucket_id;
1999 	struct hlist_nulls_head *head;
2000 	struct hlist_nulls_node *n;
2001 	struct htab_elem *elem;
2002 	struct bucket *b;
2003 	u32 i, count;
2004 
2005 	if (bucket_id >= htab->n_buckets)
2006 		return NULL;
2007 
2008 	/* try to find next elem in the same bucket */
2009 	if (prev_elem) {
2010 		/* If there was no update/deletion on this bucket, prev_elem
2011 		 * should still be valid and we won't skip any elements.
2012 		 */
2013 		n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
2014 		elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
2015 		if (elem)
2016 			return elem;
2017 
2018 		/* not found, unlock and go to the next bucket */
2019 		b = &htab->buckets[bucket_id++];
2020 		rcu_read_unlock();
2021 		skip_elems = 0;
2022 	}
2023 
2024 	for (i = bucket_id; i < htab->n_buckets; i++) {
2025 		b = &htab->buckets[i];
2026 		rcu_read_lock();
2027 
2028 		count = 0;
2029 		head = &b->head;
2030 		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2031 			if (count >= skip_elems) {
2032 				info->bucket_id = i;
2033 				info->skip_elems = count;
2034 				return elem;
2035 			}
2036 			count++;
2037 		}
2038 
2039 		rcu_read_unlock();
2040 		skip_elems = 0;
2041 	}
2042 
2043 	info->bucket_id = i;
2044 	info->skip_elems = 0;
2045 	return NULL;
2046 }
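
/*
 * Locking summary for the walk above: when an element is returned, the
 * RCU read lock taken for its bucket is still held; it is dropped when
 * the walk moves past the bucket or in bpf_hash_map_seq_stop().
 */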
2047 
2048 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2049 {
2050 	struct bpf_iter_seq_hash_map_info *info = seq->private;
2051 	struct htab_elem *elem;
2052 
2053 	elem = bpf_hash_map_seq_find_next(info, NULL);
2054 	if (!elem)
2055 		return NULL;
2056 
2057 	if (*pos == 0)
2058 		++*pos;
2059 	return elem;
2060 }
2061 
2062 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2063 {
2064 	struct bpf_iter_seq_hash_map_info *info = seq->private;
2065 
2066 	++*pos;
2067 	++info->skip_elems;
2068 	return bpf_hash_map_seq_find_next(info, v);
2069 }
2070 
2071 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2072 {
2073 	struct bpf_iter_seq_hash_map_info *info = seq->private;
2074 	u32 roundup_key_size, roundup_value_size;
2075 	struct bpf_iter__bpf_map_elem ctx = {};
2076 	struct bpf_map *map = info->map;
2077 	struct bpf_iter_meta meta;
2078 	int ret = 0, off = 0, cpu;
2079 	struct bpf_prog *prog;
2080 	void __percpu *pptr;
2081 
2082 	meta.seq = seq;
2083 	prog = bpf_iter_get_info(&meta, elem == NULL);
2084 	if (prog) {
2085 		ctx.meta = &meta;
2086 		ctx.map = info->map;
2087 		if (elem) {
2088 			roundup_key_size = round_up(map->key_size, 8);
2089 			ctx.key = elem->key;
2090 			if (!info->percpu_value_buf) {
2091 				ctx.value = elem->key + roundup_key_size;
2092 			} else {
2093 				roundup_value_size = round_up(map->value_size, 8);
2094 				pptr = htab_elem_get_ptr(elem, map->key_size);
2095 				for_each_possible_cpu(cpu) {
2096 					bpf_long_memcpy(info->percpu_value_buf + off,
2097 							per_cpu_ptr(pptr, cpu),
2098 							roundup_value_size);
2099 					off += roundup_value_size;
2100 				}
2101 				ctx.value = info->percpu_value_buf;
2102 			}
2103 		}
2104 		ret = bpf_iter_run_prog(prog, &ctx);
2105 	}
2106 
2107 	return ret;
2108 }
2109 
2110 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2111 {
2112 	return __bpf_hash_map_seq_show(seq, v);
2113 }
2114 
2115 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2116 {
2117 	if (!v)
2118 		(void)__bpf_hash_map_seq_show(seq, NULL);
2119 	else
2120 		rcu_read_unlock();
2121 }
2122 
2123 static int bpf_iter_init_hash_map(void *priv_data,
2124 				  struct bpf_iter_aux_info *aux)
2125 {
2126 	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2127 	struct bpf_map *map = aux->map;
2128 	void *value_buf;
2129 	u32 buf_size;
2130 
2131 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2132 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2133 		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2134 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2135 		if (!value_buf)
2136 			return -ENOMEM;
2137 
2138 		seq_info->percpu_value_buf = value_buf;
2139 	}
2140 
2141 	bpf_map_inc_with_uref(map);
2142 	seq_info->map = map;
2143 	seq_info->htab = container_of(map, struct bpf_htab, map);
2144 	return 0;
2145 }
2146 
2147 static void bpf_iter_fini_hash_map(void *priv_data)
2148 {
2149 	struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2150 
2151 	bpf_map_put_with_uref(seq_info->map);
2152 	kfree(seq_info->percpu_value_buf);
2153 }
2154 
2155 static const struct seq_operations bpf_hash_map_seq_ops = {
2156 	.start	= bpf_hash_map_seq_start,
2157 	.next	= bpf_hash_map_seq_next,
2158 	.stop	= bpf_hash_map_seq_stop,
2159 	.show	= bpf_hash_map_seq_show,
2160 };
2161 
2162 static const struct bpf_iter_seq_info iter_seq_info = {
2163 	.seq_ops		= &bpf_hash_map_seq_ops,
2164 	.init_seq_private	= bpf_iter_init_hash_map,
2165 	.fini_seq_private	= bpf_iter_fini_hash_map,
2166 	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
2167 };
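
/*
 * Consumer sketch (an assumption: standard bpf_iter usage with a
 * 4-byte-key/8-byte-value map, nothing defined in this file): a BPF
 * program attached to this iterator. ctx->value is NULL for the final
 * invocation issued from bpf_hash_map_seq_stop().
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_htab(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		if (!key || !val)	// end-of-iteration call
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 */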
2168 
2169 static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2170 				  void *callback_ctx, u64 flags)
2171 {
2172 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2173 	struct hlist_nulls_head *head;
2174 	struct hlist_nulls_node *n;
2175 	struct htab_elem *elem;
2176 	u32 roundup_key_size;
2177 	int i, num_elems = 0;
2178 	void __percpu *pptr;
2179 	struct bucket *b;
2180 	void *key, *val;
2181 	bool is_percpu;
2182 	u64 ret = 0;
2183 
2184 	if (flags != 0)
2185 		return -EINVAL;
2186 
2187 	is_percpu = htab_is_percpu(htab);
2188 
2189 	roundup_key_size = round_up(map->key_size, 8);
2190 	/* Disable migration so the percpu value prepared here will be the
2191 	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
2192 	 */
2193 	if (is_percpu)
2194 		migrate_disable();
2195 	for (i = 0; i < htab->n_buckets; i++) {
2196 		b = &htab->buckets[i];
2197 		rcu_read_lock();
2198 		head = &b->head;
2199 		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2200 			key = elem->key;
2201 			if (is_percpu) {
2202 				/* current cpu value for percpu map */
2203 				pptr = htab_elem_get_ptr(elem, map->key_size);
2204 				val = this_cpu_ptr(pptr);
2205 			} else {
2206 				val = elem->key + roundup_key_size;
2207 			}
2208 			num_elems++;
2209 			ret = callback_fn((u64)(long)map, (u64)(long)key,
2210 					  (u64)(long)val, (u64)(long)callback_ctx, 0);
2211 			/* return value: 0 - continue, 1 - stop and return */
2212 			if (ret) {
2213 				rcu_read_unlock();
2214 				goto out;
2215 			}
2216 		}
2217 		rcu_read_unlock();
2218 	}
2219 out:
2220 	if (is_percpu)
2221 		migrate_enable();
2222 	return num_elems;
2223 }
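
/*
 * BPF-side sketch (an assumption: the standard bpf_for_each_map_elem()
 * helper contract, with a hypothetical callback): callback_fn receives
 * (map, key, value, ctx) and returns 0 to continue or 1 to stop, which
 * matches the check on 'ret' above.
 *
 *	static long count_elems(struct bpf_map *map, __u32 *key,
 *				__u64 *val, void *ctx)
 *	{
 *		(*(__u64 *)ctx)++;
 *		return 0;	// 0 - continue, 1 - stop
 *	}
 *
 *	// in a program: __u64 n = 0;
 *	// bpf_for_each_map_elem(&htab, count_elems, &n, 0);
 */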
2224 
2225 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
2226 const struct bpf_map_ops htab_map_ops = {
2227 	.map_meta_equal = bpf_map_meta_equal,
2228 	.map_alloc_check = htab_map_alloc_check,
2229 	.map_alloc = htab_map_alloc,
2230 	.map_free = htab_map_free,
2231 	.map_get_next_key = htab_map_get_next_key,
2232 	.map_release_uref = htab_map_free_timers,
2233 	.map_lookup_elem = htab_map_lookup_elem,
2234 	.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2235 	.map_update_elem = htab_map_update_elem,
2236 	.map_delete_elem = htab_map_delete_elem,
2237 	.map_gen_lookup = htab_map_gen_lookup,
2238 	.map_seq_show_elem = htab_map_seq_show_elem,
2239 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2240 	.map_for_each_callback = bpf_for_each_hash_elem,
2241 	BATCH_OPS(htab),
2242 	.map_btf_id = &htab_map_btf_ids[0],
2243 	.iter_seq_info = &iter_seq_info,
2244 };
2245 
2246 const struct bpf_map_ops htab_lru_map_ops = {
2247 	.map_meta_equal = bpf_map_meta_equal,
2248 	.map_alloc_check = htab_map_alloc_check,
2249 	.map_alloc = htab_map_alloc,
2250 	.map_free = htab_map_free,
2251 	.map_get_next_key = htab_map_get_next_key,
2252 	.map_release_uref = htab_map_free_timers,
2253 	.map_lookup_elem = htab_lru_map_lookup_elem,
2254 	.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2255 	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2256 	.map_update_elem = htab_lru_map_update_elem,
2257 	.map_delete_elem = htab_lru_map_delete_elem,
2258 	.map_gen_lookup = htab_lru_map_gen_lookup,
2259 	.map_seq_show_elem = htab_map_seq_show_elem,
2260 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2261 	.map_for_each_callback = bpf_for_each_hash_elem,
2262 	BATCH_OPS(htab_lru),
2263 	.map_btf_id = &htab_map_btf_ids[0],
2264 	.iter_seq_info = &iter_seq_info,
2265 };
2266 
2267 /* Called from eBPF program */
2268 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2269 {
2270 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2271 
2272 	if (l)
2273 		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2274 	else
2275 		return NULL;
2276 }
2277 
2278 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2279 {
2280 	struct htab_elem *l;
2281 
2282 	if (cpu >= nr_cpu_ids)
2283 		return NULL;
2284 
2285 	l = __htab_map_lookup_elem(map, key);
2286 	if (l)
2287 		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2288 	else
2289 		return NULL;
2290 }
2291 
2292 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2293 {
2294 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
2295 
2296 	if (l) {
2297 		bpf_lru_node_set_ref(&l->lru_node);
2298 		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2299 	}
2300 
2301 	return NULL;
2302 }
2303 
2304 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2305 {
2306 	struct htab_elem *l;
2307 
2308 	if (cpu >= nr_cpu_ids)
2309 		return NULL;
2310 
2311 	l = __htab_map_lookup_elem(map, key);
2312 	if (l) {
2313 		bpf_lru_node_set_ref(&l->lru_node);
2314 		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2315 	}
2316 
2317 	return NULL;
2318 }
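
/*
 * The two *_lookup_percpu_elem() callbacks above back the
 * bpf_map_lookup_percpu_elem() helper. BPF-side sketch (an assumption,
 * with hypothetical map/key names):
 *
 *	__u64 *val = bpf_map_lookup_percpu_elem(&pcpu_hash, &key, cpu);
 *	if (val)
 *		... value of 'key' as last written on 'cpu' ...
 */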
2319 
2320 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2321 {
2322 	struct htab_elem *l;
2323 	void __percpu *pptr;
2324 	int ret = -ENOENT;
2325 	int cpu, off = 0;
2326 	u32 size;
2327 
2328 	/* per_cpu areas are zero-filled and bpf programs can only
2329 	 * access 'value_size' of them, so copying rounded areas
2330 	 * will not leak any kernel data
2331 	 */
2332 	size = round_up(map->value_size, 8);
2333 	rcu_read_lock();
2334 	l = __htab_map_lookup_elem(map, key);
2335 	if (!l)
2336 		goto out;
2337 	/* We do not mark LRU map element here in order to not mess up
2338 	 * eviction heuristics when user space does a map walk.
2339 	 */
2340 	pptr = htab_elem_get_ptr(l, map->key_size);
2341 	for_each_possible_cpu(cpu) {
2342 		bpf_long_memcpy(value + off,
2343 				per_cpu_ptr(pptr, cpu), size);
2344 		off += size;
2345 	}
2346 	ret = 0;
2347 out:
2348 	rcu_read_unlock();
2349 	return ret;
2350 }
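
/*
 * User-space sketch (an assumption: libbpf): the syscall-side buffer
 * for a percpu hash lookup must cover every possible CPU, with the
 * value size rounded up to 8 bytes exactly as done above.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	size_t vsz = (value_size + 7) & ~7UL;
 *	void *buf = calloc(ncpus, vsz);
 *
 *	if (buf && !bpf_map_lookup_elem(map_fd, &key, buf))
 *		... buf + cpu * vsz holds the copy made for 'cpu' ...
 */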
2351 
2352 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2353 			   u64 map_flags)
2354 {
2355 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2356 	int ret;
2357 
2358 	rcu_read_lock();
2359 	if (htab_is_lru(htab))
2360 		ret = __htab_lru_percpu_map_update_elem(map, key, value,
2361 							map_flags, true);
2362 	else
2363 		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2364 						    true);
2365 	rcu_read_unlock();
2366 
2367 	return ret;
2368 }
2369 
2370 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2371 					  struct seq_file *m)
2372 {
2373 	struct htab_elem *l;
2374 	void __percpu *pptr;
2375 	int cpu;
2376 
2377 	rcu_read_lock();
2378 
2379 	l = __htab_map_lookup_elem(map, key);
2380 	if (!l) {
2381 		rcu_read_unlock();
2382 		return;
2383 	}
2384 
2385 	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2386 	seq_puts(m, ": {\n");
2387 	pptr = htab_elem_get_ptr(l, map->key_size);
2388 	for_each_possible_cpu(cpu) {
2389 		seq_printf(m, "\tcpu%d: ", cpu);
2390 		btf_type_seq_show(map->btf, map->btf_value_type_id,
2391 				  per_cpu_ptr(pptr, cpu), m);
2392 		seq_puts(m, "\n");
2393 	}
2394 	seq_puts(m, "}\n");
2395 
2396 	rcu_read_unlock();
2397 }
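
/*
 * Resulting bpffs dump format (derived from the seq_puts/seq_printf
 * calls above):
 *
 *	<key>: {
 *		cpu0: <value>
 *		cpu1: <value>
 *		...
 *	}
 */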
2398 
2399 const struct bpf_map_ops htab_percpu_map_ops = {
2400 	.map_meta_equal = bpf_map_meta_equal,
2401 	.map_alloc_check = htab_map_alloc_check,
2402 	.map_alloc = htab_map_alloc,
2403 	.map_free = htab_map_free,
2404 	.map_get_next_key = htab_map_get_next_key,
2405 	.map_lookup_elem = htab_percpu_map_lookup_elem,
2406 	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2407 	.map_update_elem = htab_percpu_map_update_elem,
2408 	.map_delete_elem = htab_map_delete_elem,
2409 	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2410 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2411 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2412 	.map_for_each_callback = bpf_for_each_hash_elem,
2413 	BATCH_OPS(htab_percpu),
2414 	.map_btf_id = &htab_map_btf_ids[0],
2415 	.iter_seq_info = &iter_seq_info,
2416 };
2417 
2418 const struct bpf_map_ops htab_lru_percpu_map_ops = {
2419 	.map_meta_equal = bpf_map_meta_equal,
2420 	.map_alloc_check = htab_map_alloc_check,
2421 	.map_alloc = htab_map_alloc,
2422 	.map_free = htab_map_free,
2423 	.map_get_next_key = htab_map_get_next_key,
2424 	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2425 	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2426 	.map_update_elem = htab_lru_percpu_map_update_elem,
2427 	.map_delete_elem = htab_lru_map_delete_elem,
2428 	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2429 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
2430 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
2431 	.map_for_each_callback = bpf_for_each_hash_elem,
2432 	BATCH_OPS(htab_lru_percpu),
2433 	.map_btf_id = &htab_map_btf_ids[0],
2434 	.iter_seq_info = &iter_seq_info,
2435 };
2436 
2437 static int fd_htab_map_alloc_check(union bpf_attr *attr)
2438 {
2439 	if (attr->value_size != sizeof(u32))
2440 		return -EINVAL;
2441 	return htab_map_alloc_check(attr);
2442 }
2443 
2444 static void fd_htab_map_free(struct bpf_map *map)
2445 {
2446 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2447 	struct hlist_nulls_node *n;
2448 	struct hlist_nulls_head *head;
2449 	struct htab_elem *l;
2450 	int i;
2451 
2452 	for (i = 0; i < htab->n_buckets; i++) {
2453 		head = select_bucket(htab, i);
2454 
2455 		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2456 			void *ptr = fd_htab_map_get_ptr(map, l);
2457 
2458 			map->ops->map_fd_put_ptr(ptr);
2459 		}
2460 	}
2461 
2462 	htab_map_free(map);
2463 }
2464 
2465 /* only called from syscall */
2466 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2467 {
2468 	void **ptr;
2469 	int ret = 0;
2470 
2471 	if (!map->ops->map_fd_sys_lookup_elem)
2472 		return -ENOTSUPP;
2473 
2474 	rcu_read_lock();
2475 	ptr = htab_map_lookup_elem(map, key);
2476 	if (ptr)
2477 		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2478 	else
2479 		ret = -ENOENT;
2480 	rcu_read_unlock();
2481 
2482 	return ret;
2483 }
2484 
2485 /* only called from syscall */
2486 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2487 				void *key, void *value, u64 map_flags)
2488 {
2489 	void *ptr;
2490 	int ret;
2491 	u32 ufd = *(u32 *)value;
2492 
2493 	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2494 	if (IS_ERR(ptr))
2495 		return PTR_ERR(ptr);
2496 
2497 	ret = htab_map_update_elem(map, key, &ptr, map_flags);
2498 	if (ret)
2499 		map->ops->map_fd_put_ptr(ptr);
2500 
2501 	return ret;
2502 }
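
/*
 * User-space sketch (an assumption: libbpf map-in-map setup): the value
 * written into an fd map is an inner map fd; map_fd_get_ptr() above
 * turns it into a kernel pointer, and syscall-side lookups report the
 * inner map id instead.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
 *	int outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
 *				      sizeof(__u32), sizeof(__u32), 8,
 *				      &opts);
 *	__u32 key = 1;
 *	bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */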
2503 
2504 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2505 {
2506 	struct bpf_map *map, *inner_map_meta;
2507 
2508 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2509 	if (IS_ERR(inner_map_meta))
2510 		return inner_map_meta;
2511 
2512 	map = htab_map_alloc(attr);
2513 	if (IS_ERR(map)) {
2514 		bpf_map_meta_free(inner_map_meta);
2515 		return map;
2516 	}
2517 
2518 	map->inner_map_meta = inner_map_meta;
2519 
2520 	return map;
2521 }
2522 
2523 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2524 {
2525 	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
2526 
2527 	if (!inner_map)
2528 		return NULL;
2529 
2530 	return READ_ONCE(*inner_map);
2531 }
2532 
2533 static int htab_of_map_gen_lookup(struct bpf_map *map,
2534 				  struct bpf_insn *insn_buf)
2535 {
2536 	struct bpf_insn *insn = insn_buf;
2537 	const int ret = BPF_REG_0;
2538 
2539 	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2540 		     (void *(*)(struct bpf_map *map, void *key))NULL));
2541 	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2542 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2543 	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2544 				offsetof(struct htab_elem, key) +
2545 				round_up(map->key_size, 8));
2546 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2547 
2548 	return insn - insn_buf;
2549 }
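
/*
 * The emitted instructions are roughly equivalent to this C (a sketch;
 * the verifier inlines it into the calling program):
 *
 *	l = __htab_map_lookup_elem(map, key);
 *	if (!l)
 *		return NULL;
 *	return READ_ONCE(*(void **)(l->key + round_up(map->key_size, 8)));
 */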
2550 
2551 static void htab_of_map_free(struct bpf_map *map)
2552 {
2553 	bpf_map_meta_free(map->inner_map_meta);
2554 	fd_htab_map_free(map);
2555 }
2556 
2557 const struct bpf_map_ops htab_of_maps_map_ops = {
2558 	.map_alloc_check = fd_htab_map_alloc_check,
2559 	.map_alloc = htab_of_map_alloc,
2560 	.map_free = htab_of_map_free,
2561 	.map_get_next_key = htab_map_get_next_key,
2562 	.map_lookup_elem = htab_of_map_lookup_elem,
2563 	.map_delete_elem = htab_map_delete_elem,
2564 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
2565 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
2566 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2567 	.map_gen_lookup = htab_of_map_gen_lookup,
2568 	.map_check_btf = map_check_no_btf,
2569 	BATCH_OPS(htab),
2570 	.map_btf_id = &htab_map_btf_ids[0],
2571 };
2572