/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)

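/* Each bucket carries its own raw spinlock, so updates and deletes in
 * different buckets never contend with each other.
 */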
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
	u32 hashrnd;
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};
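/* An element's storage is the htab_elem header above followed by the
 * key and then the value area: the value itself for regular maps, or a
 * pointer to the per-CPU value storage for per-CPU maps (see
 * htab_elem_set_ptr() and htab_elem_get_ptr() below).
 */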

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

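/* Grab a free node from the map's LRU free list. This can evict an
 * older element from the hash table via htab_lru_map_delete_node().
 * Only the key is copied here; the caller fills in the value.
 */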
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

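/* Pre-allocate every element up front. Plain (non-LRU, non-per-CPU)
 * maps get num_possible_cpus() spare elements on top of max_entries;
 * those spares are handed out by alloc_extra_elems() below.
 */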
static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

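/* Reserve one spare element per possible CPU from the freelist. The
 * spare is swapped with the old element in alloc_htab_elem() when an
 * existing key is updated, avoiding a freelist pop/push under the
 * bucket lock.
 */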
static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* The pop will succeed, since prealloc_init()
		 * preallocated num_possible_cpus() extra elements.
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each CPU has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH, where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than
		 * other maps. Hence, limit it to CAP_SYS_ADMIN for now.
		 */
		return -EPERM;

	if (zero_seed && !capable(CAP_SYS_ADMIN))
		/* Guard against local DoS, and discourage production use. */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* Check sanity of the attributes.
	 * value_size == 0 may be allowed in the future to use the map
	 * as a set.
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if (attr->key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot
		 * be larger than the max stack size.
		 */
		return -E2BIG;

	if (attr->value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* If value_size is bigger, user space won't be able to
		 * access the elements via the bpf syscall. This check
		 * also makes sure that elem_size doesn't overflow and is
		 * kmalloc-able later in htab_map_update_elem().
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each CPU has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH, where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* Ensure each CPU's LRU list has >= 1 element.
		 * While we are at it, make each LRU list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be a power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	err = -E2BIG;
	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	if (htab->map.map_flags & BPF_F_ZERO_SEED)
		htab->hashrnd = 0;
	else
		htab->hashrnd = get_random_int();

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* LRU itself can remove the least used element,
			 * so there is no need for an extra elem during
			 * map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
	return jhash(key, key_len, hashrnd);
}

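/* n_buckets is always a power of two (see htab_map_alloc()), so the
 * mask below is equivalent to hash % n_buckets; e.g. with
 * n_buckets == 1024 this is hash & 1023.
 */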
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with the bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* Can be called without the bucket lock. It will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list was being walked. Each bucket head carries its
 * bucket index as the nulls value (see INIT_HLIST_NULLS_HEAD() in
 * htab_map_alloc()), so a walk that ends on a foreign bucket's nulls
 * marker is detected and retried.
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
							void *key, const bool mark)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		if (mark)
			bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, true);
}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, false);
}

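/* Inlined LRU lookup: like htab_map_gen_lookup(), but on a hit the
 * emitted sequence also sets lru_node->ref, storing to it only when it
 * is not already set so the common case avoids a write.
 */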
static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

/* Called from the bpf_lru_list code when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

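/* Iteration is bucket by bucket, in list order within a bucket. If
 * @key is NULL or no longer in the map, iteration restarts from the
 * first bucket.
 */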
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if the next elem in this hash list is non-NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* We must increment bpf_prog_active to avoid a kprobe+bpf
	 * program triggering while we're calling kfree(); otherwise a
	 * deadlock is possible if kprobes are placed somewhere inside
	 * of SLUB.
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

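/* Pre-allocated elements go straight back to the freelist: their memory
 * stays owned by the map, so concurrent RCU readers never see freed
 * memory. Dynamically allocated elements are kfree'd only after an RCU
 * grace period, via htab_elem_free_rcu().
 */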
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		__pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy only the true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

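/* Pick the allocation source for a new element: the per-CPU spare when
 * a pre-allocated map is replacing an existing key, the freelist when
 * it is inserting a new key, and kmalloc() (bounded by max_entries)
 * otherwise.
 */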
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab->map.value_size;
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = __pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* When the map is full and update() is
				 * replacing an old element, it's OK to
				 * allocate, since the old element will be
				 * freed immediately. Otherwise return an
				 * error.
				 */
				l_new = ERR_PTR(-E2BIG);
				goto dec_count;
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
		check_and_init_map_lock(&htab->map,
					l_new->key + round_up(key_size, 8));
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		size = round_up(size, 8);
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else if (fd_htab_map_needs_adjust(htab)) {
		size = round_up(size, 8);
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	} else {
		copy_map_value(&htab->map,
			       l_new->key + round_up(key_size, 8),
			       value);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	atomic_dec(&htab->count);
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	if (unlikely(map_flags & BPF_F_LOCK)) {
		if (unlikely(!map_value_has_spin_lock(map)))
			return -EINVAL;
		/* find an element without taking the bucket lock */
		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
					      htab->n_buckets);
		ret = check_flags(htab, l_old, map_flags);
		if (ret)
			return ret;
		if (l_old) {
			/* grab the element lock and update value in place */
			copy_map_value_locked(map,
					      l_old->key + round_up(key_size, 8),
					      value, false);
			return 0;
		}
		/* Fall through: grab the bucket lock and look up again.
		 * There is a 99.9% chance that the element won't be found,
		 * but the second lookup under the lock has to be done.
		 */
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
		/* The first lookup without the bucket lock didn't find the
		 * element, but the second lookup with the bucket lock found
		 * it. This case is highly unlikely, but has to be dealt
		 * with: grab the element lock in addition to the bucket
		 * lock and update the element in place.
		 */
		copy_map_value_locked(map,
				      l_old->key + round_up(key_size, 8),
				      value, false);
		ret = 0;
		goto err;
	}

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add the new element to the head of the list, so that a
	 * concurrent search will find it before the old element
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking the bucket's spinlock
	 * because getting a free node from the LRU may need to remove
	 * older elements from the htab, and that removal operation will
	 * need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add the new element to the head of the list, so that a
	 * concurrent search will find it before the old element
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

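	/* On failure, return the unused new node to the LRU free list;
	 * on success, free the old node it replaced, if any.
	 */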
	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking the bucket's spinlock
	 * because the LRU's elem alloc may need to remove older elems
	 * from the htab, and that removal operation will need a bucket
	 * lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete.
	 */
	synchronize_rcu();

	/* Some of the free_htab_elem() callbacks for elements of this map
	 * may not have executed yet. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
				   struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = htab_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": ");
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark LRU map elements here in order to not mess up
	 * the eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

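/* Same inlined lookup as htab_map_gen_lookup(), plus one extra load to
 * dereference the stored inner-map pointer, mirroring
 * htab_of_map_lookup_elem() above.
 */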
static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};