/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
	 BPF_F_RDONLY | BPF_F_WRONLY)

struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}
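/* Note: popping a free node can make the LRU evict.  bpf_lru_pop_free()
 * may have to shrink the LRU lists and, while doing so, calls back into
 * htab_lru_map_delete_node() (registered in prealloc_init() below) to
 * unlink the evicted element from its hash bucket.
 */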
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}
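/* One spare element per possible CPU.  prealloc_init() sized the elems
 * area with num_possible_cpus() extra entries for exactly this purpose,
 * which is why every pop below is guaranteed to succeed.  The spares let
 * htab_map_update_elem() replace an existing key without touching the
 * freelist while holding the bucket lock.
 */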
static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than
		 * other maps.  Hence, limit to CAP_SYS_ADMIN for now.
		 */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if (attr->key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		return -E2BIG;

	if (attr->value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall.  This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		return -E2BIG;

	return 0;
}
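/* Full allocation path.  htab_map_alloc_check() has already validated
 * the attributes, so this only has to size the table (n_buckets is
 * rounded up to a power of two so that __select_bucket() can mask with
 * "hash & (n_buckets - 1)" instead of taking a modulo), charge the
 * memlock limit and allocate the buckets, plus all elements when
 * preallocation is requested.
 */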
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* ensure each CPU's lru list has >=1 elements.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	err = -E2BIG;
	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}
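/* Each bucket head was initialized with its bucket index as the nulls
 * value (see INIT_HLIST_NULLS_HEAD() above).  A lockless walker that
 * reaches a nulls marker can therefore check whether it ended up in the
 * bucket it started from; if not, the element it was traversing was
 * recycled into another bucket mid-walk and the walk must be restarted.
 */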
/* can be called without bucket lock.  it will repeat the loop in
 * the unlikely event that elements moved from one bucket to another
 * while the linked list was being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}
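/* Inlined equivalent of htab_lru_map_lookup_elem(): on a hit, load
 * lru_node.ref and store 1 only if it is still zero (presumably to
 * avoid dirtying the cache line of a hot element on every lookup),
 * then advance the pointer past the header and key to the value.
 */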
static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of SLUB
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}
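/* Copy a value into a per-cpu area.  On the in-program path only this
 * CPU's slot is written, and only value_size bytes of it.  On the
 * syscall path (onallcpus) the caller supplies one value per possible
 * CPU, laid out with a stride of round_up(value_size, 8).
 */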
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
{
	u32 size = htab->map.value_size;

	if (percpu || fd_htab_map_needs_adjust(htab))
		size = round_up(size, 8);
	return size;
}
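/* Get a free element: from this CPU's spare when an existing key is
 * being replaced in a preallocated map (the old element becomes the
 * new spare, which is why htab_map_update_elem() does not free it),
 * from the freelist otherwise, or via kmalloc for BPF_F_NO_PREALLOC
 * maps, where htab->count enforces max_entries.
 */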
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab_size_value(htab, percpu);
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * an old element, it's ok to allocate, since
				 * the old element will be freed immediately.
				 * Otherwise return an error
				 */
				atomic_dec(&htab->count);
				return ERR_PTR(-E2BIG);
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new)
			return ERR_PTR(-ENOMEM);
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
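/* The LRU variant must take its new node from the LRU before grabbing
 * the bucket lock (see the comment in the function body), so cleanup
 * happens after the lock is dropped: on failure the unused l_new goes
 * back to the LRU free list, on a successful replace the unlinked
 * l_old does.
 */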
static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
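/* Same pre-lock allocation scheme as htab_lru_map_update_elem(), except
 * that with BPF_EXIST no node is popped at all: such an update must hit
 * an existing element and is done in place, so only the !l_old path
 * consumes l_new, and a leftover l_new is pushed back after unlock.
 */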
static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}
/* Called when map->refcnt goes to zero, either from workqueue or from
 * syscall
 */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events.  Wait for outstanding critical
	 * sections in these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed.  Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}
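/* Neither per-cpu ops table has a .map_gen_lookup: the value sits
 * behind a per-cpu pointer stored after the key, so the lookup cannot
 * be inlined as a constant offset from the element the way
 * htab_map_gen_lookup() does it.
 */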
const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
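/* Like htab_map_gen_lookup(), but emit one extra BPF_LDX_MEM(BPF_DW) to
 * dereference the stored inner-map pointer, mirroring the READ_ONCE()
 * in htab_of_map_lookup_elem() above.
 */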
static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
};