/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_RDONLY | BPF_F_WRONLY)

struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};
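/* In-memory layout of one element, as implied by the elem_size computation
 * in htab_map_alloc() and by htab_elem_get_ptr()/htab_elem_set_ptr():
 *
 *   struct htab_elem | key (round_up(key_size, 8)) | value area
 *
 * For regular hash maps the value area holds round_up(value_size, 8) bytes
 * inline; for per-cpu maps it holds a single pointer to a per-cpu allocation
 * that stores the actual values.
 */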

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}
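
/* Note on extra_elems: for plain (non-LRU, non-percpu) preallocated maps,
 * prealloc_init() sets aside num_possible_cpus() elements beyond max_entries
 * and alloc_extra_elems() parks one of them per CPU. When an update replaces
 * an existing key, alloc_htab_elem() hands out this CPU's parked element and
 * parks the old one in its place, so replacing a value never has to touch the
 * per-cpu freelist under the bucket lock.
 */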

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than the
		 * other maps, hence limit it to CAP_SYS_ADMIN for now.
		 */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if (attr->key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		return -E2BIG;

	if (attr->value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * It is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* ensure each CPU's LRU list has >= 1 element.
		 * While we are at it, make each LRU list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	err = -E2BIG;
	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. It will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}
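
/* How the nulls-based retry above works: htab_map_alloc() initializes each
 * bucket's list with INIT_HLIST_NULLS_HEAD(head, i), so the "nulls" value
 * that terminates a bucket's list encodes that bucket's index. If a lockless
 * walk ends on a nulls value that does not match the bucket we started from,
 * an element we followed was freed and reused in another bucket while we were
 * walking, so the walk is restarted.
 */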

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}
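
/* The sequence emitted by htab_lru_map_gen_lookup() mirrors
 * htab_lru_map_lookup_elem(): call __htab_map_lookup_elem(), bail out on
 * NULL, test the lru_node ref byte and set it to 1 only if it was clear
 * (avoiding a needless store on hot elements), then advance the pointer past
 * the header and key so R0 points at the value.
 */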

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if there is a next element in this hash list, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}
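
/* Element lifetime in a nutshell: preallocated maps never kfree() individual
 * elements; free_htab_elem() just returns the slot to the per-cpu freelist and
 * the backing array is released in one go by prealloc_destroy(). Maps created
 * with BPF_F_NO_PREALLOC kmalloc each element and must defer the kfree via
 * call_rcu() so that lockless lookups still walking the element stay safe.
 */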

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
{
	u32 size = htab->map.value_size;

	if (percpu || fd_htab_map_needs_adjust(htab))
		size = round_up(size, 8);
	return size;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab_size_value(htab, percpu);
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				l_new = ERR_PTR(-E2BIG);
				goto dec_count;
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	atomic_dec(&htab->count);
	return l_new;
}
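
/* For non-preallocated maps, htab->count is the admission control:
 * alloc_htab_elem() bumps it before allocating and undoes the increment at
 * dec_count on failure, or when the map is already at max_entries and the
 * update is not replacing an existing element. Preallocated maps do not use
 * the counter; capacity is enforced by the freelist running empty.
 */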

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
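
/* Note on the replace path above: the new element is linked in before the old
 * one is unlinked, so a concurrent lookup always finds one of the two. For
 * preallocated maps the old element is intentionally not passed to
 * free_htab_elem() here; alloc_htab_elem() has already parked it in this
 * CPU's extra_elems slot, where it will be reused by the next update.
 */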

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}
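
/* Note: the replaced (or, on error, unused) element is pushed back to the LRU
 * free list only after the bucket lock is dropped. The LRU shrink path takes
 * bucket locks from within the LRU code (see htab_lru_map_delete_node()), so
 * avoiding the reverse nesting here keeps the lock ordering consistent.
 */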

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so all programs that used this map (there can be more than one)
	 * have been disconnected from their events. Wait for outstanding
	 * critical sections in those programs to complete
	 */
	synchronize_rcu();

	/* some of the free_htab_elem() callbacks for elements of this map may
	 * not have executed yet. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
				   struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = htab_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": ");
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
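
/* Layout of the buffer filled by bpf_percpu_hash_copy(): one slot of
 * round_up(value_size, 8) bytes per possible CPU, concatenated in CPU order,
 * which is what the bpf(2) syscall hands back for per-cpu hash map lookups.
 */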

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	/* unlike htab_map_gen_lookup(), dereference the value: it stores a
	 * pointer to the inner map, which is what the program should see
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};