/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
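
/* Editor's note: rough C equivalent of the sequence emitted by
 * array_map_gen_lookup() above (an illustrative sketch, not code from the
 * original file). On entry R1 holds the map pointer and R2 points to the
 * key; the result is left in R0:
 *
 *	elem = (void *)map + offsetof(struct bpf_array, value);
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	if (map->unpriv_array)
 *		index &= array->index_mask;	(bounds speculative access)
 *	return elem + index * elem_size;	(LSH when elem_size is a
 *						 power of two, MUL otherwise)
 */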

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
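	/* Editor's note: xchg() clears the slot atomically, but a concurrent
	 * lookup may still hold the old pointer under rcu_read_lock(); the
	 * map_fd_put_ptr() implementation invoked below defers the final
	 * release where needed (e.g. perf event entries go through
	 * bpf_event_entry_free_rcu(), cgroups are freed after an RCU grace
	 * period).
	 */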
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};