/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}
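
/*
 * Illustration only, not part of this file: a minimal sketch of how
 * user space would create an array map via the bpf(2) syscall.  The
 * attribute values mirror the sanity checks in array_map_alloc()
 * below: key_size must be 4 (the key is always a u32 index), and
 * value_size and max_entries must be non-zero.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
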
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags & ~BPF_F_NUMA_NODE ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.numa_node = numa_node;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}
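
/*
 * The instruction sequence emitted by array_map_gen_lookup() below is
 * roughly equivalent to the following pseudo-assembly (illustration
 * only; r1 holds the map pointer and r2 the pointer to the key, per
 * the map helper calling convention):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// index
 *	if r0 >= max_entries goto miss
 *	r0 <<= ilog2(elem_size)		// or r0 *= elem_size
 *	r0 += r1			// &array->value[index * elem_size]
 *	goto out
 * miss:
 *	r0 = 0				// NULL
 * out:
 *
 * This lets the verifier patch the lookup inline into the program
 * instead of emitting a helper call.
 */
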
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}
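
/*
 * For the per-cpu copy helpers below: the syscall-side value buffer
 * holds one round_up(value_size, 8) slot for every possible CPU, back
 * to back.  A user-space consumer would size and index it like this
 * (illustrative sketch only; nr_cpus stands for the number of possible
 * CPUs, which the caller must obtain itself, and round_up is spelled
 * out for clarity):
 *
 *	u32 slot = (value_size + 7) & ~7;	// round_up(value_size, 8)
 *	void *buf = malloc((size_t)slot * nr_cpus);
 *	// after BPF_MAP_LOOKUP_ELEM, the value for CPU n
 *	// starts at buf + n * slot
 */
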
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area.  bpf programs can only
	 * access value_size of it.  During lookup the same extra bytes
	 * will be returned, or zeros which percpu_alloc zero-filled,
	 * so no kernel data can leak.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events.  Wait for outstanding programs to
	 * complete and free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};
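
/*
 * The fd-array variants below back the map types whose values are
 * kernel objects handed in by file descriptor (programs, perf events,
 * cgroups, inner maps).  Update converts the fd into an object
 * reference via ->map_fd_get_ptr(); delete drops it via
 * ->map_fd_put_ptr().  A prog array, for instance, is the target of
 * tail calls from a BPF program (illustration only, not part of this
 * file):
 *
 *	bpf_tail_call(ctx, &prog_array, index);
 *	// control reaches here only if the tail call failed,
 *	// e.g. because the slot at 'index' is empty
 */
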
static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
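
/*
 * Perf event arrays store one struct bpf_event_entry per slot.  The
 * entry holds a reference on the perf event's struct file (dropped via
 * fput() after an RCU grace period) so the event cannot go away while
 * a program may still be writing to it, e.g. via (illustration only,
 * not part of this file):
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */
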
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by the syscall path,
	 * which is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};