#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserve 8 bits for extensibility, and take
 * into account few extra pages for consumer/producer pages and
 * non-mmap()'able parts. This gives 64GB limit, which seems plenty for single
 * ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
	(((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)

struct bpf_ringbuf {
	wait_queue_head_t waitq;
	struct irq_work work;
	u64 mask;
	struct page **pages;
	int nr_pages;
	spinlock_t spinlock ____cacheline_aligned_in_smp;
	/* Consumer and producer counters are put into separate pages to
	 * allow each position to be mapped with different permissions.
	 * This prevents a user-space application from modifying the
	 * position and ruining in-kernel tracking. The permissions of the
	 * pages depend on who is producing samples: user-space or the
	 * kernel.
	 *
	 * Kernel-producer
	 * ---------------
	 * The producer position and data pages are mapped as r/o in
	 * userspace. For this approach, bits in the header of samples are
	 * used to signal to user-space, and to other producers, whether a
	 * sample is currently being written.
	 *
	 * User-space producer
	 * -------------------
	 * Only the page containing the consumer position is mapped r/o in
	 * user-space. User-space producers also use bits of the header to
	 * communicate to the kernel, but the kernel must carefully check and
	 * validate each sample to ensure that they're correctly formatted, and
	 * fully contained within the ring buffer.
	 */
	unsigned long consumer_pos __aligned(PAGE_SIZE);
	unsigned long producer_pos __aligned(PAGE_SIZE);
	char data[] __aligned(PAGE_SIZE);
};

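/* Illustrative sketch (not part of the original file): how a user-space
 * consumer of a kernel-producer BPF_MAP_TYPE_RINGBUF typically mmap()s the
 * map fd, matching the permission scheme described above. This mirrors what
 * libbpf does; map_fd and max_entries are assumed to come from the caller.
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *
 *	// consumer_pos page: the only page user-space may map writable
 *	void *cons = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, map_fd, 0);
 *
 *	// producer_pos page plus the double-mapped data area: read-only
 *	// for user-space; the size is one page plus twice the data size
 *	void *prod = mmap(NULL, page_size + 2 * max_entries, PROT_READ,
 *			  MAP_SHARED, map_fd, page_size);
 *
 *	unsigned long *consumer_pos = cons;
 *	unsigned long *producer_pos = prod;
 *	void *data = (char *)prod + page_size;
 */
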
struct bpf_ringbuf_map {
	struct bpf_map map;
	struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
	u32 len;
	u32 pg_off;
};

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
	const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
			    __GFP_NOWARN | __GFP_ZERO;
	int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
	int nr_data_pages = data_sz >> PAGE_SHIFT;
	int nr_pages = nr_meta_pages + nr_data_pages;
	struct page **pages, *page;
	struct bpf_ringbuf *rb;
	size_t array_size;
	int i;

	/* Each data page is mapped twice to allow "virtual"
	 * continuous read of samples wrapping around the end of ring
	 * buffer area:
	 * ------------------------------------------------------
	 * | meta pages |  real data pages  |  same data pages  |
	 * ------------------------------------------------------
	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
	 * ------------------------------------------------------
	 * |            | TA             DA | TA             DA |
	 * ------------------------------------------------------
	 *                               ^^^^^^^
	 *                                  |
	 * Here, no need to worry about special handling of wrapped-around
	 * data due to double-mapped data pages. This works both in kernel and
	 * when mmap()'ed in user-space, simplifying both kernel and
	 * user-space implementations significantly.
	 */
	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
	pages = bpf_map_area_alloc(array_size, numa_node);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_pages_node(numa_node, flags, 0);
		if (!page) {
			nr_pages = i;
			goto err_free_pages;
		}
		pages[i] = page;
		if (i >= nr_meta_pages)
			pages[nr_data_pages + i] = page;
	}

	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		  VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (rb) {
		kmemleak_not_leak(pages);
		rb->pages = pages;
		rb->nr_pages = nr_pages;
		return rb;
	}

err_free_pages:
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
	return NULL;
}

static void bpf_ringbuf_notify(struct irq_work *work)
{
	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

	wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return NULL;

	spin_lock_init(&rb->spinlock);
	init_waitqueue_head(&rb->waitq);
	init_irq_work(&rb->work, bpf_ringbuf_notify);

	rb->mask = data_sz - 1;
	rb->consumer_pos = 0;
	rb->producer_pos = 0;

	return rb;
}

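/* Illustrative sketch (not part of the original file): the consumer-side
 * loop that the double-mapped data area enables. Because each data page
 * appears twice, a record that wraps past the end of the data area can
 * still be read as one contiguous range. This roughly follows what libbpf's
 * ring_buffer__consume() does; consumer_pos, producer_pos and data are the
 * mmap()'ed pieces from the earlier sketch, mask is max_entries - 1, and
 * handle_sample() is an assumed callback.
 *
 *	unsigned long prod, cons = *consumer_pos;
 *
 *	prod = __atomic_load_n(producer_pos, __ATOMIC_ACQUIRE);
 *	while (cons < prod) {
 *		__u32 *hdr = data + (cons & mask);
 *		__u32 raw = __atomic_load_n(hdr, __ATOMIC_ACQUIRE);
 *		__u32 len = raw & ~(BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
 *
 *		if (raw & BPF_RINGBUF_BUSY_BIT)
 *			break;			// record not committed yet
 *		if (!(raw & BPF_RINGBUF_DISCARD_BIT))
 *			handle_sample((void *)hdr + BPF_RINGBUF_HDR_SZ, len);
 *		cons += (len + BPF_RINGBUF_HDR_SZ + 7) / 8 * 8;
 *		__atomic_store_n(consumer_pos, cons, __ATOMIC_RELEASE);
 *	}
 */
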
static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
	struct bpf_ringbuf_map *rb_map;

	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size || attr->value_size ||
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
	if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb_map = bpf_map_area_alloc(sizeof(*rb_map), NUMA_NO_NODE);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&rb_map->map, attr);

	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
	if (!rb_map->rb) {
		bpf_map_area_free(rb_map);
		return ERR_PTR(-ENOMEM);
	}

	return &rb_map->map;
}

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy pages pointer and nr_pages to local variable, as we are going
	 * to unmap rb itself with vunmap() below
	 */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	bpf_ringbuf_free(rb_map->rb);
	bpf_map_area_free(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-ENOTSUPP);
}

static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				   u64 flags)
{
	return -ENOTSUPP;
}

static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		/* allow writable mapping for the consumer_pos only */
		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EPERM;
	} else {
		vma->vm_flags &= ~VM_MAYWRITE;
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb,
				   vma->vm_pgoff + RINGBUF_PGOFF);
}

static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		if (vma->vm_pgoff == 0)
			/* Disallow writable mappings to the consumer pointer,
			 * and allow writable mappings to both the producer
			 * position, and the ring buffer data itself.
			 */
			return -EPERM;
	} else {
		vma->vm_flags &= ~VM_MAYWRITE;
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
}

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos;

	cons_pos = smp_load_acquire(&rb->consumer_pos);
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
				 struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_kern,
	.map_poll = ringbuf_map_poll,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_btf_id = &ringbuf_map_btf_ids[0],
};

BTF_ID_LIST_SINGLE(user_ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops user_ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_user,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_btf_id = &user_ringbuf_map_btf_ids[0],
};

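/* Illustrative sketch (not part of the original file): ringbuf_map_poll()
 * above is what lets a consumer block on the map fd instead of busy-polling
 * the producer position. Roughly along these lines (libbpf's
 * ring_buffer__poll() wraps the same idea); map_fd and consume_all() are
 * assumed helpers for the example.
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, map_fd, &ev);
 *	for (;;) {
 *		struct epoll_event got;
 *
 *		if (epoll_wait(epfd, &got, 1, -1) > 0)
 *			consume_all();	// drain records as in the loop above
 *	}
 */
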
/* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,
 * calculate offset from record metadata to ring buffer in pages, rounded
 * down. This page offset is stored as part of record metadata and allows to
 * restore struct bpf_ringbuf * from record pointer. This page offset is
 * stored at offset 4 of record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{
	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given pointer to ring buffer record header, restore pointer to struct
 * bpf_ringbuf itself by using page offset stored at offset 4
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
	unsigned long addr = (unsigned long)(void *)hdr;
	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

	return (void*)((addr & PAGE_MASK) - off);
}

static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
	u32 len, pg_off;
	struct bpf_ringbuf_hdr *hdr;

	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
		return NULL;

	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
	if (len > rb->mask + 1)
		return NULL;

	cons_pos = smp_load_acquire(&rb->consumer_pos);

	if (in_nmi()) {
		if (!spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		spin_lock_irqsave(&rb->spinlock, flags);
	}

	prod_pos = rb->producer_pos;
	new_prod_pos = prod_pos + len;

	/* check for out of ringbuf space by ensuring producer position
	 * doesn't advance more than (ringbuf_size - 1) ahead
	 */
	if (new_prod_pos - cons_pos > rb->mask) {
		spin_unlock_irqrestore(&rb->spinlock, flags);
		return NULL;
	}

	hdr = (void *)rb->data + (prod_pos & rb->mask);
	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pg_off = pg_off;

	/* pairs with consumer's smp_load_acquire() */
	smp_store_release(&rb->producer_pos, new_prod_pos);

	spin_unlock_irqrestore(&rb->spinlock, flags);

	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
	.func		= bpf_ringbuf_reserve,
	.ret_type	= RET_PTR_TO_ALLOC_MEM_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* update record header with correct final size prefix */
	xchg(&hdr->len, new_len);

	/* if consumer caught up and is waiting for our record, notify about
	 * new data availability
	 */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
	.func		= bpf_ringbuf_submit,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_ALLOC_MEM | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

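/* Illustrative sketch (not part of the original file): the reserve/commit
 * pattern as seen from a BPF program. bpf_ringbuf_reserve() hands out a
 * verifier-tracked pointer that must be submitted or discarded on every
 * path; rb is an assumed BPF_MAP_TYPE_RINGBUF map, struct event an assumed
 * sample layout, and fill_event() an assumed helper.
 *
 *	struct event *e;
 *
 *	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *	if (!e)
 *		return 0;			// buffer full, sample dropped
 *
 *	e->pid = bpf_get_current_pid_tgid() >> 32;
 *	if (!fill_event(e)) {
 *		bpf_ringbuf_discard(e, 0);	// release without publishing
 *		return 0;
 *	}
 *	bpf_ringbuf_submit(e, 0);		// or BPF_RB_NO_WAKEUP / BPF_RB_FORCE_WAKEUP
 */
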
BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, true /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
	.func		= bpf_ringbuf_discard,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_ALLOC_MEM | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size);
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
	.func		= bpf_ringbuf_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return rb->mask + 1;
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	default:
		return 0;
	}
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
	.func		= bpf_ringbuf_query,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

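/* Illustrative sketch (not part of the original file): bpf_ringbuf_output()
 * above is the copy-based alternative to reserve/submit: the sample is built
 * elsewhere and copied into the ring in one call, at the cost of an extra
 * memcpy. bpf_ringbuf_query() can be used, for example, to back off when the
 * buffer is filling up; rb and struct event are assumed as before.
 *
 *	struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };
 *
 *	if (bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA) >
 *	    bpf_ringbuf_query(&rb, BPF_RB_RING_SIZE) / 2)
 *		return 0;		// more than half full, drop the sample
 *
 *	return bpf_ringbuf_output(&rb, &e, sizeof(e), 0);
 */
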
BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
	   struct bpf_dynptr_kern *, ptr)
{
	struct bpf_ringbuf_map *rb_map;
	void *sample;
	int err;

	if (unlikely(flags)) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	err = bpf_dynptr_check_size(size);
	if (err) {
		bpf_dynptr_set_null(ptr);
		return err;
	}

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	sample = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!sample) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
	.func		= bpf_ringbuf_reserve_dynptr,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
};

BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, false /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
	.func		= bpf_ringbuf_submit_dynptr,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, true /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
	.func		= bpf_ringbuf_discard_dynptr,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

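/* Illustrative sketch (not part of the original file): the dynptr variants
 * above follow the same reserve/commit discipline, but hand the record out
 * as a bpf_dynptr so the size need not be a build-time constant. Note the
 * submit/discard helpers are no-ops when ptr->data is NULL, which is why a
 * BPF program releases the dynptr even on the failure path; rb, sz and data
 * are assumed for the example.
 *
 *	struct bpf_dynptr ptr;
 *
 *	if (bpf_ringbuf_reserve_dynptr(&rb, sz, 0, &ptr)) {
 *		// reservation failed; dynptr was set to NULL, but it still
 *		// has to be released before returning
 *		bpf_ringbuf_discard_dynptr(&ptr, 0);
 *		return 0;
 *	}
 *	bpf_dynptr_write(&ptr, 0, &data, sizeof(data), 0);
 *	bpf_ringbuf_submit_dynptr(&ptr, 0);
 */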