// SPDX-License-Identifier: GPL-2.0-only

/*
 * HID-BPF support for Linux
 *
 * Copyright (c) 2022 Benjamin Tissoires
 */

#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/circ_buf.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"
#include "entrypoints/entrypoints.lskel.h"

#define HID_BPF_MAX_PROGS 1024 /* keep this in sync with preloaded bpf,
				* needs to be a power of 2 as we use it as
				* a circular buffer
				*/

#define NEXT(idx) (((idx) + 1) & (HID_BPF_MAX_PROGS - 1))
#define PREV(idx) (((idx) - 1) & (HID_BPF_MAX_PROGS - 1))

/*
 * represents one attached program stored in the hid jump table
 */
struct hid_bpf_prog_entry {
	struct bpf_prog *prog;
	struct hid_device *hdev;
	enum hid_bpf_prog_type type;
	u16 idx;
};

struct hid_bpf_jmp_table {
	struct bpf_map *map;
	struct hid_bpf_prog_entry entries[HID_BPF_MAX_PROGS]; /* compacted list, circular buffer */
	int tail, head;
	struct bpf_prog *progs[HID_BPF_MAX_PROGS]; /* idx -> progs mapping */
	unsigned long enabled[BITS_TO_LONGS(HID_BPF_MAX_PROGS)];
};

#define FOR_ENTRIES(__i, __start, __end) \
	for (__i = __start; CIRC_CNT(__end, __i, HID_BPF_MAX_PROGS); __i = NEXT(__i))

static struct hid_bpf_jmp_table jmp_table;

static DEFINE_MUTEX(hid_bpf_attach_lock);	/* held when attaching/detaching programs */

static void hid_bpf_release_progs(struct work_struct *work);

static DECLARE_WORK(release_work, hid_bpf_release_progs);

BTF_ID_LIST(hid_bpf_btf_ids)
BTF_ID(func, hid_bpf_device_event)		/* HID_BPF_PROG_TYPE_DEVICE_EVENT */

static int hid_bpf_max_programs(enum hid_bpf_prog_type type)
{
	switch (type) {
	case HID_BPF_PROG_TYPE_DEVICE_EVENT:
		return HID_BPF_MAX_PROGS_PER_DEV;
	default:
		return -EINVAL;
	}
}

static int hid_bpf_program_count(struct hid_device *hdev,
				 struct bpf_prog *prog,
				 enum hid_bpf_prog_type type)
{
	int i, n = 0;

	if (type >= HID_BPF_PROG_TYPE_MAX)
		return -EINVAL;

	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (type != HID_BPF_PROG_TYPE_UNDEF && entry->type != type)
			continue;

		if (hdev && entry->hdev != hdev)
			continue;

		if (prog && entry->prog != prog)
			continue;

		n++;
	}

	return n;
}

__weak noinline int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx)
{
	return 0;
}
ALLOW_ERROR_INJECTION(__hid_bpf_tail_call, ERRNO);

int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
		     struct hid_bpf_ctx_kern *ctx_kern)
{
	struct hid_bpf_prog_list *prog_list;
	int i, idx, err = 0;

	rcu_read_lock();
	prog_list = rcu_dereference(hdev->bpf.progs[type]);

	if (!prog_list)
		goto out_unlock;

	for (i = 0; i < prog_list->prog_cnt; i++) {
		idx = prog_list->prog_idx[i];

		if (!test_bit(idx, jmp_table.enabled))
			continue;

		ctx_kern->ctx.index = idx;
		err = __hid_bpf_tail_call(&ctx_kern->ctx);
		if (err)
			break;
	}

out_unlock:
	rcu_read_unlock();

	return err;
}
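/*
 * For context, the consumer side of this dispatch loop: a device-event
 * program is a tracing program attached to hid_bpf_device_event(), which
 * the hid_tail_call entrypoint reaches through the hid_jmp_table
 * prog_array with the ctx.index set above; returning a nonzero value
 * stops the loop in hid_bpf_prog_run(). A minimal sketch of such a
 * program ("invert_y" is a made-up name; see the in-tree examples under
 * samples/hid/ for real users):
 *
 *	SEC("fmod_ret/hid_bpf_device_event")
 *	int BPF_PROG(invert_y, struct hid_bpf_ctx *hid_ctx)
 *	{
 *		__u8 *data = hid_bpf_get_data(hid_ctx, 0, 4);
 *
 *		if (!data)
 *			return 0;	// data unavailable, let the event through
 *
 *		data[2] = -data[2];	// tweak one byte of the report
 *		return 0;
 *	}
 */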
/*
 * assign the list of programs attached to a given hid device.
 */
static void __hid_bpf_set_hdev_progs(struct hid_device *hdev, struct hid_bpf_prog_list *new_list,
				     enum hid_bpf_prog_type type)
{
	struct hid_bpf_prog_list *old_list;

	spin_lock(&hdev->bpf.progs_lock);
	old_list = rcu_dereference_protected(hdev->bpf.progs[type],
					     lockdep_is_held(&hdev->bpf.progs_lock));
	rcu_assign_pointer(hdev->bpf.progs[type], new_list);
	spin_unlock(&hdev->bpf.progs_lock);
	synchronize_rcu();

	kfree(old_list);
}

/*
 * allocate and populate the list of programs attached to a given hid device.
 *
 * Must be called under lock.
 */
static int hid_bpf_populate_hdev(struct hid_device *hdev, enum hid_bpf_prog_type type)
{
	struct hid_bpf_prog_list *new_list;
	int i;

	if (type >= HID_BPF_PROG_TYPE_MAX || !hdev)
		return -EINVAL;

	if (hdev->bpf.destroyed)
		return 0;

	new_list = kzalloc(sizeof(*new_list), GFP_KERNEL);
	if (!new_list)
		return -ENOMEM;

	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (entry->type == type && entry->hdev == hdev &&
		    test_bit(entry->idx, jmp_table.enabled))
			new_list->prog_idx[new_list->prog_cnt++] = entry->idx;
	}

	__hid_bpf_set_hdev_progs(hdev, new_list, type);

	return 0;
}

static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
{
	skel_map_delete_elem(map_fd, &idx);
	jmp_table.progs[idx] = NULL;
}

static void hid_bpf_release_progs(struct work_struct *work)
{
	int i, j, n, map_fd = -1;

	if (!jmp_table.map)
		return;

	/* retrieve a fd of our prog_array map in BPF */
	map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
	if (map_fd < 0)
		return;

	mutex_lock(&hid_bpf_attach_lock); /* protects against attaching new programs */

	/* detach unused progs from HID devices */
	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
		enum hid_bpf_prog_type type;
		struct hid_device *hdev;

		if (test_bit(entry->idx, jmp_table.enabled))
			continue;

		/* we have an attached prog */
		if (entry->hdev) {
			hdev = entry->hdev;
			type = entry->type;

			hid_bpf_populate_hdev(hdev, type);

			/* mark all other disabled progs from hdev of the given type as detached */
			FOR_ENTRIES(j, i, jmp_table.head) {
				struct hid_bpf_prog_entry *next;

				next = &jmp_table.entries[j];

				if (test_bit(next->idx, jmp_table.enabled))
					continue;

				if (next->hdev == hdev && next->type == type)
					next->hdev = NULL;
			}
		}
	}

	/* remove all unused progs from the jump table */
	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (test_bit(entry->idx, jmp_table.enabled))
			continue;

		if (entry->prog)
			__hid_bpf_do_release_prog(map_fd, entry->idx);
	}

	/* compact the entry list */
	n = jmp_table.tail;
	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (!test_bit(entry->idx, jmp_table.enabled))
			continue;

		jmp_table.entries[n] = jmp_table.entries[i];
		n = NEXT(n);
	}

	jmp_table.head = n;

	mutex_unlock(&hid_bpf_attach_lock);

	if (map_fd >= 0)
		close_fd(map_fd);
}
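/*
 * Compaction example: with tail == 1022 and head == 1 the buffer wraps
 * and holds the entries at indices 1022, 1023 and 0. If the prog at
 * entry 1023 got disabled, the pass above keeps 1022 in place, copies
 * entry 0 over entry 1023 (n wrapping through NEXT()), and sets head
 * to 0: the two surviving entries end up contiguous at 1022 and 1023.
 */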
static void hid_bpf_release_prog_at(int idx)
{
	int map_fd = -1;

	/* retrieve a fd of our prog_array map in BPF */
	map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
	if (map_fd < 0)
		return;

	__hid_bpf_do_release_prog(map_fd, idx);

	close_fd(map_fd);
}

/*
 * Insert the given BPF program represented by its fd in the jmp table.
 * Returns the index in the jump table or a negative error.
 */
static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
{
	int i, index = -1, map_fd = -1, err = -EINVAL;

	/* retrieve a fd of our prog_array map in BPF */
	map_fd = skel_map_get_fd_by_id(jmp_table.map->id);

	if (map_fd < 0) {
		err = -EINVAL;
		goto out;
	}

	/* find the first available index in the jmp_table */
	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
		if (!jmp_table.progs[i]) {
			/* mark the index as used */
			jmp_table.progs[i] = prog;
			index = i;
			__set_bit(i, jmp_table.enabled);
			break;
		}
	}
	if (index < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* insert the program in the jump table */
	err = skel_map_update_elem(map_fd, &index, &prog_fd, 0);
	if (err)
		goto out;

	/*
	 * The program has been safely inserted, decrement the reference count
	 * so it doesn't interfere with the number of actual user handles.
	 * This is safe to do because:
	 * - we overwrite the put_ptr in the prog fd map
	 * - we also have a cleanup function that monitors when a program gets
	 *   released and we manually do the cleanup in the prog fd map
	 */
	bpf_prog_sub(prog, 1);

	/* return the index */
	err = index;

out:
	if (err < 0 && index >= 0)
		__hid_bpf_do_release_prog(map_fd, index);
	if (map_fd >= 0)
		close_fd(map_fd);
	return err;
}

int hid_bpf_get_prog_attach_type(int prog_fd)
{
	struct bpf_prog *prog = NULL;
	int i;
	int prog_type = HID_BPF_PROG_TYPE_UNDEF;

	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
		if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
			prog_type = i;
			break;
		}
	}

	bpf_prog_put(prog);

	return prog_type;
}
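/*
 * __hid_bpf_attach_prog() below is not a userspace-facing symbol: user
 * space reaches it through the hid_bpf_attach_prog() kfunc, typically
 * from a SEC("syscall") BPF program loaded with libbpf. A rough sketch
 * with hypothetical names (args, attach_prog); the in-tree samples in
 * samples/hid/ follow this pattern:
 *
 *	struct args {
 *		__u32 hid;	// HID device id (hdev->id)
 *		int prog_fd;	// fd of the device-event program
 *		int retval;
 *	};
 *
 *	SEC("syscall")
 *	int attach_prog(struct args *ctx)
 *	{
 *		ctx->retval = hid_bpf_attach_prog(ctx->hid, ctx->prog_fd, 0);
 *		return 0;
 *	}
 */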
/* called from syscall */
noinline int
__hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
		      int prog_fd, __u32 flags)
{
	struct bpf_prog *prog = NULL;
	struct hid_bpf_prog_entry *prog_entry;
	int cnt, err = -EINVAL, prog_idx = -1;

	/* take a ref on the prog itself */
	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	mutex_lock(&hid_bpf_attach_lock);

	/* do not attach too many programs to a given HID device */
	cnt = hid_bpf_program_count(hdev, NULL, prog_type);
	if (cnt < 0) {
		err = cnt;
		goto out_unlock;
	}

	if (cnt >= hid_bpf_max_programs(prog_type)) {
		err = -E2BIG;
		goto out_unlock;
	}

	prog_idx = hid_bpf_insert_prog(prog_fd, prog);
	/* if the jmp table is full, abort */
	if (prog_idx < 0) {
		err = prog_idx;
		goto out_unlock;
	}

	if (flags & HID_BPF_FLAG_INSERT_HEAD) {
		/* take the previous prog_entry slot */
		jmp_table.tail = PREV(jmp_table.tail);
		prog_entry = &jmp_table.entries[jmp_table.tail];
	} else {
		/* take the next prog_entry slot */
		prog_entry = &jmp_table.entries[jmp_table.head];
		jmp_table.head = NEXT(jmp_table.head);
	}

	/* we steal the ref here */
	prog_entry->prog = prog;
	prog_entry->idx = prog_idx;
	prog_entry->hdev = hdev;
	prog_entry->type = prog_type;

	/* finally store the index in the device list */
	err = hid_bpf_populate_hdev(hdev, prog_type);
	if (err)
		hid_bpf_release_prog_at(prog_idx);

out_unlock:
	mutex_unlock(&hid_bpf_attach_lock);

	/* we only use prog as a key in the various tables, so we don't need to actually
	 * increment the ref count.
	 */
	bpf_prog_put(prog);

	return err;
}

void __hid_bpf_destroy_device(struct hid_device *hdev)
{
	int type, i;
	struct hid_bpf_prog_list *prog_list;

	rcu_read_lock();

	for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++) {
		prog_list = rcu_dereference(hdev->bpf.progs[type]);

		if (!prog_list)
			continue;

		for (i = 0; i < prog_list->prog_cnt; i++)
			__clear_bit(prog_list->prog_idx[i], jmp_table.enabled);
	}

	rcu_read_unlock();

	for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++)
		__hid_bpf_set_hdev_progs(hdev, NULL, type);

	/* schedule release of all detached progs */
	schedule_work(&release_work);
}

void call_hid_bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;
	bool found = false;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;

	/* we don't need locking here because the entries in the progs table
	 * are stable:
	 * if there are other users (and the progs entries might change), we
	 * would simply not have been called.
	 */
	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
		if (jmp_table.progs[i] == prog) {
			__clear_bit(i, jmp_table.enabled);
			found = true;
		}
	}

	if (found)
		/* schedule release of all detached progs */
		schedule_work(&release_work);
}

static void hid_bpf_prog_fd_array_put_ptr(void *ptr)
{
	/* intentionally empty: the jump table steals the program references,
	 * so removing an element must not drop one (see hid_bpf_insert_prog()).
	 */
}

#define HID_BPF_PROGS_COUNT 2

static struct bpf_link *links[HID_BPF_PROGS_COUNT];
static struct entrypoints_bpf *skel;

void hid_bpf_free_links_and_skel(void)
{
	int i;

	/* the following is enough to release all programs attached to hid */
	if (jmp_table.map)
		bpf_map_put_with_uref(jmp_table.map);

	for (i = 0; i < ARRAY_SIZE(links); i++) {
		if (!IS_ERR_OR_NULL(links[i]))
			bpf_link_put(links[i]);
	}
	entrypoints_bpf__destroy(skel);
}

#define ATTACH_AND_STORE_LINK(__name) do {					\
	err = entrypoints_bpf__##__name##__attach(skel);			\
	if (err)								\
		goto out;							\
										\
	links[idx] = bpf_link_get_from_fd(skel->links.__name##_fd);		\
	if (IS_ERR(links[idx])) {						\
		err = PTR_ERR(links[idx]);					\
		goto out;							\
	}									\
										\
	/* Avoid taking over stdin/stdout/stderr of init process. Zeroing out	\
	 * makes skel_closenz() a no-op later in entrypoints_bpf__destroy().	\
	 */									\
	close_fd(skel->links.__name##_fd);					\
	skel->links.__name##_fd = 0;						\
	idx++;									\
} while (0)
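/*
 * For reference, ATTACH_AND_STORE_LINK(hid_tail_call) expands to roughly:
 *
 *	err = entrypoints_bpf__hid_tail_call__attach(skel);
 *	if (err)
 *		goto out;
 *	links[idx] = bpf_link_get_from_fd(skel->links.hid_tail_call_fd);
 *	if (IS_ERR(links[idx])) {
 *		err = PTR_ERR(links[idx]);
 *		goto out;
 *	}
 *	close_fd(skel->links.hid_tail_call_fd);
 *	skel->links.hid_tail_call_fd = 0;
 *	idx++;
 */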
static struct bpf_map_ops hid_bpf_prog_fd_maps_ops;

int hid_bpf_preload_skel(void)
{
	int err, idx = 0;

	skel = entrypoints_bpf__open();
	if (!skel)
		return -ENOMEM;

	err = entrypoints_bpf__load(skel);
	if (err)
		goto out;

	jmp_table.map = bpf_map_get_with_uref(skel->maps.hid_jmp_table.map_fd);
	if (IS_ERR(jmp_table.map)) {
		err = PTR_ERR(jmp_table.map);
		goto out;
	}

	/* our jump table is stealing refs, so we should not decrement on removal of elements */
	hid_bpf_prog_fd_maps_ops = *jmp_table.map->ops;
	hid_bpf_prog_fd_maps_ops.map_fd_put_ptr = hid_bpf_prog_fd_array_put_ptr;

	jmp_table.map->ops = &hid_bpf_prog_fd_maps_ops;

	ATTACH_AND_STORE_LINK(hid_tail_call);
	ATTACH_AND_STORE_LINK(hid_bpf_prog_put_deferred);

	return 0;
out:
	hid_bpf_free_links_and_skel();
	return err;
}