// SPDX-License-Identifier: GPL-2.0-only

/*
 *  HID-BPF support for Linux
 *
 *  Copyright (c) 2022 Benjamin Tissoires
 */

#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/circ_buf.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"
#include "entrypoints/entrypoints.lskel.h"

#define HID_BPF_MAX_PROGS 1024 /* keep this in sync with preloaded bpf,
				* needs to be a power of 2 as we use it as
				* a circular buffer
				*/

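/*
 * NEXT()/PREV() rely on HID_BPF_MAX_PROGS being a power of 2: masking with
 * (HID_BPF_MAX_PROGS - 1) wraps the index around the circular buffer.
 */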
#define NEXT(idx) (((idx) + 1) & (HID_BPF_MAX_PROGS - 1))
#define PREV(idx) (((idx) - 1) & (HID_BPF_MAX_PROGS - 1))

/*
 * represents one attached program stored in the hid jump table
 */
struct hid_bpf_prog_entry {
	struct bpf_prog *prog;
	struct hid_device *hdev;
	enum hid_bpf_prog_type type;
	u16 idx;
};

struct hid_bpf_jmp_table {
	struct bpf_map *map;
	struct hid_bpf_prog_entry entries[HID_BPF_MAX_PROGS]; /* compacted list, circular buffer */
	int tail, head;
	struct bpf_prog *progs[HID_BPF_MAX_PROGS]; /* idx -> progs mapping */
	unsigned long enabled[BITS_TO_LONGS(HID_BPF_MAX_PROGS)];
};

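/*
 * Iterate over the slots currently in use in the circular entry list, from
 * @__start up to (but not including) @__end; CIRC_CNT() gives the number of
 * remaining entries in the HID_BPF_MAX_PROGS-sized ring.
 */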
#define FOR_ENTRIES(__i, __start, __end) \
	for (__i = __start; CIRC_CNT(__end, __i, HID_BPF_MAX_PROGS); __i = NEXT(__i))

static struct hid_bpf_jmp_table jmp_table;

static DEFINE_MUTEX(hid_bpf_attach_lock);		/* held when attaching/detaching programs */

static void hid_bpf_release_progs(struct work_struct *work);

static DECLARE_WORK(release_work, hid_bpf_release_progs);

BTF_ID_LIST(hid_bpf_btf_ids)
BTF_ID(func, hid_bpf_device_event)			/* HID_BPF_PROG_TYPE_DEVICE_EVENT */
BTF_ID(func, hid_bpf_rdesc_fixup)			/* HID_BPF_PROG_TYPE_RDESC_FIXUP */

static int hid_bpf_max_programs(enum hid_bpf_prog_type type)
{
	switch (type) {
	case HID_BPF_PROG_TYPE_DEVICE_EVENT:
		return HID_BPF_MAX_PROGS_PER_DEV;
	case HID_BPF_PROG_TYPE_RDESC_FIXUP:
		return 1;
	default:
		return -EINVAL;
	}
}

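/*
 * Count the attached entries matching the given filters; a NULL @hdev or
 * @prog, or HID_BPF_PROG_TYPE_UNDEF, acts as a wildcard.
 */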
static int hid_bpf_program_count(struct hid_device *hdev,
				 struct bpf_prog *prog,
				 enum hid_bpf_prog_type type)
{
	int i, n = 0;

	if (type >= HID_BPF_PROG_TYPE_MAX)
		return -EINVAL;

	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (type != HID_BPF_PROG_TYPE_UNDEF && entry->type != type)
			continue;

		if (hdev && entry->hdev != hdev)
			continue;

		if (prog && entry->prog != prog)
			continue;

		n++;
	}

	return n;
}

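/*
 * Stub hooked at runtime: the preloaded "hid_tail_call" entrypoint (attached
 * in hid_bpf_preload_skel() below) is expected to override this and tail-call
 * into the hid_jmp_table prog_array using the index set in ctx->index.
 */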
__weak noinline int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx)
{
	return 0;
}

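/*
 * Run every enabled program of @type attached to @hdev, in list order, under
 * the RCU read lock. A negative return from a program aborts the chain and is
 * propagated; a positive return is stored as the new ctx.retval.
 */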
int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
		     struct hid_bpf_ctx_kern *ctx_kern)
{
	struct hid_bpf_prog_list *prog_list;
	int i, idx, err = 0;

	rcu_read_lock();
	prog_list = rcu_dereference(hdev->bpf.progs[type]);

	if (!prog_list)
		goto out_unlock;

	for (i = 0; i < prog_list->prog_cnt; i++) {
		idx = prog_list->prog_idx[i];

		if (!test_bit(idx, jmp_table.enabled))
			continue;

		ctx_kern->ctx.index = idx;
		err = __hid_bpf_tail_call(&ctx_kern->ctx);
		if (err < 0)
			break;
		if (err)
			ctx_kern->ctx.retval = err;
	}

 out_unlock:
	rcu_read_unlock();

	return err;
}

/*
 * assign the list of programs attached to a given hid device.
 */
static void __hid_bpf_set_hdev_progs(struct hid_device *hdev, struct hid_bpf_prog_list *new_list,
				     enum hid_bpf_prog_type type)
{
	struct hid_bpf_prog_list *old_list;

	spin_lock(&hdev->bpf.progs_lock);
	old_list = rcu_dereference_protected(hdev->bpf.progs[type],
					     lockdep_is_held(&hdev->bpf.progs_lock));
	rcu_assign_pointer(hdev->bpf.progs[type], new_list);
	spin_unlock(&hdev->bpf.progs_lock);
	synchronize_rcu();

	kfree(old_list);
}

/*
 * allocate and populate the list of programs attached to a given hid device.
 *
 * Must be called under lock.
 */
static int hid_bpf_populate_hdev(struct hid_device *hdev, enum hid_bpf_prog_type type)
{
	struct hid_bpf_prog_list *new_list;
	int i;

	if (type >= HID_BPF_PROG_TYPE_MAX || !hdev)
		return -EINVAL;

	if (hdev->bpf.destroyed)
		return 0;

	new_list = kzalloc(sizeof(*new_list), GFP_KERNEL);
	if (!new_list)
		return -ENOMEM;

	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (entry->type == type && entry->hdev == hdev &&
		    test_bit(entry->idx, jmp_table.enabled))
			new_list->prog_idx[new_list->prog_cnt++] = entry->idx;
	}

	__hid_bpf_set_hdev_progs(hdev, new_list, type);

	return 0;
}

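/* remove the program at @idx from the BPF prog_array map and forget it locally */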
static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
{
	skel_map_delete_elem(map_fd, &idx);
	jmp_table.progs[idx] = NULL;
}

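/*
 * Workqueue callback: garbage-collect every program whose "enabled" bit has
 * been cleared. Three passes: refresh the per-device program lists (and
 * reconnect the device if an rdesc fixup went away), drop the disabled
 * programs from the prog_array map, then compact the circular entry list.
 */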
static void hid_bpf_release_progs(struct work_struct *work)
{
	int i, j, n, map_fd = -1;

	if (!jmp_table.map)
		return;

	/* retrieve a fd of our prog_array map in BPF */
	map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
	if (map_fd < 0)
		return;

	mutex_lock(&hid_bpf_attach_lock); /* protects against attaching new programs */

	/* detach unused progs from HID devices */
	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
		enum hid_bpf_prog_type type;
		struct hid_device *hdev;

		if (test_bit(entry->idx, jmp_table.enabled))
			continue;

		/* we have an attached prog */
		if (entry->hdev) {
			hdev = entry->hdev;
			type = entry->type;

			hid_bpf_populate_hdev(hdev, type);

			/* mark all other disabled progs from hdev of the given type as detached */
			FOR_ENTRIES(j, i, jmp_table.head) {
				struct hid_bpf_prog_entry *next;

				next = &jmp_table.entries[j];

				if (test_bit(next->idx, jmp_table.enabled))
					continue;

				if (next->hdev == hdev && next->type == type)
					next->hdev = NULL;
			}

			/* if type was rdesc fixup, reconnect device */
			if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP)
				hid_bpf_reconnect(hdev);
		}
	}

	/* remove all unused progs from the jump table */
	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (test_bit(entry->idx, jmp_table.enabled))
			continue;

		if (entry->prog)
			__hid_bpf_do_release_prog(map_fd, entry->idx);
	}

	/* compact the entry list */
	n = jmp_table.tail;
	FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
		struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];

		if (!test_bit(entry->idx, jmp_table.enabled))
			continue;

		jmp_table.entries[n] = jmp_table.entries[i];
		n = NEXT(n);
	}

	jmp_table.head = n;

	mutex_unlock(&hid_bpf_attach_lock);

	if (map_fd >= 0)
		close_fd(map_fd);
}

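/* undo hid_bpf_insert_prog() for a single slot when a later step fails */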
static void hid_bpf_release_prog_at(int idx)
{
	int map_fd = -1;

	/* retrieve a fd of our prog_array map in BPF */
	map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
	if (map_fd < 0)
		return;

	__hid_bpf_do_release_prog(map_fd, idx);

	close_fd(map_fd);
}

/*
 * Insert the given BPF program represented by its fd in the jmp table.
 * Returns the index in the jump table or a negative error.
 */
static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
{
	int i, index = -1, map_fd = -1, err = -EINVAL;

	/* retrieve a fd of our prog_array map in BPF */
	map_fd = skel_map_get_fd_by_id(jmp_table.map->id);

	if (map_fd < 0) {
		err = -EINVAL;
		goto out;
	}

	/* find the first available index in the jmp_table */
	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
		if (!jmp_table.progs[i] && index < 0) {
			/* mark the index as used */
			jmp_table.progs[i] = prog;
			index = i;
			__set_bit(i, jmp_table.enabled);
		}
	}
	if (index < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* insert the program in the jump table */
	err = skel_map_update_elem(map_fd, &index, &prog_fd, 0);
	if (err)
		goto out;

	/*
	 * The program has been safely inserted, decrement the reference count
	 * so it doesn't interfere with the number of actual user handles.
	 * This is safe to do because:
	 * - we overwrite the put_ptr in the prog fd map
	 * - we also have a cleanup function that monitors when a program gets
	 *   released and we manually do the cleanup in the prog fd map
	 */
	bpf_prog_sub(prog, 1);

	/* return the index */
	err = index;

 out:
	if (err < 0 && index >= 0)
		__hid_bpf_do_release_prog(map_fd, index);
	if (map_fd >= 0)
		close_fd(map_fd);
	return err;
}

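/*
 * Map the attach BTF ID of the program behind @prog_fd to a HID-BPF program
 * type. Relies on hid_bpf_btf_ids[] being declared in the same order as
 * enum hid_bpf_prog_type.
 */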
int hid_bpf_get_prog_attach_type(int prog_fd)
{
	struct bpf_prog *prog = NULL;
	int i;
	int prog_type = HID_BPF_PROG_TYPE_UNDEF;

	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
		if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
			prog_type = i;
			break;
		}
	}

	bpf_prog_put(prog);

	return prog_type;
}

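/*
 * Attach the program referenced by @prog_fd to @hdev: reserve a slot in the
 * jump table, record a new entry in the circular list (prepended, so it runs
 * first, when HID_BPF_FLAG_INSERT_HEAD is set; appended otherwise), then
 * refresh the per-device program list.
 */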
/* called from syscall */
noinline int
__hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
		      int prog_fd, __u32 flags)
{
	struct bpf_prog *prog = NULL;
	struct hid_bpf_prog_entry *prog_entry;
	int cnt, err = -EINVAL, prog_idx = -1;

	/* take a ref on the prog itself */
	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	mutex_lock(&hid_bpf_attach_lock);

	/* do not attach too many programs to a given HID device */
	cnt = hid_bpf_program_count(hdev, NULL, prog_type);
	if (cnt < 0) {
		err = cnt;
		goto out_unlock;
	}

	if (cnt >= hid_bpf_max_programs(prog_type)) {
		err = -E2BIG;
		goto out_unlock;
	}

	prog_idx = hid_bpf_insert_prog(prog_fd, prog);
	/* if the jmp table is full, abort */
	if (prog_idx < 0) {
		err = prog_idx;
		goto out_unlock;
	}

	if (flags & HID_BPF_FLAG_INSERT_HEAD) {
		/* take the previous prog_entry slot */
		jmp_table.tail = PREV(jmp_table.tail);
		prog_entry = &jmp_table.entries[jmp_table.tail];
	} else {
		/* take the next prog_entry slot */
		prog_entry = &jmp_table.entries[jmp_table.head];
		jmp_table.head = NEXT(jmp_table.head);
	}

	/* we steal the ref here */
	prog_entry->prog = prog;
	prog_entry->idx = prog_idx;
	prog_entry->hdev = hdev;
	prog_entry->type = prog_type;

	/* finally store the index in the device list */
	err = hid_bpf_populate_hdev(hdev, prog_type);
	if (err)
		hid_bpf_release_prog_at(prog_idx);

 out_unlock:
	mutex_unlock(&hid_bpf_attach_lock);

	/* we only use prog as a key in the various tables, so we don't need to actually
	 * increment the ref count.
	 */
	bpf_prog_put(prog);

	return err;
}

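/*
 * Called when @hdev goes away: disable every program still attached to it,
 * drop the per-device program lists and schedule the actual release.
 */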
void __hid_bpf_destroy_device(struct hid_device *hdev)
{
	int type, i;
	struct hid_bpf_prog_list *prog_list;

	rcu_read_lock();

	for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++) {
		prog_list = rcu_dereference(hdev->bpf.progs[type]);

		if (!prog_list)
			continue;

		for (i = 0; i < prog_list->prog_cnt; i++)
			__clear_bit(prog_list->prog_idx[i], jmp_table.enabled);
	}

	rcu_read_unlock();

	for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++)
		__hid_bpf_set_hdev_progs(hdev, NULL, type);

	/* schedule release of all detached progs */
	schedule_work(&release_work);
}

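/*
 * Invoked through the preloaded "hid_bpf_prog_put_deferred" entrypoint when a
 * BPF program drops its last user reference: disable any jump table slot
 * still pointing at it so the release work can clean it up.
 */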
void call_hid_bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;
	bool found = false;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;

	/* we don't need locking here because the entries in the progs table
	 * are stable:
	 * if there are other users (and the progs entries might change), we
	 * would simply not have been called.
	 */
	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
		if (jmp_table.progs[i] == prog) {
			__clear_bit(i, jmp_table.enabled);
			found = true;
		}
	}

	if (found)
		/* schedule release of all detached progs */
		schedule_work(&release_work);
}

static void hid_bpf_prog_fd_array_put_ptr(void *ptr)
{
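	/*
	 * Intentionally empty: the jump table steals the program references
	 * (see hid_bpf_preload_skel()), so removing an element must not drop
	 * a reference here.
	 */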
}

#define HID_BPF_PROGS_COUNT 2

static struct bpf_link *links[HID_BPF_PROGS_COUNT];
static struct entrypoints_bpf *skel;

void hid_bpf_free_links_and_skel(void)
{
	int i;

	/* the following is enough to release all programs attached to hid */
	if (jmp_table.map)
		bpf_map_put_with_uref(jmp_table.map);

	for (i = 0; i < ARRAY_SIZE(links); i++) {
		if (!IS_ERR_OR_NULL(links[i]))
			bpf_link_put(links[i]);
	}
	entrypoints_bpf__destroy(skel);
}

#define ATTACH_AND_STORE_LINK(__name) do {					\
	err = entrypoints_bpf__##__name##__attach(skel);			\
	if (err)								\
		goto out;							\
										\
	links[idx] = bpf_link_get_from_fd(skel->links.__name##_fd);		\
	if (IS_ERR(links[idx])) {						\
		err = PTR_ERR(links[idx]);					\
		goto out;							\
	}									\
										\
	/* Avoid taking over stdin/stdout/stderr of init process. Zeroing out	\
	 * makes skel_closenz() a no-op later in entrypoints_bpf__destroy().	\
	 */									\
	close_fd(skel->links.__name##_fd);					\
	skel->links.__name##_fd = 0;						\
	idx++;									\
} while (0)

static struct bpf_map_ops hid_bpf_prog_fd_maps_ops;

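/*
 * Load the light skeleton, take a reference on the hid_jmp_table prog_array,
 * override its map_fd_put_ptr so removals do not drop program references,
 * and attach the two entrypoint programs.
 */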
int hid_bpf_preload_skel(void)
{
	int err, idx = 0;

	skel = entrypoints_bpf__open();
	if (!skel)
		return -ENOMEM;

	err = entrypoints_bpf__load(skel);
	if (err)
		goto out;

	jmp_table.map = bpf_map_get_with_uref(skel->maps.hid_jmp_table.map_fd);
	if (IS_ERR(jmp_table.map)) {
		err = PTR_ERR(jmp_table.map);
		goto out;
	}

	/* our jump table is stealing refs, so we should not decrement on removal of elements */
	hid_bpf_prog_fd_maps_ops = *jmp_table.map->ops;
	hid_bpf_prog_fd_maps_ops.map_fd_put_ptr = hid_bpf_prog_fd_array_put_ptr;

	jmp_table.map->ops = &hid_bpf_prog_fd_maps_ops;

	ATTACH_AND_STORE_LINK(hid_tail_call);
	ATTACH_AND_STORE_LINK(hid_bpf_prog_put_deferred);

	return 0;
out:
	hid_bpf_free_links_and_skel();
	return err;
}
565