// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2019 Intel Corporation. */

#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/init.h>

/* The BPF dispatcher is a multi-way branch code generator. The
 * dispatcher is a mechanism to avoid the performance penalty of an
 * indirect call, which is expensive when retpolines are enabled. A
 * dispatch client registers a BPF program into the dispatcher, and if
 * there is available room in the dispatcher a direct call to the BPF
 * program will be generated. All calls to the BPF programs called via
 * the dispatcher will then be direct calls, instead of indirect ones.
 * The dispatcher hijacks a trampoline function via the __fentry__ of
 * the trampoline. The trampoline function has the following signature:
 *
 * unsigned int trampoline(const void *ctx, const struct bpf_insn *insnsi,
 *                         unsigned int (*bpf_func)(const void *,
 *                                                  const struct bpf_insn *));
 */
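/* A minimal client-side sketch, loosely modeled on the XDP dispatcher in
 * net/core/filter.c. The macro and function names below come from
 * include/linux/bpf.h, include/linux/filter.h and this file; the client
 * code itself (the "example" names) is purely illustrative:
 *
 *	DEFINE_BPF_DISPATCHER(example)
 *
 *	static void example_change_prog(struct bpf_prog *prev,
 *					struct bpf_prog *next)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example),
 *					   prev, next);
 *	}
 *
 *	static u32 example_run(const void *ctx, const struct bpf_prog *prog)
 *	{
 *		return __bpf_prog_run(prog, ctx, BPF_DISPATCHER_FUNC(example));
 *	}
 *
 * Programs patched into the dispatcher are then entered through a direct
 * call emitted by the architecture backend instead of an indirect call
 * through prog->bpf_func.
 */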

static struct bpf_dispatcher_prog *bpf_dispatcher_find_prog(
	struct bpf_dispatcher *d, struct bpf_prog *prog)
{
	int i;

	for (i = 0; i < BPF_DISPATCHER_MAX; i++) {
		if (prog == d->progs[i].prog)
			return &d->progs[i];
	}
	return NULL;
}

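/* An unused slot has .prog == NULL, so finding a free slot is simply a
 * lookup for NULL.
 */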
static struct bpf_dispatcher_prog *bpf_dispatcher_find_free(
	struct bpf_dispatcher *d)
{
	return bpf_dispatcher_find_prog(d, NULL);
}

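/* Returns true only when the dispatcher image needs to be regenerated: a
 * program that is already registered just gets its user count bumped, and
 * a full dispatcher (no free slot) leaves the image untouched.
 */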
static bool bpf_dispatcher_add_prog(struct bpf_dispatcher *d,
				    struct bpf_prog *prog)
{
	struct bpf_dispatcher_prog *entry;

	if (!prog)
		return false;

	entry = bpf_dispatcher_find_prog(d, prog);
	if (entry) {
		refcount_inc(&entry->users);
		return false;
	}

	entry = bpf_dispatcher_find_free(d);
	if (!entry)
		return false;

	bpf_prog_inc(prog);
	entry->prog = prog;
	refcount_set(&entry->users, 1);
	d->num_progs++;
	return true;
}

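/* Counterpart of bpf_dispatcher_add_prog(): drops one user reference and
 * returns true only when the last user is gone and the slot was actually
 * freed, i.e. when the image needs to be regenerated.
 */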
static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d,
				       struct bpf_prog *prog)
{
	struct bpf_dispatcher_prog *entry;

	if (!prog)
		return false;

	entry = bpf_dispatcher_find_prog(d, prog);
	if (!entry)
		return false;

	if (refcount_dec_and_test(&entry->users)) {
		entry->prog = NULL;
		bpf_prog_put(prog);
		d->num_progs--;
		return true;
	}
	return false;
}

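/* Architecture hooks, overridden by the arch JIT (e.g. on x86,
 * arch_prepare_bpf_dispatcher() in arch/x86/net/bpf_jit_comp.c emits a
 * binary search over the registered program addresses). The weak fallbacks
 * below just report the dispatcher as unsupported.
 */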
int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{
	return -ENOTSUPP;
}

int __weak __init bpf_arch_init_dispatcher_early(void *ip)
{
	return -ENOTSUPP;
}

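/* Collect the entry points (bpf_func) of all registered programs and let
 * the arch backend generate the dispatch code for them into the writable
 * buffer @buf, to be copied into the executable @image by the caller.
 */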
static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
{
	s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
	int i;

	for (i = 0; i < BPF_DISPATCHER_MAX; i++) {
		if (d->progs[i].prog)
			*ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func;
	}
	return arch_prepare_bpf_dispatcher(image, buf, &ips[0], d->num_progs);
}

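/* Regenerate the dispatcher and switch execution over to it. The image page
 * is split in two halves; the new dispatch code is built in the currently
 * unused half (image_off XOR PAGE_SIZE / 2), and the __fentry__ site of the
 * trampoline is then re-pointed at it with bpf_arch_text_poke(). When no
 * programs remain, the jump is removed altogether (new == NULL).
 */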
static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
{
	void *old, *new, *tmp;
	u32 noff;
	int err;

	if (!prev_num_progs) {
		old = NULL;
		noff = 0;
	} else {
		old = d->image + d->image_off;
		noff = d->image_off ^ (PAGE_SIZE / 2);
	}

	new = d->num_progs ? d->image + noff : NULL;
	tmp = d->num_progs ? d->rw_image + noff : NULL;
	if (new) {
		/* Prepare the dispatcher in d->rw_image. Then use
		 * bpf_arch_text_copy to update d->image, which is RO+X.
		 */
		if (bpf_dispatcher_prepare(d, new, tmp))
			return;
		if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2)))
			return;
	}

	err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
	if (err || !new)
		return;

	d->image_off = noff;
}

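/* Replace @from with @to in dispatcher @d; either may be NULL, so this also
 * covers plain attach (from == NULL) and detach (to == NULL). The image and
 * its writable counterpart are allocated lazily on first use, and the
 * dispatch code is only regenerated when the set of programs actually
 * changed.
 */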
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to)
{
	bool changed = false;
	int prev_num_progs;

	if (from == to)
		return;

	mutex_lock(&d->mutex);
	if (!d->image) {
		d->image = bpf_prog_pack_alloc(PAGE_SIZE, bpf_jit_fill_hole_with_zero);
		if (!d->image)
			goto out;
		d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE);
		if (!d->rw_image) {
			u32 size = PAGE_SIZE;

			bpf_arch_text_copy(d->image, &size, sizeof(size));
			bpf_prog_pack_free((struct bpf_binary_header *)d->image);
			d->image = NULL;
			goto out;
		}
		bpf_image_ksym_add(d->image, &d->ksym);
	}

	prev_num_progs = d->num_progs;
	changed |= bpf_dispatcher_remove_prog(d, from);
	changed |= bpf_dispatcher_add_prog(d, to);

	if (!changed)
		goto out;

	bpf_dispatcher_update(d, prev_num_progs);
out:
	mutex_unlock(&d->mutex);
}