// SPDX-License-Identifier: GPL-2.0
/*
 * Common functionality for HPPA32 and HPPA64 BPF JIT compilers
 *
 * Copyright (c) 2023 Helge Deller <deller@gmx.de>
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include "bpf_jit.h"

/* Number of iterations to try until offsets converge. */
#define NR_JIT_ITERATIONS	35

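/*
 * Translate the eBPF instruction stream into native code.  bpf_jit_emit_insn()
 * appends the HPPA instructions for each eBPF insn to @ctx; a positive return
 * value means a double-length BPF_LD | BPF_IMM | BPF_DW insn was consumed, so
 * the following (pseudo) instruction is skipped.  When @offset is non-NULL,
 * the running native instruction count is recorded per eBPF insn so later
 * passes can resolve jump targets and fill in the JITed line info.
 */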
static int build_body(struct hppa_jit_context *ctx, bool extra_pass, int *offset)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	ctx->reg_seen_collect = true;
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
		/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
		if (ret > 0)
			i++;
		if (offset)
			offset[i] = ctx->ninsns;
		if (ret < 0)
			return ret;
	}
	ctx->reg_seen_collect = false;
	return 0;
}

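/*
 * Ask the verifier to insert explicit zero-extension instructions after
 * 32-bit operations; the JIT does not implicitly clear the upper 32 bits
 * of the destination register itself.
 */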
bool bpf_jit_needs_zext(void)
{
	return true;
}

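/*
 * Main JIT entry point: translate @prog into native HPPA code.  The image
 * size is not known up front, so code generation is repeated until the
 * instruction count stops changing, the binary image is allocated, and a
 * final pass emits the code into it.
 */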
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	unsigned int prog_size = 0, extable_size = 0;
	bool tmp_blinded = false, extra_pass = false;
	struct bpf_prog *tmp, *orig_prog = prog;
	int pass = 0, prev_ninsns = 0, prologue_len, i;
	struct hppa_jit_data *jit_data;
	struct hppa_jit_context *ctx;

	if (!prog->jit_requested)
		return orig_prog;

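	/*
	 * Constant blinding rewrites immediates to defeat JIT spraying; it may
	 * return a new program, the original one, or an error.  On error, fall
	 * back to the interpreter by returning the original program.
	 */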
	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}

	ctx = &jit_data->ctx;

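	/*
	 * If the context already carries offsets, this is the extra pass the
	 * core requests for programs with subprog calls once all call targets
	 * are known; reuse the previously allocated image.
	 */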
	if (ctx->offset) {
		extra_pass = true;
		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
		goto skip_init_ctx;
	}

	ctx->prog = prog;
	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (!ctx->offset) {
		prog = orig_prog;
		goto out_offset;
	}
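	/*
	 * Seed the offset table with a conservative estimate of 20 native
	 * instructions per eBPF instruction; later passes can then only
	 * shrink the image as the offsets converge.
	 */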
	for (i = 0; i < prog->len; i++) {
		prev_ninsns += 20;
		ctx->offset[i] = prev_ninsns;
	}

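	/*
	 * Re-run code generation until the emitted instruction count matches
	 * the previous pass (all offsets have converged) or the iteration
	 * limit is hit.
	 */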
	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
		pass++;
		ctx->ninsns = 0;
		if (build_body(ctx, extra_pass, ctx->offset)) {
			prog = orig_prog;
			goto out_offset;
		}
		ctx->body_len = ctx->ninsns;
		bpf_jit_build_prologue(ctx);
		ctx->prologue_len = ctx->ninsns - ctx->body_len;
		ctx->epilogue_offset = ctx->ninsns;
		bpf_jit_build_epilogue(ctx);

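		/*
		 * The size has stabilized: allocate the executable image
		 * (with room for the exception table) and keep iterating so
		 * the code is emitted at its final address.
		 */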
		if (ctx->ninsns == prev_ninsns) {
			if (jit_data->header)
				break;
			/* obtain the actual image size */
			extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);
			prog_size = sizeof(*ctx->insns) * ctx->ninsns;

			jit_data->header =
				bpf_jit_binary_alloc(prog_size + extable_size,
						     &jit_data->image,
						     sizeof(long),
						     bpf_fill_ill_insns);
			if (!jit_data->header) {
				prog = orig_prog;
				goto out_offset;
			}

			ctx->insns = (u32 *)jit_data->image;
			/*
			 * Now that the image is allocated, the code can
			 * potentially shrink further as branches are resolved
			 * against their final addresses.
			 */
		}
		prev_ninsns = ctx->ninsns;
	}

	if (i == NR_JIT_ITERATIONS) {
		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
		if (jit_data->header)
			bpf_jit_binary_free(jit_data->header);
		prog = orig_prog;
		goto out_offset;
	}

	if (extable_size)
		prog->aux->extable = (void *)ctx->insns + prog_size;

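	/*
	 * Final pass: offsets are stable and the image is in place, so emit
	 * the prologue, body and epilogue into the executable image.
	 */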
skip_init_ctx:
	pass++;
	ctx->ninsns = 0;

	bpf_jit_build_prologue(ctx);
	if (build_body(ctx, extra_pass, NULL)) {
		bpf_jit_binary_free(jit_data->header);
		prog = orig_prog;
		goto out_offset;
	}
	bpf_jit_build_epilogue(ctx);

	if (HPPA_JIT_DEBUG || bpf_jit_enable > 1) {
		if (HPPA_JIT_DUMP)
			bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
		if (HPPA_JIT_REBOOT)
			{ extern int machine_restart(char *); machine_restart(""); }
	}

	prog->bpf_func = (void *)ctx->insns;
	prog->jited = 1;
	prog->jited_len = prog_size;

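	/* Make the newly written instructions visible to instruction fetch. */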
	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);

	if (!prog->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(jit_data->header);
		prologue_len = ctx->epilogue_offset - ctx->body_len;
		for (i = 0; i < prog->len; i++)
			ctx->offset[i] += prologue_len;
		bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
		kfree(ctx->offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (HPPA_JIT_REBOOT)
		{ extern int machine_restart(char *); machine_restart(""); }

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

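/*
 * Runtime helpers for 64-bit unsigned division and remainder: the generated
 * code calls out to these wrappers around div64_u64() and div64_u64_rem()
 * instead of open-coding the operation.
 */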
u64 hppa_div64(u64 div, u64 divisor)
{
	div = div64_u64(div, divisor);
	return div;
}

u64 hppa_div64_rem(u64 div, u64 divisor)
{
	u64 rem;

	div64_u64_rem(div, divisor, &rem);
	return rem;
}