/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Just-In-Time compiler for eBPF bytecode on 32-bit and 64-bit MIPS.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <johan.almbladh@gmail.com>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#ifndef _BPF_JIT_COMP_H
#define _BPF_JIT_COMP_H

/* MIPS registers */
#define MIPS_R_ZERO	0   /* Const zero */
#define MIPS_R_AT	1   /* Asm temp   */
#define MIPS_R_V0	2   /* Result     */
#define MIPS_R_V1	3   /* Result     */
#define MIPS_R_A0	4   /* Argument   */
#define MIPS_R_A1	5   /* Argument   */
#define MIPS_R_A2	6   /* Argument   */
#define MIPS_R_A3	7   /* Argument   */
#define MIPS_R_A4	8   /* Arg (n64)  */
#define MIPS_R_A5	9   /* Arg (n64)  */
#define MIPS_R_A6	10  /* Arg (n64)  */
#define MIPS_R_A7	11  /* Arg (n64)  */
#define MIPS_R_T0	8   /* Temp (o32) */
#define MIPS_R_T1	9   /* Temp (o32) */
#define MIPS_R_T2	10  /* Temp (o32) */
#define MIPS_R_T3	11  /* Temp (o32) */
#define MIPS_R_T4	12  /* Temporary  */
#define MIPS_R_T5	13  /* Temporary  */
#define MIPS_R_T6	14  /* Temporary  */
#define MIPS_R_T7	15  /* Temporary  */
#define MIPS_R_S0	16  /* Saved      */
#define MIPS_R_S1	17  /* Saved      */
#define MIPS_R_S2	18  /* Saved      */
#define MIPS_R_S3	19  /* Saved      */
#define MIPS_R_S4	20  /* Saved      */
#define MIPS_R_S5	21  /* Saved      */
#define MIPS_R_S6	22  /* Saved      */
#define MIPS_R_S7	23  /* Saved      */
#define MIPS_R_T8	24  /* Temporary  */
#define MIPS_R_T9	25  /* Temporary  */
/*      MIPS_R_K0	26     Reserved   */
/*      MIPS_R_K1	27     Reserved   */
#define MIPS_R_GP	28  /* Global ptr */
#define MIPS_R_SP	29  /* Stack ptr  */
#define MIPS_R_FP	30  /* Frame ptr  */
#define MIPS_R_RA	31  /* Return     */

/*
 * Jump address mask for immediate jumps. The four most significant bits
 * of the jump target must be equal to those of the PC.
 */
#define MIPS_JMP_MASK	0x0fffffffUL
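
/*
 * Illustrative sketch (not part of this header): how a JIT can test
 * that an absolute jump target is reachable from the current PC with
 * a single j/jal instruction. Variable names are hypothetical.
 *
 *	unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index];
 *	unsigned long addr = (unsigned long)&ctx->target[target_index];
 *
 *	if ((pc & ~MIPS_JMP_MASK) != (addr & ~MIPS_JMP_MASK))
 *		return -1;	// target outside the current 256 MB region
 *	// Otherwise the low 28 bits of addr form the jump target.
 */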

/* Maximum number of iterations in offset table computation */
#define JIT_MAX_ITERATIONS	8

/*
 * Jump pseudo-instructions used internally
 * for branch conversion and branch optimization.
 */
#define JIT_JNSET	0xe0
#define JIT_JNOP	0xf0

/* Descriptor flag for PC-relative branch conversion */
#define JIT_DESC_CONVERT	BIT(31)

/* JIT context for an eBPF program */
struct jit_context {
	struct bpf_prog *program;     /* The eBPF program being JITed        */
	u32 *descriptors;             /* eBPF to JITed CPU insn descriptors  */
	u32 *target;                  /* JITed code buffer                   */
	u32 bpf_index;                /* Index of current BPF program insn   */
	u32 jit_index;                /* Index of current JIT target insn    */
	u32 changes;                  /* Number of PC-relative branch conv   */
	u32 accessed;                 /* Bit mask of read eBPF registers     */
	u32 clobbered;                /* Bit mask of modified CPU registers  */
	u32 stack_size;               /* Total allocated stack size in bytes */
	u32 saved_size;               /* Size of callee-saved registers      */
	u32 stack_used;               /* Stack size used for function calls  */
};
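
/*
 * Illustrative sketch (not part of this header): the descriptors table
 * maps each BPF instruction index to its JIT instruction index, and the
 * program body is laid out repeatedly until no further PC-relative
 * branches need conversion or JIT_MAX_ITERATIONS is exceeded. Roughly,
 * with a hypothetical build_body() pass that fills ctx.descriptors:
 *
 *	int i;
 *
 *	for (i = 0; i < JIT_MAX_ITERATIONS; i++) {
 *		ctx.changes = 0;
 *		if (build_body(&ctx) < 0)
 *			goto out_err;
 *		if (ctx.changes == 0)
 *			break;		// instruction offsets have converged
 *	}
 */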

/* Emit the instruction if the JIT memory space has been allocated */
#define __emit(ctx, func, ...)					\
do {								\
	if ((ctx)->target != NULL) {				\
		u32 *p = &(ctx)->target[ctx->jit_index];	\
		uasm_i_##func(&p, ##__VA_ARGS__);		\
	}							\
	(ctx)->jit_index++;					\
} while (0)
#define emit(...) __emit(__VA_ARGS__)
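
/*
 * Illustrative use of emit() (not part of this header). The uasm_i_*
 * emitters come from <asm/uasm.h>, which the including .c file is
 * expected to pull in. A first pass can run with ctx->target == NULL so
 * that only jit_index advances and the code size is measured; a second
 * pass with an allocated buffer then writes the instructions. For
 * example, a 32-bit add of two registers could be emitted as:
 *
 *	emit(ctx, addu, dst, dst, src);
 *	clobber_reg(ctx, dst);
 */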

/* Workaround for R10000 ll/sc errata */
#ifdef CONFIG_WAR_R10000_LLSC
#define LLSC_beqz	beqzl
#else
#define LLSC_beqz	beqz
#endif

/* Workaround for Loongson-3 ll/sc errata */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
#define LLSC_sync(ctx)	emit(ctx, sync, 0)
#define LLSC_offset	4
#else
#define LLSC_sync(ctx)
#define LLSC_offset	0
#endif
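
/*
 * Illustrative sketch of how these helpers slot into an ll/sc retry
 * loop (not part of this header; the register choices and the atomic
 * add shown are hypothetical). LLSC_sync() emits a leading sync on
 * Loongson-3, LLSC_offset accounts for that extra instruction in the
 * branch-back offset, and LLSC_beqz retries with beqzl on R10000:
 *
 *	LLSC_sync(ctx);
 *	emit(ctx, ll, MIPS_R_T9, off, dst);		// load linked
 *	emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src);	// modify
 *	emit(ctx, sc, MIPS_R_T8, off, dst);		// store conditional
 *	emit(ctx, LLSC_beqz, MIPS_R_T8, -16 - LLSC_offset);
 *	emit(ctx, nop);					// branch delay slot
 */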

/* Workaround for Loongson-2F jump errata */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
#define JALR_MASK	0xffffffffcfffffffULL
#else
#define JALR_MASK	(~0ULL)
#endif
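
/*
 * Illustrative sketch (not part of this header): masking an indirect
 * call target with JALR_MASK before loading it avoids the Loongson-2F
 * jump erratum when the workaround is enabled, and is a no-op
 * otherwise. The address and load step below are hypothetical.
 *
 *	u64 addr = (u64)call_target & JALR_MASK;
 *
 *	// ...load addr into MIPS_R_T9, then:
 *	emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
 *	emit(ctx, nop);			// branch delay slot
 */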

/*
 * Mark a BPF register as accessed. Registers that must be initialized
 * before use, e.g. FP, are then set up by the generated program.
 */
static inline void access_reg(struct jit_context *ctx, u8 reg)
{
	ctx->accessed |= BIT(reg);
}

/*
 * Mark a CPU register as clobbered. Callee-saved registers must then be
 * saved and restored by the generated program.
 */
static inline void clobber_reg(struct jit_context *ctx, u8 reg)
{
	ctx->clobbered |= BIT(reg);
}
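
/*
 * Illustrative only (not part of this header): instruction translation
 * records the registers it touches so that the prologue and epilogue
 * can be kept minimal. For example, writing a callee-saved register
 * and reading the BPF frame pointer could be recorded as:
 *
 *	emit(ctx, addu, MIPS_R_S0, MIPS_R_A0, MIPS_R_ZERO);
 *	clobber_reg(ctx, MIPS_R_S0);
 *
 *	access_reg(ctx, BPF_REG_FP);	// FP must be set up by the prologue
 */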

/*
 * Push registers on the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be written is returned.
 */
int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);

/*
 * Pop registers from the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be read is returned.
 */
int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);
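
/*
 * Illustrative use (not part of this header): a prologue might save the
 * clobbered callee-saved registers plus RA, and the matching epilogue
 * restores them from the same depths. The callee-saved mask below is
 * hypothetical.
 *
 *	u32 saved = ctx->clobbered & (callee_saved_mask | BIT(MIPS_R_RA));
 *	int depth;
 *
 *	depth = push_regs(ctx, saved, 0, 0);	// prologue
 *	...
 *	depth = pop_regs(ctx, saved, 0, 0);	// epilogue
 */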

/* Compute the 28-bit jump target address from a BPF program location */
int get_target(struct jit_context *ctx, u32 loc);

/* Compute the PC-relative JIT offset for a relative BPF program offset */
int get_offset(const struct jit_context *ctx, int off);

/* dst = imm (32-bit) */
void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm);

/* dst = src (32-bit) */
void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src);

/* Validate ALU/ALU64 immediate range */
bool valid_alu_i(u8 op, s32 imm);

/* Rewrite ALU/ALU64 immediate operation */
bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val);

/* ALU immediate operation (32-bit) */
void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op);

/* ALU register operation (32-bit) */
void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op);
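
/*
 * Illustrative flow (not part of this header; the real call sites in
 * the 32-bit and 64-bit JITs differ in detail): if an immediate fits
 * the MIPS encoding it is used directly, otherwise it is loaded into a
 * scratch register and the register form is used. rewrite_alu_i() can
 * additionally rewrite an operation and its immediate into a cheaper
 * equivalent, for example a subtraction of imm as an addition of -imm,
 * since MIPS has no subtract-immediate. MIPS_R_T4 as scratch below is
 * a hypothetical choice.
 *
 *	if (valid_alu_i(op, imm)) {
 *		emit_alu_i(ctx, dst, imm, op);
 *	} else {
 *		emit_mov_i(ctx, MIPS_R_T4, imm);
 *		emit_alu_r(ctx, dst, MIPS_R_T4, op);
 *	}
 */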

/* Atomic read-modify-write (32-bit) */
void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code);

/* Atomic compare-and-exchange (32-bit) */
void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off);

/* Swap bytes and truncate a register word or half word */
void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width);

/* Validate JMP/JMP32 immediate range */
bool valid_jmp_i(u8 op, s32 imm);

/* Prepare a PC-relative jump operation with an immediate conditional */
void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);

/* Prepare a PC-relative jump operation with a register conditional */
void setup_jmp_r(struct jit_context *ctx, bool same_reg,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);

/* Finish a PC-relative jump operation */
int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off);
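
/*
 * Illustrative flow (not part of this header): a conditional BPF jump
 * on two registers is typically lowered in three steps. setup_jmp_r()
 * resolves the branch condition and the JIT branch offset, emit_jmp_r()
 * emits the compare-and-branch, and finish_jmp() completes the
 * operation (e.g. the delay slot and any conversion of an out-of-range
 * PC-relative branch), returning a negative value on failure.
 *
 *	u8 jit_op;
 *	s32 jit_off;
 *
 *	setup_jmp_r(ctx, dst == src, bpf_op, bpf_off, &jit_op, &jit_off);
 *	emit_jmp_r(ctx, dst, src, jit_off, jit_op);
 *	if (finish_jmp(ctx, jit_op, bpf_off) < 0)
 *		return -1;
 */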

/* Conditional JMP/JMP32 immediate */
void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op);

/* Conditional JMP/JMP32 register */
void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op);

/* Jump always */
int emit_ja(struct jit_context *ctx, s16 off);

/* Jump to epilogue */
int emit_exit(struct jit_context *ctx);

/*
 * Build the program prologue to set up the stack and registers.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
void build_prologue(struct jit_context *ctx);

/*
 * Build the program epilogue to restore the stack and registers.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
void build_epilogue(struct jit_context *ctx, int dest_reg);

/*
 * Convert an eBPF instruction to a native instruction, i.e. JIT a
 * single eBPF instruction.
 * Returns:
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
int build_insn(const struct bpf_insn *insn, struct jit_context *ctx);
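
/*
 * Illustrative caller sketch (not part of this header): the JIT loop
 * walks the BPF program and skips the second half of a 16-byte
 * instruction (BPF_LD | BPF_DW | BPF_IMM) when build_insn() reports it.
 * Details are simplified and the surrounding code is hypothetical.
 *
 *	for (i = 0; i < prog->len; i++) {
 *		ctx->bpf_index = i;
 *		ret = build_insn(&prog->insnsi[i], ctx);
 *		if (ret < 0)
 *			return ret;
 *		if (ret > 0)
 *			i++;	// second slot of a 16-byte instruction
 *	}
 */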

#endif /* _BPF_JIT_COMP_H */