// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT5(b1, b2, b3, b4, b5) \
	do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

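/*
 * Example (illustrative only): the EMIT* macros pack little-endian
 * instruction bytes into the 'prog' buffer. E.g. the prologue below emits
 * 'mov rbp, rsp' (48 89 E5) as EMIT3(0x48, 0x89, 0xE5), and a 5-byte
 * 'mov edi, imm32' (B8+rd id, rd = 7) could be written as
 * EMIT1_off32(0xBF, imm32).
 */
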
#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

/*
 * Let us limit the positive offset to be <= 123.
 * This is to ensure eventual jit convergence for the following patterns:
 * ...
 * pass4, final_proglen=4391:
 *   ...
 *   20e:    48 85 ff                test   rdi,rdi
 *   211:    74 7d                   je     0x290
 *   213:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   289:    48 85 ff                test   rdi,rdi
 *   28c:    74 17                   je     0x2a5
 *   28e:    e9 7f ff ff ff          jmp    0x212
 *   293:    bf 03 00 00 00          mov    edi,0x3
 * Note that insn at 0x211 is a 2-byte cond jump insn with offset 0x7d
 * (+125) and insn at 0x28e is a 5-byte jmp insn with offset -129.
 *
 * pass5, final_proglen=4392:
 *   ...
 *   20e:    48 85 ff                test   rdi,rdi
 *   211:    0f 84 80 00 00 00       je     0x297
 *   217:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   28d:    48 85 ff                test   rdi,rdi
 *   290:    74 1a                   je     0x2ac
 *   292:    eb 84                   jmp    0x218
 *   294:    bf 03 00 00 00          mov    edi,0x3
 * Note that insn at 0x211 is now a 6-byte cond jump insn since its offset
 * became 0x80 in the previous round (0x293 - 0x213 = 0x80).
 * At the same time, insn at 0x292 is a 2-byte insn since its offset is
 * -124.
 *
 * pass6 will repeat the same code as in pass4 and this will prevent
 * eventual convergence.
 *
 * To fix this issue, we need to break the je (2->6 bytes) <-> jmp
 * (5->2 bytes) cycle above. In this example a je offset <= 0x7c should
 * work.
 *
 * For other cases, a je <-> je pair needs offset <= 0x7b to avoid the
 * non-convergence issue, while for jmp <-> je and jmp <-> jmp pairs a
 * jmp offset <= 0x7c is sufficient.
 *
 * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
 * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
 */
static bool is_imm8_jmp_offset(int value)
{
	return value <= 123 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

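/*
 * Example (illustrative only): EMIT2(X86_JE, rel8) emits a short 'je'
 * (74 <rel8>); its far form per the rule above is 0x0F, X86_JE + 0x10,
 * i.e. 0F 84 <rel32>.
 */
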
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
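
/*
 * Worked example (illustrative only): EMIT_mov(BPF_REG_1, BPF_REG_2)
 * expands to EMIT3(add_2mod(0x48, ...), 0x89, add_2reg(0xC0, ...)).
 * Neither RDI nor RSI is an extended register, so this yields 48 89 F7,
 * i.e. 'mov rdi, rsi' (REX.W + 89 /r with ModRM 0xF7: mod=11, reg=rsi,
 * r/m=rdi).
 */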

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
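 *
 * Byte-accounting sketch (derived from the code below; the xor/nop2 slot
 * is only emitted for eBPF, and tail calls only target eBPF programs):
 *   endbr (if IBT)      ENDBR_INSN_SIZE bytes
 *   nop5                5 bytes (patchable BPF trampoline hook)
 *   xor eax,eax / nop2  2 bytes
 *   push rbp            1 byte
 *   mov rbp, rsp        3 bytes
 * = 11 + ENDBR_INSN_SIZE = X86_TAIL_CALL_OFFSET, landing on the second
 * ENDBR emitted below.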
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	EMIT_ENDBR();
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

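/*
 * emit_call()/emit_jump() wrap emit_patch() with the E8/E9 opcodes.
 * Illustrative example: a call emitted at ip = 0x1000 targeting
 * func = 0x2000 encodes as E8 followed by rel32 = 0x2000 - (0x1000 + 5)
 * = 0xffb, i.e. the offset is relative to the end of the 5-byte insn.
 */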
static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
	OPTIMIZER_HIDE_VAR(func);
	ip += x86_call_depth_emit_accounting(pprog, func);
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

	if (IS_ENABLED(CONFIG_MITIGATION_ITS) &&
	    cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
		OPTIMIZER_HIDE_VAR(reg);
		emit_jump(&prog, its_static_thunk(reg), ip);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
		else
			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%reg */
		if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);		/* int3 */
	}

	*pprog = prog;
}

static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_wants_rethunk()) {
		emit_jump(&prog, x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

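/*
 * Size sketch (illustrative): loading 0xdeadbeef (fits in u32) into rax
 * below uses the 5-byte zero-extending 'mov eax, 0xdeadbeef' (B8 id)
 * rather than the 10-byte 'movabs rax, imm64' (REX.W B8 io).
 */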
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
			   u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* movs[b,w,l]q dst, src */
		if (num_bits == 8)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 16)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 32)
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
			      add_2reg(0xC0, src_reg, dst_reg));
	} else {
		/* movs[b,w]l dst, src */
		if (num_bits == 8) {
			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		} else if (num_bits == 16) {
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, src_reg, dst_reg));
			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		}
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* LDSX: dst_reg = *(s8*)(src_reg + off) */
static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movsx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
		break;
	case BPF_H:
		/* Emit 'movsx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
		break;
	case BPF_W:
		/* Emit 'movsx rax, dword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

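/*
 * Exception handler for faulting PROBE_MEM loads. The fixup word is
 * packed by do_jit() below as: low 8 bits = length of the faulting x86
 * insn (to skip over it), upper bits = pt_regs offset of the destination
 * register (to zero it). E.g. (illustrative) a 4-byte load into rbx
 * packs 4 | (offsetof(struct pt_regs, bx) << 8).
 */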
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

/* emit the 3-byte VEX prefix
 *
 * r: same as rex.r, extra bit for ModRM reg field
 * x: same as rex.x, extra bit for SIB index field
 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
 * m: opcode map select, encoding escape bytes e.g. 0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
		      bool w, u8 src_reg2, bool l, u8 pp)
{
	u8 *prog = *pprog;
	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
	u8 b1, b2;
	u8 vvvv = reg2hex[src_reg2];

	/* reg2hex gives only the lower 3 bits of vvvv */
	if (is_ereg(src_reg2))
		vvvv |= 1 << 3;

	/*
	 * 2nd byte of 3-byte VEX prefix
	 * ~ means bit inverted encoding
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  |~R |~X |~B |         m         |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	/*
	 * 3rd byte of 3-byte VEX prefix
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  | W |     ~vvvv     | L |   pp  |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);

	EMIT3(b0, b1, b2);
	*pprog = prog;
}

/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
	u8 *prog = *pprog;
	bool r = is_ereg(dst_reg);
	u8 m = 2; /* escape code 0f38 */

	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
	*pprog = prog;
}
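
/*
 * Worked example (illustrative, assuming rax/rbx operands):
 * emit_shiftx(&prog, BPF_REG_0, BPF_REG_6, true, 1) emits
 * 'shlx rax, rax, rbx' as C4 E2 E1 F7 C0: byte1 0xE2 = ~R,~X,~B all set,
 * m=2 (0f38 map); byte2 0xE1 = W=1, ~vvvv for rbx (3), L=0, pp=1 (0x66).
 */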

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
#define RESTORE_TAIL_CALL_CNT(stack)				\
	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)

static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
				    struct bpf_prog *bpf_prog)
{
	u8 *prog = *pprog;
	u8 *func;

	if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
		/* The clearing sequence clobbers eax and ecx. */
		EMIT1(0x50); /* push rax */
		EMIT1(0x51); /* push rcx */
		ip += 2;

		func = (u8 *)clear_bhb_loop;
		ip += x86_call_depth_emit_accounting(&prog, func);

		if (emit_call(&prog, func, ip))
			return -EINVAL;
		EMIT1(0x59); /* pop rcx */
		EMIT1(0x58); /* pop rax */
	}
	/* Insert IBHF instruction */
	if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
	     cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
	    cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
		/*
		 * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
		 * fence preventing branch history from before the fence from
		 * affecting indirect branches after the fence. This is
		 * specifically used in cBPF jitted code to prevent Intra-mode
		 * BHI attacks. The IBHF instruction is designed to be a NOP on
		 * hardware that doesn't need or support it.  The REP and REX.W
		 * prefixes are required by the microcode, and they also ensure
		 * that the NOP is unlikely to be used in existing code.
		 *
		 * IBHF is not a valid instruction in 32-bit mode.
		 */
		EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
	}
	*pprog = prog;
	return 0;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		s16 insn_off;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			if (insn->off == 0)
				emit_mov_reg(&prog,
					     BPF_CLASS(insn->code) == BPF_ALU64,
					     dst_reg, src_reg);
			else
				emit_movsx_reg(&prog, insn->off,
					       BPF_CLASS(insn->code) == BPF_ALU64,
					       dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K: {
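			/*
			 * x86 div/idiv implicitly uses rdx:rax: the quotient
			 * lands in rax and the remainder in rdx. That is why
			 * both registers are saved around the operation below
			 * and the result is copied to dst_reg afterwards.
			 */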
1275 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1276 
1277 			if (dst_reg != BPF_REG_0)
1278 				EMIT1(0x50); /* push rax */
1279 			if (dst_reg != BPF_REG_3)
1280 				EMIT1(0x52); /* push rdx */
1281 
1282 			if (BPF_SRC(insn->code) == BPF_X) {
1283 				if (src_reg == BPF_REG_0 ||
1284 				    src_reg == BPF_REG_3) {
1285 					/* mov r11, src_reg */
1286 					EMIT_mov(AUX_REG, src_reg);
1287 					src_reg = AUX_REG;
1288 				}
1289 			} else {
1290 				/* mov r11, imm32 */
1291 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1292 				src_reg = AUX_REG;
1293 			}
1294 
1295 			if (dst_reg != BPF_REG_0)
1296 				/* mov rax, dst_reg */
1297 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1298 
1299 			if (insn->off == 0) {
1300 				/*
1301 				 * xor edx, edx
1302 				 * equivalent to 'xor rdx, rdx', but one byte less
1303 				 */
1304 				EMIT2(0x31, 0xd2);
1305 
1306 				/* div src_reg */
1307 				maybe_emit_1mod(&prog, src_reg, is64);
1308 				EMIT2(0xF7, add_1reg(0xF0, src_reg));
1309 			} else {
1310 				if (BPF_CLASS(insn->code) == BPF_ALU)
1311 					EMIT1(0x99); /* cdq */
1312 				else
1313 					EMIT2(0x48, 0x99); /* cqo */
1314 
1315 				/* idiv src_reg */
1316 				maybe_emit_1mod(&prog, src_reg, is64);
1317 				EMIT2(0xF7, add_1reg(0xF8, src_reg));
1318 			}
1319 
1320 			if (BPF_OP(insn->code) == BPF_MOD &&
1321 			    dst_reg != BPF_REG_3)
1322 				/* mov dst_reg, rdx */
1323 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1324 			else if (BPF_OP(insn->code) == BPF_DIV &&
1325 				 dst_reg != BPF_REG_0)
1326 				/* mov dst_reg, rax */
1327 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1328 
1329 			if (dst_reg != BPF_REG_3)
1330 				EMIT1(0x5A); /* pop rdx */
1331 			if (dst_reg != BPF_REG_0)
1332 				EMIT1(0x58); /* pop rax */
1333 			break;
1334 		}
1335 
1336 		case BPF_ALU | BPF_MUL | BPF_K:
1337 		case BPF_ALU64 | BPF_MUL | BPF_K:
1338 			maybe_emit_mod(&prog, dst_reg, dst_reg,
1339 				       BPF_CLASS(insn->code) == BPF_ALU64);
1340 
1341 			if (is_imm8(imm32))
1342 				/* imul dst_reg, dst_reg, imm8 */
1343 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1344 				      imm32);
1345 			else
1346 				/* imul dst_reg, dst_reg, imm32 */
1347 				EMIT2_off32(0x69,
1348 					    add_2reg(0xC0, dst_reg, dst_reg),
1349 					    imm32);
1350 			break;
1351 
1352 		case BPF_ALU | BPF_MUL | BPF_X:
1353 		case BPF_ALU64 | BPF_MUL | BPF_X:
1354 			maybe_emit_mod(&prog, src_reg, dst_reg,
1355 				       BPF_CLASS(insn->code) == BPF_ALU64);
1356 
1357 			/* imul dst_reg, src_reg */
1358 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1359 			break;
1360 
1361 			/* Shifts */
1362 		case BPF_ALU | BPF_LSH | BPF_K:
1363 		case BPF_ALU | BPF_RSH | BPF_K:
1364 		case BPF_ALU | BPF_ARSH | BPF_K:
1365 		case BPF_ALU64 | BPF_LSH | BPF_K:
1366 		case BPF_ALU64 | BPF_RSH | BPF_K:
1367 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1368 			maybe_emit_1mod(&prog, dst_reg,
1369 					BPF_CLASS(insn->code) == BPF_ALU64);
1370 
1371 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1372 			if (imm32 == 1)
1373 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1374 			else
1375 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1376 			break;
1377 
1378 		case BPF_ALU | BPF_LSH | BPF_X:
1379 		case BPF_ALU | BPF_RSH | BPF_X:
1380 		case BPF_ALU | BPF_ARSH | BPF_X:
1381 		case BPF_ALU64 | BPF_LSH | BPF_X:
1382 		case BPF_ALU64 | BPF_RSH | BPF_X:
1383 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1384 			/* BMI2 shifts aren't better when shift count is already in rcx */
1385 			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1386 				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1387 				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1388 				u8 op;
1389 
1390 				switch (BPF_OP(insn->code)) {
1391 				case BPF_LSH:
1392 					op = 1; /* prefix 0x66 */
1393 					break;
1394 				case BPF_RSH:
1395 					op = 3; /* prefix 0xf2 */
1396 					break;
1397 				case BPF_ARSH:
1398 					op = 2; /* prefix 0xf3 */
1399 					break;
1400 				}
1401 
1402 				emit_shiftx(&prog, dst_reg, src_reg, w, op);
1403 
1404 				break;
1405 			}
1406 
1407 			if (src_reg != BPF_REG_4) { /* common case */
1408 				/* Check for bad case when dst_reg == rcx */
1409 				if (dst_reg == BPF_REG_4) {
1410 					/* mov r11, dst_reg */
1411 					EMIT_mov(AUX_REG, dst_reg);
1412 					dst_reg = AUX_REG;
1413 				} else {
1414 					EMIT1(0x51); /* push rcx */
1415 				}
1416 				/* mov rcx, src_reg */
1417 				EMIT_mov(BPF_REG_4, src_reg);
1418 			}
1419 
1420 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1421 			maybe_emit_1mod(&prog, dst_reg,
1422 					BPF_CLASS(insn->code) == BPF_ALU64);
1423 
1424 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1425 			EMIT2(0xD3, add_1reg(b3, dst_reg));
1426 
1427 			if (src_reg != BPF_REG_4) {
1428 				if (insn->dst_reg == BPF_REG_4)
1429 					/* mov dst_reg, r11 */
1430 					EMIT_mov(insn->dst_reg, AUX_REG);
1431 				else
1432 					EMIT1(0x59); /* pop rcx */
1433 			}
1434 
1435 			break;
1436 
1437 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1438 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1439 			switch (imm32) {
1440 			case 16:
1441 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
1442 				EMIT1(0x66);
1443 				if (is_ereg(dst_reg))
1444 					EMIT1(0x41);
1445 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1446 
1447 				/* Emit 'movzwl eax, ax' */
1448 				if (is_ereg(dst_reg))
1449 					EMIT3(0x45, 0x0F, 0xB7);
1450 				else
1451 					EMIT2(0x0F, 0xB7);
1452 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1453 				break;
1454 			case 32:
1455 				/* Emit 'bswap eax' to swap lower 4 bytes */
1456 				if (is_ereg(dst_reg))
1457 					EMIT2(0x41, 0x0F);
1458 				else
1459 					EMIT1(0x0F);
1460 				EMIT1(add_1reg(0xC8, dst_reg));
1461 				break;
1462 			case 64:
1463 				/* Emit 'bswap rax' to swap 8 bytes */
1464 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1465 				      add_1reg(0xC8, dst_reg));
1466 				break;
1467 			}
1468 			break;
1469 
1470 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1471 			switch (imm32) {
1472 			case 16:
1473 				/*
1474 				 * Emit 'movzwl eax, ax' to zero extend 16-bit
1475 				 * into 64 bit
1476 				 */
1477 				if (is_ereg(dst_reg))
1478 					EMIT3(0x45, 0x0F, 0xB7);
1479 				else
1480 					EMIT2(0x0F, 0xB7);
1481 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1482 				break;
1483 			case 32:
1484 				/* Emit 'mov eax, eax' to clear upper 32-bits */
1485 				if (is_ereg(dst_reg))
1486 					EMIT1(0x45);
1487 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1488 				break;
1489 			case 64:
1490 				/* nop */
1491 				break;
1492 			}
1493 			break;
1494 
1495 			/* speculation barrier */
1496 		case BPF_ST | BPF_NOSPEC:
1497 			EMIT_LFENCE();
1498 			break;
1499 
1500 			/* ST: *(u8*)(dst_reg + off) = imm */
1501 		case BPF_ST | BPF_MEM | BPF_B:
1502 			if (is_ereg(dst_reg))
1503 				EMIT2(0x41, 0xC6);
1504 			else
1505 				EMIT1(0xC6);
1506 			goto st;
1507 		case BPF_ST | BPF_MEM | BPF_H:
1508 			if (is_ereg(dst_reg))
1509 				EMIT3(0x66, 0x41, 0xC7);
1510 			else
1511 				EMIT2(0x66, 0xC7);
1512 			goto st;
1513 		case BPF_ST | BPF_MEM | BPF_W:
1514 			if (is_ereg(dst_reg))
1515 				EMIT2(0x41, 0xC7);
1516 			else
1517 				EMIT1(0xC7);
1518 			goto st;
1519 		case BPF_ST | BPF_MEM | BPF_DW:
1520 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1521 
1522 st:			if (is_imm8(insn->off))
1523 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
1524 			else
1525 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1526 
1527 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1528 			break;
1529 
1530 			/* STX: *(u8*)(dst_reg + off) = src_reg */
1531 		case BPF_STX | BPF_MEM | BPF_B:
1532 		case BPF_STX | BPF_MEM | BPF_H:
1533 		case BPF_STX | BPF_MEM | BPF_W:
1534 		case BPF_STX | BPF_MEM | BPF_DW:
1535 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1536 			break;
1537 
1538 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
1539 		case BPF_LDX | BPF_MEM | BPF_B:
1540 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1541 		case BPF_LDX | BPF_MEM | BPF_H:
1542 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1543 		case BPF_LDX | BPF_MEM | BPF_W:
1544 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1545 		case BPF_LDX | BPF_MEM | BPF_DW:
1546 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1547 			/* LDXS: dst_reg = *(s8*)(src_reg + off) */
1548 		case BPF_LDX | BPF_MEMSX | BPF_B:
1549 		case BPF_LDX | BPF_MEMSX | BPF_H:
1550 		case BPF_LDX | BPF_MEMSX | BPF_W:
1551 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1552 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1553 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1554 			insn_off = insn->off;
1555 
1556 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1557 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1558 				/* Conservatively check that src_reg + insn->off is a kernel address:
1559 				 *   src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
1560 				 *   and
1561 				 *   src_reg + insn->off < VSYSCALL_ADDR
1562 				 */
1563 
1564 				u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
1565 				u8 *end_of_jmp;
1566 
1567 				/* movabsq r10, VSYSCALL_ADDR */
1568 				emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
1569 					       (u32)(long)VSYSCALL_ADDR);
1570 
1571 				/* mov src_reg, r11 */
1572 				EMIT_mov(AUX_REG, src_reg);
1573 
1574 				if (insn->off) {
1575 					/* add r11, insn->off */
1576 					maybe_emit_1mod(&prog, AUX_REG, true);
1577 					EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1578 				}
1579 
1580 				/* sub r11, r10 */
1581 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1582 				EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1583 
1584 				/* movabsq r10, limit */
1585 				emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
1586 					       (u32)(long)limit);
1587 
1588 				/* cmp r10, r11 */
1589 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1590 				EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1591 
1592 				/* if unsigned '>', goto load */
1593 				EMIT2(X86_JA, 0);
1594 				end_of_jmp = prog;
1595 
1596 				/* xor dst_reg, dst_reg */
1597 				emit_mov_imm32(&prog, false, dst_reg, 0);
1598 				/* jmp byte_after_ldx */
1599 				EMIT2(0xEB, 0);
1600 
1601 				/* populate jmp_offset for JAE above to jump to start_of_ldx */
1602 				start_of_ldx = prog;
1603 				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
1604 			}
1605 			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
1606 			    BPF_MODE(insn->code) == BPF_MEMSX)
1607 				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1608 			else
1609 				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1610 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1611 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1612 				struct exception_table_entry *ex;
1613 				u8 *_insn = image + proglen + (start_of_ldx - temp);
1614 				s64 delta;
1615 
1616 				/* populate jmp_offset for JMP above */
1617 				start_of_ldx[-1] = prog - start_of_ldx;
1618 
1619 				if (!bpf_prog->aux->extable)
1620 					break;
1621 
1622 				if (excnt >= bpf_prog->aux->num_exentries) {
1623 					pr_err("ex gen bug\n");
1624 					return -EFAULT;
1625 				}
1626 				ex = &bpf_prog->aux->extable[excnt++];
1627 
1628 				delta = _insn - (u8 *)&ex->insn;
1629 				if (!is_simm32(delta)) {
1630 					pr_err("extable->insn doesn't fit into 32-bit\n");
1631 					return -EFAULT;
1632 				}
1633 				/* switch ex to rw buffer for writes */
1634 				ex = (void *)rw_image + ((void *)ex - (void *)image);
1635 
1636 				ex->insn = delta;
1637 
1638 				ex->data = EX_TYPE_BPF;
1639 
1640 				if (dst_reg > BPF_REG_9) {
1641 					pr_err("verifier error\n");
1642 					return -EFAULT;
1643 				}
1644 				/*
1645 				 * Compute size of x86 insn and its target dest x86 register.
1646 				 * ex_handler_bpf() will use lower 8 bits to adjust
1647 				 * pt_regs->ip to jump over this x86 instruction
1648 				 * and upper bits to figure out which pt_regs to zero out.
1649 				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1650 				 * of 4 bytes will be ignored and rbx will be zero inited.
1651 				 */
1652 				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1653 			}
1654 			break;
1655 
1656 		case BPF_STX | BPF_ATOMIC | BPF_W:
1657 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1658 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
1659 			    insn->imm == (BPF_OR | BPF_FETCH) ||
1660 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
1661 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1662 				u32 real_src_reg = src_reg;
1663 				u32 real_dst_reg = dst_reg;
1664 				u8 *branch_target;
1665 
1666 				/*
1667 				 * Can't be implemented with a single x86 insn.
1668 				 * Need to do a CMPXCHG loop.
1669 				 */
1670 
1671 				/* Will need RAX as a CMPXCHG operand so save R0 */
1672 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1673 				if (src_reg == BPF_REG_0)
1674 					real_src_reg = BPF_REG_AX;
1675 				if (dst_reg == BPF_REG_0)
1676 					real_dst_reg = BPF_REG_AX;
1677 
1678 				branch_target = prog;
1679 				/* Load old value */
1680 				emit_ldx(&prog, BPF_SIZE(insn->code),
1681 					 BPF_REG_0, real_dst_reg, insn->off);
1682 				/*
1683 				 * Perform the (commutative) operation locally,
1684 				 * put the result in the AUX_REG.
1685 				 */
1686 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1687 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1688 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1689 				      add_2reg(0xC0, AUX_REG, real_src_reg));
1690 				/* Attempt to swap in new value */
1691 				err = emit_atomic(&prog, BPF_CMPXCHG,
1692 						  real_dst_reg, AUX_REG,
1693 						  insn->off,
1694 						  BPF_SIZE(insn->code));
1695 				if (WARN_ON(err))
1696 					return err;
1697 				/*
1698 				 * ZF tells us whether we won the race. If it's
1699 				 * cleared we need to try again.
1700 				 */
1701 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
1702 				/* Return the pre-modification value */
1703 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1704 				/* Restore R0 after clobbering RAX */
1705 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1706 				break;
1707 			}
1708 
1709 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1710 					  insn->off, BPF_SIZE(insn->code));
1711 			if (err)
1712 				return err;
1713 			break;
1714 
1715 			/* call */
1716 		case BPF_JMP | BPF_CALL: {
1717 			int offs;
1718 
1719 			func = (u8 *) __bpf_call_base + imm32;
1720 			if (tail_call_reachable) {
1721 				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
1722 				if (!imm32)
1723 					return -EINVAL;
1724 				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
1725 			} else {
1726 				if (!imm32)
1727 					return -EINVAL;
1728 				offs = x86_call_depth_emit_accounting(&prog, func);
1729 			}
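			/*
			 * Note: offs is the number of bytes already emitted for
			 * this insn before the call itself. The extra 7 in the
			 * tail-call path accounts for the RESTORE_TAIL_CALL_CNT
			 * sequence above, which in this JIT is a 7-byte mov
			 * (3 opcode/modrm bytes plus a 4-byte displacement), so
			 * that the call displacement is computed from the call
			 * insn's real address.
			 */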
1730 			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
1731 				return -EINVAL;
1732 			break;
1733 		}
1734 
1735 		case BPF_JMP | BPF_TAIL_CALL:
1736 			if (imm32)
1737 				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1738 							  &prog, image + addrs[i - 1],
1739 							  callee_regs_used,
1740 							  bpf_prog->aux->stack_depth,
1741 							  ctx);
1742 			else
1743 				emit_bpf_tail_call_indirect(&prog,
1744 							    callee_regs_used,
1745 							    bpf_prog->aux->stack_depth,
1746 							    image + addrs[i - 1],
1747 							    ctx);
1748 			break;
1749 
1750 			/* cond jump */
1751 		case BPF_JMP | BPF_JEQ | BPF_X:
1752 		case BPF_JMP | BPF_JNE | BPF_X:
1753 		case BPF_JMP | BPF_JGT | BPF_X:
1754 		case BPF_JMP | BPF_JLT | BPF_X:
1755 		case BPF_JMP | BPF_JGE | BPF_X:
1756 		case BPF_JMP | BPF_JLE | BPF_X:
1757 		case BPF_JMP | BPF_JSGT | BPF_X:
1758 		case BPF_JMP | BPF_JSLT | BPF_X:
1759 		case BPF_JMP | BPF_JSGE | BPF_X:
1760 		case BPF_JMP | BPF_JSLE | BPF_X:
1761 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1762 		case BPF_JMP32 | BPF_JNE | BPF_X:
1763 		case BPF_JMP32 | BPF_JGT | BPF_X:
1764 		case BPF_JMP32 | BPF_JLT | BPF_X:
1765 		case BPF_JMP32 | BPF_JGE | BPF_X:
1766 		case BPF_JMP32 | BPF_JLE | BPF_X:
1767 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1768 		case BPF_JMP32 | BPF_JSLT | BPF_X:
1769 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1770 		case BPF_JMP32 | BPF_JSLE | BPF_X:
1771 			/* cmp dst_reg, src_reg */
1772 			maybe_emit_mod(&prog, dst_reg, src_reg,
1773 				       BPF_CLASS(insn->code) == BPF_JMP);
1774 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1775 			goto emit_cond_jmp;
1776 
1777 		case BPF_JMP | BPF_JSET | BPF_X:
1778 		case BPF_JMP32 | BPF_JSET | BPF_X:
1779 			/* test dst_reg, src_reg */
1780 			maybe_emit_mod(&prog, dst_reg, src_reg,
1781 				       BPF_CLASS(insn->code) == BPF_JMP);
1782 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1783 			goto emit_cond_jmp;
1784 
1785 		case BPF_JMP | BPF_JSET | BPF_K:
1786 		case BPF_JMP32 | BPF_JSET | BPF_K:
1787 			/* test dst_reg, imm32 */
1788 			maybe_emit_1mod(&prog, dst_reg,
1789 					BPF_CLASS(insn->code) == BPF_JMP);
1790 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1791 			goto emit_cond_jmp;
1792 
1793 		case BPF_JMP | BPF_JEQ | BPF_K:
1794 		case BPF_JMP | BPF_JNE | BPF_K:
1795 		case BPF_JMP | BPF_JGT | BPF_K:
1796 		case BPF_JMP | BPF_JLT | BPF_K:
1797 		case BPF_JMP | BPF_JGE | BPF_K:
1798 		case BPF_JMP | BPF_JLE | BPF_K:
1799 		case BPF_JMP | BPF_JSGT | BPF_K:
1800 		case BPF_JMP | BPF_JSLT | BPF_K:
1801 		case BPF_JMP | BPF_JSGE | BPF_K:
1802 		case BPF_JMP | BPF_JSLE | BPF_K:
1803 		case BPF_JMP32 | BPF_JEQ | BPF_K:
1804 		case BPF_JMP32 | BPF_JNE | BPF_K:
1805 		case BPF_JMP32 | BPF_JGT | BPF_K:
1806 		case BPF_JMP32 | BPF_JLT | BPF_K:
1807 		case BPF_JMP32 | BPF_JGE | BPF_K:
1808 		case BPF_JMP32 | BPF_JLE | BPF_K:
1809 		case BPF_JMP32 | BPF_JSGT | BPF_K:
1810 		case BPF_JMP32 | BPF_JSLT | BPF_K:
1811 		case BPF_JMP32 | BPF_JSGE | BPF_K:
1812 		case BPF_JMP32 | BPF_JSLE | BPF_K:
1813 			/* test dst_reg, dst_reg to save one extra byte */
1814 			if (imm32 == 0) {
1815 				maybe_emit_mod(&prog, dst_reg, dst_reg,
1816 					       BPF_CLASS(insn->code) == BPF_JMP);
1817 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1818 				goto emit_cond_jmp;
1819 			}
1820 
1821 			/* cmp dst_reg, imm8/32 */
1822 			maybe_emit_1mod(&prog, dst_reg,
1823 					BPF_CLASS(insn->code) == BPF_JMP);
1824 
1825 			if (is_imm8(imm32))
1826 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1827 			else
1828 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1829 
1830 emit_cond_jmp:		/* Convert BPF opcode to x86 */
1831 			switch (BPF_OP(insn->code)) {
1832 			case BPF_JEQ:
1833 				jmp_cond = X86_JE;
1834 				break;
1835 			case BPF_JSET:
1836 			case BPF_JNE:
1837 				jmp_cond = X86_JNE;
1838 				break;
1839 			case BPF_JGT:
1840 				/* GT is unsigned '>', JA in x86 */
1841 				jmp_cond = X86_JA;
1842 				break;
1843 			case BPF_JLT:
1844 				/* LT is unsigned '<', JB in x86 */
1845 				jmp_cond = X86_JB;
1846 				break;
1847 			case BPF_JGE:
1848 				/* GE is unsigned '>=', JAE in x86 */
1849 				jmp_cond = X86_JAE;
1850 				break;
1851 			case BPF_JLE:
1852 				/* LE is unsigned '<=', JBE in x86 */
1853 				jmp_cond = X86_JBE;
1854 				break;
1855 			case BPF_JSGT:
1856 				/* Signed '>', GT in x86 */
1857 				jmp_cond = X86_JG;
1858 				break;
1859 			case BPF_JSLT:
1860 				/* Signed '<', LT in x86 */
1861 				jmp_cond = X86_JL;
1862 				break;
1863 			case BPF_JSGE:
1864 				/* Signed '>=', GE in x86 */
1865 				jmp_cond = X86_JGE;
1866 				break;
1867 			case BPF_JSLE:
1868 				/* Signed '<=', LE in x86 */
1869 				jmp_cond = X86_JLE;
1870 				break;
1871 			default: /* to silence GCC warning */
1872 				return -EFAULT;
1873 			}
1874 			jmp_offset = addrs[i + insn->off] - addrs[i];
1875 			if (is_imm8_jmp_offset(jmp_offset)) {
1876 				if (jmp_padding) {
1877 					/* To keep the jmp_offset valid, the extra bytes are
1878 					 * padded before the jump insn, so we subtract the
1879 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1880 					 *
1881 					 * If the previous pass already emits an imm8
1882 					 * jmp_cond, then this BPF insn won't shrink, so
1883 					 * "nops" is 0.
1884 					 *
1885 					 * On the other hand, if the previous pass emits an
1886 					 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
1887 					 * keep the image from shrinking further.
1888 					 *
1889 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1890 					 *     is 2 bytes, so the size difference is 4 bytes.
1891 					 */
1892 					nops = INSN_SZ_DIFF - 2;
1893 					if (nops != 0 && nops != 4) {
1894 						pr_err("unexpected jmp_cond padding: %d bytes\n",
1895 						       nops);
1896 						return -EFAULT;
1897 					}
1898 					emit_nops(&prog, nops);
1899 				}
1900 				EMIT2(jmp_cond, jmp_offset);
1901 			} else if (is_simm32(jmp_offset)) {
1902 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1903 			} else {
1904 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1905 				return -EFAULT;
1906 			}
1907 
1908 			break;
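			/*
			 * Encoding note (offsets illustrative): with
			 * X86_JE == 0x74, a short "je +0x20" is emitted as
			 * "74 20", while the near form adds 0x10 to the opcode
			 * and becomes "0f 84 20 00 00 00", which is the
			 * jmp_cond + 0x10 trick used above.
			 */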
1909 
1910 		case BPF_JMP | BPF_JA:
1911 		case BPF_JMP32 | BPF_JA:
1912 			if (BPF_CLASS(insn->code) == BPF_JMP) {
1913 				if (insn->off == -1)
1914 					/* -1 jmp instructions will always jump
1915 					 * backwards two bytes. Explicitly handling
1916 					 * this case avoids wasting too many passes
1917 					 * when there are long sequences of replaced
1918 					 * dead code.
1919 					 */
1920 					jmp_offset = -2;
1921 				else
1922 					jmp_offset = addrs[i + insn->off] - addrs[i];
1923 			} else {
1924 				if (insn->imm == -1)
1925 					jmp_offset = -2;
1926 				else
1927 					jmp_offset = addrs[i + insn->imm] - addrs[i];
1928 			}
1929 
1930 			if (!jmp_offset) {
1931 				/*
1932 				 * If jmp_padding is enabled, the extra nops will
1933 				 * be inserted. Otherwise, optimize out nop jumps.
1934 				 */
1935 				if (jmp_padding) {
1936 					/* There are 3 possible conditions.
1937 					 * (1) This BPF_JA is already optimized out in
1938 					 *     the previous run, so there is no need
1939 					 *     to pad any extra byte (0 byte).
1940 					 * (2) The previous pass emits an imm8 jmp,
1941 					 *     so we pad 2 bytes to match the previous
1942 					 *     insn size.
1943 					 * (3) Similarly, the previous pass emits an
1944 					 *     imm32 jmp, and 5 bytes is padded.
1945 					 */
1946 					nops = INSN_SZ_DIFF;
1947 					if (nops != 0 && nops != 2 && nops != 5) {
1948 						pr_err("unexpected nop jump padding: %d bytes\n",
1949 						       nops);
1950 						return -EFAULT;
1951 					}
1952 					emit_nops(&prog, nops);
1953 				}
1954 				break;
1955 			}
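			/*
			 * Worked padding example (sizes hypothetical): if the
			 * previous pass emitted a 5-byte "e9" jmp here and this
			 * pass finds jmp_offset == 0, the jump disappears, so
			 * with padding enabled INSN_SZ_DIFF == 5 and five nops
			 * are emitted to keep addrs[] stable across passes.
			 */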
1956 emit_jmp:
1957 			if (is_imm8_jmp_offset(jmp_offset)) {
1958 				if (jmp_padding) {
1959 					/* To avoid breaking jmp_offset, the extra bytes
1960 					 * are padded before the actual jmp insn, so
1961 					 * 2 bytes are subtracted from INSN_SZ_DIFF.
1962 					 *
1963 					 * If the previous pass already emits an imm8
1964 					 * jmp, there is nothing to pad (0 byte).
1965 					 *
1966 					 * If it emits an imm32 jmp (5 bytes) previously
1967 					 * and now an imm8 jmp (2 bytes), then we pad
1968 					 * (5 - 2 = 3) bytes to stop the image from
1969 					 * shrinking further.
1970 					 */
1971 					nops = INSN_SZ_DIFF - 2;
1972 					if (nops != 0 && nops != 3) {
1973 						pr_err("unexpected jump padding: %d bytes\n",
1974 						       nops);
1975 						return -EFAULT;
1976 					}
1977 					emit_nops(&prog, nops);
1978 				}
1979 				EMIT2(0xEB, jmp_offset);
1980 			} else if (is_simm32(jmp_offset)) {
1981 				EMIT1_off32(0xE9, jmp_offset);
1982 			} else {
1983 				pr_err("jmp gen bug %llx\n", jmp_offset);
1984 				return -EFAULT;
1985 			}
1986 			break;
1987 
1988 		case BPF_JMP | BPF_EXIT:
1989 			if (seen_exit) {
1990 				jmp_offset = ctx->cleanup_addr - addrs[i];
1991 				goto emit_jmp;
1992 			}
1993 			seen_exit = true;
1994 			/* Update cleanup_addr */
1995 			ctx->cleanup_addr = proglen;
1996 
1997 			if (bpf_prog_was_classic(bpf_prog) &&
1998 			    !capable(CAP_SYS_ADMIN)) {
1999 				u8 *ip = image + addrs[i - 1];
2000 
2001 				if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
2002 					return -EINVAL;
2003 			}
2004 
2005 			pop_callee_regs(&prog, callee_regs_used);
2006 			EMIT1(0xC9);         /* leave */
2007 			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2008 			break;
2009 
2010 		default:
2011 			/*
2012 			 * By design x86-64 JIT should support all BPF instructions.
2013 			 * By design the x86-64 JIT should support all BPF instructions.
2014 			 * This error will be seen if a new instruction was added
2015 			 * to the interpreter but not to the JIT, or if there is
2016 			 */
2017 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2018 			return -EINVAL;
2019 		}
2020 
2021 		ilen = prog - temp;
2022 		if (ilen > BPF_MAX_INSN_SIZE) {
2023 			pr_err("bpf_jit: fatal insn size error\n");
2024 			return -EFAULT;
2025 		}
2026 
2027 		if (image) {
2028 			/*
2029 			 * When populating the image, assert that:
2030 			 *
2031 			 *  i) We do not write beyond the allocated space, and
2032 			 * ii) addrs[i] did not change from the prior run, in order
2033 			 *     to validate assumptions made for computing branch
2034 			 *     displacements.
2035 			 */
2036 			if (unlikely(proglen + ilen > oldproglen ||
2037 				     proglen + ilen != addrs[i])) {
2038 				pr_err("bpf_jit: fatal error\n");
2039 				return -EFAULT;
2040 			}
2041 			memcpy(rw_image + proglen, temp, ilen);
2042 		}
2043 		proglen += ilen;
2044 		addrs[i] = proglen;
2045 		prog = temp;
2046 	}
2047 
2048 	if (image && excnt != bpf_prog->aux->num_exentries) {
2049 		pr_err("extable is not populated\n");
2050 		return -EFAULT;
2051 	}
2052 	return proglen;
2053 }
2054 
2055 static void clean_stack_garbage(const struct btf_func_model *m,
2056 				u8 **pprog, int nr_stack_slots,
2057 				int stack_size)
2058 {
2059 	int arg_size, off;
2060 	u8 *prog;
2061 
2062 	/* Generally speaking, the compiler passes on-stack arguments
2063 	 * with a "push" instruction, which takes 8 bytes on the
2064 	 * stack. In that case there are no garbage values while we
2065 	 * copy the arguments from the origin stack frame to the
2066 	 * current one in BPF_DW chunks.
2067 	 *
2068 	 * However, sometimes the compiler allocates only 4 bytes on
2069 	 * the stack for an argument. For now, this only happens when
2070 	 * there is a single on-stack argument and its size is no
2071 	 * more than 4 bytes. In that case there are garbage values
2072 	 * in the upper 4 bytes of the slot where we store the
2073 	 * argument in the current stack frame.
2074 	 *
2075 	 * arguments on the origin stack:
2076 	 *
2077 	 * stack_arg_1(4-byte) xxx(4-byte)
2078 	 *
2079 	 * what we copy:
2080 	 *
2081 	 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2082 	 *
2083 	 * and the xxx is the garbage value which we should clean here.
2084 	 */
2085 	if (nr_stack_slots != 1)
2086 		return;
2087 
2088 	/* the size of the last argument */
2089 	arg_size = m->arg_size[m->nr_args - 1];
2090 	if (arg_size <= 4) {
2091 		off = -(stack_size - 4);
2092 		prog = *pprog;
2093 		/* mov DWORD PTR [rbp + off], 0 */
2094 		if (!is_imm8(off))
2095 			EMIT2_off32(0xC7, 0x85, off);
2096 		else
2097 			EMIT3(0xC7, 0x45, off);
2098 		EMIT(0, 4);
2099 		*pprog = prog;
2100 	}
2101 }
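/*
 * Illustrative encoding of the store above, with a made-up imm8
 * offset of -0x7c:
 *
 *	c7 45 84 00 00 00 00	mov DWORD PTR [rbp-0x7c], 0x0
 *
 * With a disp32 offset the 0x45 modrm byte becomes 0x85 and the
 * offset widens to 4 bytes, matching the two branches above.
 */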
2102 
2103 /* get the count of the regs that are used to pass arguments */
2104 static int get_nr_used_regs(const struct btf_func_model *m)
2105 {
2106 	int i, arg_regs, nr_used_regs = 0;
2107 
2108 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2109 		arg_regs = (m->arg_size[i] + 7) / 8;
2110 		if (nr_used_regs + arg_regs <= 6)
2111 			nr_used_regs += arg_regs;
2112 
2113 		if (nr_used_regs >= 6)
2114 			break;
2115 	}
2116 
2117 	return nr_used_regs;
2118 }
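/*
 * Worked example, reusing the foo() signature from the comment in
 * save_args() below: the five leading chars take regs 1-5, the struct
 * would need two regs' worth (5 + 2 > 6) so it stays on the stack,
 * and the trailing char takes reg 6, so this returns 6.
 */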
2119 
2120 static void save_args(const struct btf_func_model *m, u8 **prog,
2121 		      int stack_size, bool for_call_origin)
2122 {
2123 	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2124 	int i, j;
2125 
2126 	/* Store function arguments to stack.
2127 	 * For a function that accepts two pointers the sequence will be:
2128 	 * mov QWORD PTR [rbp-0x10],rdi
2129 	 * mov QWORD PTR [rbp-0x8],rsi
2130 	 */
2131 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2132 		arg_regs = (m->arg_size[i] + 7) / 8;
2133 
2134 		/* According to the research of Yonghong, struct members
2135 		 * are either passed all in registers or all on the stack.
2136 		 * Meanwhile, the compiler passes an argument in regs if
2137 		 * the remaining regs can hold it.
2138 		 *
2139 		 * The args can therefore arrive out of order. For example:
2140 		 *
2141 		 * struct foo_struct {
2142 		 *     long a;
2143 		 *     int b;
2144 		 * };
2145 		 * int foo(char, char, char, char, char, struct foo_struct,
2146 		 *         char);
2147 		 *
2148 		 * args 1-5 and arg 7 are passed in regs, while arg 6 is
2149 		 * passed on the stack.
2150 		 */
2151 		if (nr_regs + arg_regs > 6) {
2152 			/* copy function arguments from origin stack frame
2153 			 * into current stack frame.
2154 			 *
2155 			 * The starting address of the arguments on-stack
2156 			 * is:
2157 			 *   rbp + 8(push rbp) +
2158 			 *   8(return addr of origin call) +
2159 			 *   8(return addr of the caller)
2160 			 * which means: rbp + 24
2161 			 */
2162 			for (j = 0; j < arg_regs; j++) {
2163 				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2164 					 nr_stack_slots * 8 + 0x18);
2165 				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2166 					 -stack_size);
2167 
2168 				if (!nr_stack_slots)
2169 					first_off = stack_size;
2170 				stack_size -= 8;
2171 				nr_stack_slots++;
2172 			}
2173 		} else {
2174 			/* Only copy the on-stack arguments to the current
2175 			 * 'stack_size' offset and ignore the regs; this is
2176 			 * used to prepare the on-stack arguments for the origin call.
2177 			 */
2178 			if (for_call_origin) {
2179 				nr_regs += arg_regs;
2180 				continue;
2181 			}
2182 
2183 			/* copy the arguments from regs into stack */
2184 			for (j = 0; j < arg_regs; j++) {
2185 				emit_stx(prog, BPF_DW, BPF_REG_FP,
2186 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2187 					 -stack_size);
2188 				stack_size -= 8;
2189 				nr_regs++;
2190 			}
2191 		}
2192 	}
2193 
2194 	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2195 }
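/*
 * Continuing the foo() example above: the struct is copied from
 * [rbp + 0x18] and [rbp + 0x20] of the origin frame into two 8-byte
 * slots of the current frame, and clean_stack_garbage() then does
 * nothing since more than one stack slot was used.
 */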
2196 
2197 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2198 			 int stack_size)
2199 {
2200 	int i, j, arg_regs, nr_regs = 0;
2201 
2202 	/* Restore function arguments from stack.
2203 	 * For a function that accepts two pointers the sequence will be:
2204 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2205 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2206 	 *
2207 	 * The logic here is similar to what we do in save_args()
2208 	 */
2209 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2210 		arg_regs = (m->arg_size[i] + 7) / 8;
2211 		if (nr_regs + arg_regs <= 6) {
2212 			for (j = 0; j < arg_regs; j++) {
2213 				emit_ldx(prog, BPF_DW,
2214 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2215 					 BPF_REG_FP,
2216 					 -stack_size);
2217 				stack_size -= 8;
2218 				nr_regs++;
2219 			}
2220 		} else {
2221 			stack_size -= 8 * arg_regs;
2222 		}
2223 
2224 		if (nr_regs >= 6)
2225 			break;
2226 	}
2227 }
2228 
2229 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2230 			   struct bpf_tramp_link *l, int stack_size,
2231 			   int run_ctx_off, bool save_ret)
2232 {
2233 	u8 *prog = *pprog;
2234 	u8 *jmp_insn;
2235 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2236 	struct bpf_prog *p = l->link.prog;
2237 	u64 cookie = l->cookie;
2238 
2239 	/* mov rdi, cookie */
2240 	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2241 
2242 	/* Prepare struct bpf_tramp_run_ctx.
2243 	 *
2244 	 * bpf_tramp_run_ctx is already preserved by
2245 	 * arch_prepare_bpf_trampoline().
2246 	 *
2247 	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2248 	 */
2249 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2250 
2251 	/* arg1: mov rdi, progs[i] */
2252 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2253 	/* arg2: lea rsi, [rbp - run_ctx_off] */
2254 	if (!is_imm8(-run_ctx_off))
2255 		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2256 	else
2257 		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2258 
2259 	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
2260 		return -EINVAL;
2261 	/* remember prog start time returned by __bpf_prog_enter */
2262 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2263 
2264 	/* if (__bpf_prog_enter*(prog) == 0)
2265 	 *	goto skip_exec_of_prog;
2266 	 */
2267 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
2268 	/* emit 2 nops that will be replaced with JE insn */
2269 	jmp_insn = prog;
2270 	emit_nops(&prog, 2);
2271 
2272 	/* arg1: lea rdi, [rbp - stack_size] */
2273 	if (!is_imm8(-stack_size))
2274 		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2275 	else
2276 		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2277 	/* arg2: progs[i]->insnsi for interpreter */
2278 	if (!p->jited)
2279 		emit_mov_imm64(&prog, BPF_REG_2,
2280 			       (long) p->insnsi >> 32,
2281 			       (u32) (long) p->insnsi);
2282 	/* call JITed bpf program or interpreter */
2283 	if (emit_rsb_call(&prog, p->bpf_func, prog))
2284 		return -EINVAL;
2285 
2286 	/*
2287 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2288 	 * of the previous call which is then passed on the stack to
2289 	 * the next BPF program.
2290 	 *
2291 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
2292 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2293 	 */
2294 	if (save_ret)
2295 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2296 
2297 	/* replace 2 nops with JE insn, since jmp target is known */
2298 	jmp_insn[0] = X86_JE;
2299 	jmp_insn[1] = prog - jmp_insn - 2;
2300 
2301 	/* arg1: mov rdi, progs[i] */
2302 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2303 	/* arg2: mov rsi, rbx <- start time in nsec */
2304 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2305 	/* arg3: lea rdx, [rbp - run_ctx_off] */
2306 	if (!is_imm8(-run_ctx_off))
2307 		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2308 	else
2309 		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
2310 	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
2311 		return -EINVAL;
2312 
2313 	*pprog = prog;
2314 	return 0;
2315 }
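/*
 * Net effect per attached prog, as a rough pseudo-asm sketch
 * (symbolic offsets, optional pieces abbreviated):
 *
 *	mov rdi, cookie
 *	mov [rbp - run_ctx_off + ctx_cookie_off], rdi
 *	mov rdi, prog
 *	lea rsi, [rbp - run_ctx_off]
 *	call bpf_trampoline_enter(prog)	// __bpf_prog_enter*
 *	mov rbx, rax			// start time
 *	test rax, rax
 *	je skip				// enter returned 0
 *	lea rdi, [rbp - stack_size]	// R1 = ctx
 *	call prog->bpf_func		// or the interpreter
 *					// optional: save ret to [rbp - 8]
 * skip:
 *	mov rdi, prog
 *	mov rsi, rbx
 *	lea rdx, [rbp - run_ctx_off]
 *	call bpf_trampoline_exit(prog)	// __bpf_prog_exit*
 */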
2316 
2317 static void emit_align(u8 **pprog, u32 align)
2318 {
2319 	u8 *target, *prog = *pprog;
2320 
2321 	target = PTR_ALIGN(prog, align);
2322 	if (target != prog)
2323 		emit_nops(&prog, target - prog);
2324 
2325 	*pprog = prog;
2326 }
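/*
 * E.g. if prog currently points at an address ending in 0x9,
 * PTR_ALIGN(prog, 16) rounds it up to the next 16-byte boundary and
 * seven nop bytes are emitted.
 */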
2327 
2328 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
2329 {
2330 	u8 *prog = *pprog;
2331 	s64 offset;
2332 
2333 	offset = func - (ip + 2 + 4);
2334 	if (!is_simm32(offset)) {
2335 		pr_err("Target %p is out of range\n", func);
2336 		return -EINVAL;
2337 	}
2338 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2339 	*pprog = prog;
2340 	return 0;
2341 }
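/*
 * The displacement is relative to the end of the 6-byte insn
 * (2 opcode bytes plus a 4-byte offset), hence the "ip + 2 + 4"
 * above. E.g. a jump from ip 0x100 to func 0x200 encodes offset 0xfa.
 */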
2342 
2343 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2344 		      struct bpf_tramp_links *tl, int stack_size,
2345 		      int run_ctx_off, bool save_ret)
2346 {
2347 	int i;
2348 	u8 *prog = *pprog;
2349 
2350 	for (i = 0; i < tl->nr_links; i++) {
2351 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2352 				    run_ctx_off, save_ret))
2353 			return -EINVAL;
2354 	}
2355 	*pprog = prog;
2356 	return 0;
2357 }
2358 
2359 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2360 			      struct bpf_tramp_links *tl, int stack_size,
2361 			      int run_ctx_off, u8 **branches)
2362 {
2363 	u8 *prog = *pprog;
2364 	int i;
2365 
2366 	/* The first fmod_ret program will receive a garbage return value.
2367 	 * Set this to 0 to avoid confusing the program.
2368 	 */
2369 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2370 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2371 	for (i = 0; i < tl->nr_links; i++) {
2372 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
2373 			return -EINVAL;
2374 
2375 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
2376 		 * if (*(u64 *)(rbp - 8) !=  0)
2377 		 *	goto do_fexit;
2378 		 */
2379 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
2380 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2381 
2382 		/* Save the location of the branch and generate 6 nops
2383 		 * (2 bytes for the jump and 4 bytes for its offset). These nops
2384 		 * are replaced with a conditional jump once do_fexit (i.e. the
2385 		 * start of the fexit invocation) is finalized.
2386 		 */
2387 		branches[i] = prog;
2388 		emit_nops(&prog, 4 + 2);
2389 	}
2390 
2391 	*pprog = prog;
2392 	return 0;
2393 }
2394 
2395 /* Example:
2396  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2397  * its 'struct btf_func_model' will have nr_args=2.
2398  * The assembly code when eth_type_trans is executing after trampoline:
2399  *
2400  * push rbp
2401  * mov rbp, rsp
2402  * sub rsp, 16                     // space for skb and dev
2403  * push rbx                        // temp regs to pass start time
2404  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
2405  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
2406  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2407  * mov rbx, rax                    // remember start time if bpf stats are enabled
2408  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
2409  * call addr_of_jited_FENTRY_prog
2410  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2411  * mov rsi, rbx                    // prog start time
2412  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2413  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
2414  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
2415  * pop rbx
2416  * leave
2417  * ret
2418  *
2419  * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
2420  * replaced with 'call generated_bpf_trampoline'. When it returns
2421  * eth_type_trans will continue executing with original skb and dev pointers.
2422  *
2423  * The assembly code when eth_type_trans is called from trampoline:
2424  *
2425  * push rbp
2426  * mov rbp, rsp
2427  * sub rsp, 24                     // space for skb, dev, return value
2428  * push rbx                        // temp regs to pass start time
2429  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
2430  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
2431  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2432  * mov rbx, rax                    // remember start time if bpf stats are enabled
2433  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2434  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
2435  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2436  * mov rsi, rbx                    // prog start time
2437  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2438  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
2439  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
2440  * call eth_type_trans+5           // execute body of eth_type_trans
2441  * mov qword ptr [rbp - 8], rax    // save return value
2442  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2443  * mov rbx, rax                    // remember start time if bpf stats are enabled
2444  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2445  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
2446  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2447  * mov rsi, rbx                    // prog start time
2448  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2449  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
2450  * pop rbx
2451  * leave
2452  * add rsp, 8                      // skip eth_type_trans's frame
2453  * ret                             // return to its caller
2454  */
2455 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
2456 				const struct btf_func_model *m, u32 flags,
2457 				struct bpf_tramp_links *tlinks,
2458 				void *func_addr)
2459 {
2460 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
2461 	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2462 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2463 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2464 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2465 	void *orig_call = func_addr;
2466 	u8 **branches = NULL;
2467 	u8 *prog;
2468 	bool save_ret;
2469 
2470 	/* extra registers for struct arguments */
2471 	for (i = 0; i < m->nr_args; i++)
2472 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2473 			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
2474 
2475 	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first
2476 	 * 6 are passed through regs, the rest are passed on the stack.
2477 	 */
2478 	if (nr_regs > MAX_BPF_FUNC_ARGS)
2479 		return -ENOTSUPP;
2480 
2481 	/* Generated trampoline stack layout:
2482 	 *
2483 	 * RBP + 8         [ return address  ]
2484 	 * RBP + 0         [ RBP             ]
2485 	 *
2486 	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
2487 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
2488 	 *
2489 	 *                 [ reg_argN        ]  always
2490 	 *                 [ ...             ]
2491 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
2492 	 *
2493 	 * RBP - nregs_off [ regs count	     ]  always
2494 	 *
2495 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
2496 	 *
2497 	 * RBP - rbx_off   [ rbx value       ]  always
2498 	 *
2499 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2500 	 *
2501 	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
2502 	 *                     [ ...        ]
2503 	 *                     [ stack_arg2 ]
2504 	 * RBP - arg_stack_off [ stack_arg1 ]
2505 	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
2506 	 */
2507 
2508 	/* room for return value of orig_call or fentry prog */
2509 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2510 	if (save_ret)
2511 		stack_size += 8;
2512 
2513 	stack_size += nr_regs * 8;
2514 	regs_off = stack_size;
2515 
2516 	/* regs count  */
2517 	stack_size += 8;
2518 	nregs_off = stack_size;
2519 
2520 	if (flags & BPF_TRAMP_F_IP_ARG)
2521 		stack_size += 8; /* room for IP address argument */
2522 
2523 	ip_off = stack_size;
2524 
2525 	stack_size += 8;
2526 	rbx_off = stack_size;
2527 
2528 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2529 	run_ctx_off = stack_size;
2530 
2531 	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
2532 		/* the space used to pass arguments on-stack */
2533 		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
2534 		/* make sure the stack pointer is 16-byte aligned if we
2535 		 * need to pass arguments on the stack, which means
2536 		 *  [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
2537 		 * should be 16-byte aligned. The following code depends
2538 		 * on stack_size already being 8-byte aligned.
2539 		 */
2540 		stack_size += (stack_size % 16) ? 0 : 8;
2541 	}
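	/*
	 * Alignment example (made-up numbers): if stack_size is 0x40 at
	 * this point, 0x40 % 16 == 0, so 8 is added; 0x48 + 24 == 0x60
	 * is then 16-byte aligned at the moment the origin call is made.
	 */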
2542 
2543 	arg_stack_off = stack_size;
2544 
2545 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2546 		/* skip patched call instruction and point orig_call to actual
2547 		 * body of the kernel function.
2548 		 */
2549 		if (is_endbr(*(u32 *)orig_call))
2550 			orig_call += ENDBR_INSN_SIZE;
2551 		orig_call += X86_PATCH_SIZE;
2552 	}
2553 
2554 	prog = image;
2555 
2556 	EMIT_ENDBR();
2557 	/*
2558 	 * This is the direct-call trampoline, as such it needs accounting
2559 	 * for the __fentry__ call.
2560 	 */
2561 	x86_call_depth_emit_accounting(&prog, NULL);
2562 	EMIT1(0x55);		 /* push rbp */
2563 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2564 	if (!is_imm8(stack_size))
2565 		/* sub rsp, stack_size */
2566 		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
2567 	else
2568 		/* sub rsp, stack_size */
2569 		EMIT4(0x48, 0x83, 0xEC, stack_size);
2570 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
2571 		EMIT1(0x50);		/* push rax */
2572 	/* mov QWORD PTR [rbp - rbx_off], rbx */
2573 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
2574 
2575 	/* Store number of argument registers of the traced function:
2576 	 *   mov rax, nr_regs
2577 	 *   mov QWORD PTR [rbp - nregs_off], rax
2578 	 */
2579 	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
2580 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
2581 
2582 	if (flags & BPF_TRAMP_F_IP_ARG) {
2583 		/* Store IP address of the traced function:
2584 		 * movabsq rax, func_addr
2585 		 * mov QWORD PTR [rbp - ip_off], rax
2586 		 */
2587 		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2588 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2589 	}
2590 
2591 	save_args(m, &prog, regs_off, false);
2592 
2593 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2594 		/* arg1: mov rdi, im */
2595 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2596 		if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
2597 			ret = -EINVAL;
2598 			goto cleanup;
2599 		}
2600 	}
2601 
2602 	if (fentry->nr_links)
2603 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2604 			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
2605 			return -EINVAL;
2606 
2607 	if (fmod_ret->nr_links) {
2608 		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2609 				   GFP_KERNEL);
2610 		if (!branches)
2611 			return -ENOMEM;
2612 
2613 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2614 				       run_ctx_off, branches)) {
2615 			ret = -EINVAL;
2616 			goto cleanup;
2617 		}
2618 	}
2619 
2620 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2621 		restore_regs(m, &prog, regs_off);
2622 		save_args(m, &prog, arg_stack_off, true);
2623 
2624 		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
2625 			/* Before calling the original function, restore the
2626 			 * tail_call_cnt from stack to rax.
2627 			 */
2628 			RESTORE_TAIL_CALL_CNT(stack_size);
2629 
2630 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
2631 			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
2632 			EMIT2(0xff, 0xd3); /* call *rbx */
2633 		} else {
2634 			/* call original function */
2635 			if (emit_rsb_call(&prog, orig_call, prog)) {
2636 				ret = -EINVAL;
2637 				goto cleanup;
2638 			}
2639 		}
2640 		/* remember return value in a stack for bpf prog to access */
2641 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2642 		im->ip_after_call = prog;
2643 		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2644 		prog += X86_PATCH_SIZE;
2645 	}
2646 
2647 	if (fmod_ret->nr_links) {
2648 		/* From Intel 64 and IA-32 Architectures Optimization
2649 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2650 		 * Coding Rule 11: All branch targets should be 16-byte
2651 		 * aligned.
2652 		 */
2653 		emit_align(&prog, 16);
2654 		/* Update the branches saved in invoke_bpf_mod_ret with the
2655 		 * aligned address of do_fexit.
2656 		 */
2657 		for (i = 0; i < fmod_ret->nr_links; i++)
2658 			emit_cond_near_jump(&branches[i], prog, branches[i],
2659 					    X86_JNE);
2660 	}
2661 
2662 	if (fexit->nr_links)
2663 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
2664 			ret = -EINVAL;
2665 			goto cleanup;
2666 		}
2667 
2668 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
2669 		restore_regs(m, &prog, regs_off);
2670 
2671 	/* This needs to be done regardless. If there were fmod_ret programs,
2672 	 * the return value is only updated on the stack and still needs to be
2673 	 * restored to R0.
2674 	 */
2675 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2676 		im->ip_epilogue = prog;
2677 		/* arg1: mov rdi, im */
2678 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2679 		if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
2680 			ret = -EINVAL;
2681 			goto cleanup;
2682 		}
2683 	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
2684 		/* Before running the original function, restore the
2685 		 * tail_call_cnt from stack to rax.
2686 		 */
2687 		RESTORE_TAIL_CALL_CNT(stack_size);
2688 
2689 	/* restore return value of orig_call or fentry prog back into RAX */
2690 	if (save_ret)
2691 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2692 
2693 	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
2694 	EMIT1(0xC9); /* leave */
2695 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
2696 		/* skip our return address and return to parent */
2697 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2698 	emit_return(&prog, prog);
2699 	/* Make sure the trampoline generation logic doesn't overflow */
2700 	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2701 		ret = -EFAULT;
2702 		goto cleanup;
2703 	}
2704 	ret = prog - (u8 *)image;
2705 
2706 cleanup:
2707 	kfree(branches);
2708 	return ret;
2709 }
2710 
2711 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
2712 {
2713 	u8 *jg_reloc, *prog = *pprog;
2714 	int pivot, err, jg_bytes = 1;
2715 	s64 jg_offset;
2716 
2717 	if (a == b) {
2718 		/* Leaf node of recursion, i.e. not a range of indices
2719 		 * anymore.
2720 		 */
2721 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
2722 		if (!is_simm32(progs[a]))
2723 			return -1;
2724 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2725 			    progs[a]);
2726 		err = emit_cond_near_jump(&prog,	/* je func */
2727 					  (void *)progs[a], image + (prog - buf),
2728 					  X86_JE);
2729 		if (err)
2730 			return err;
2731 
2732 		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
2733 
2734 		*pprog = prog;
2735 		return 0;
2736 	}
2737 
2738 	/* Not a leaf node, so we pivot, and recursively descend into
2739 	 * the lower and upper ranges.
2740 	 */
2741 	pivot = (b - a) / 2;
2742 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
2743 	if (!is_simm32(progs[a + pivot]))
2744 		return -1;
2745 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2746 
2747 	if (pivot > 2) {				/* jg upper_part */
2748 		/* Require near jump. */
2749 		jg_bytes = 4;
2750 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2751 	} else {
2752 		EMIT2(X86_JG, 0);
2753 	}
2754 	jg_reloc = prog;
2755 
2756 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
2757 				  progs, image, buf);
2758 	if (err)
2759 		return err;
2760 
2761 	/* From Intel 64 and IA-32 Architectures Optimization
2762 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2763 	 * Coding Rule 11: All branch targets should be 16-byte
2764 	 * aligned.
2765 	 */
2766 	emit_align(&prog, 16);
2767 	jg_offset = prog - jg_reloc;
2768 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2769 
2770 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
2771 				  b, progs, image, buf);
2772 	if (err)
2773 		return err;
2774 
2775 	*pprog = prog;
2776 	return 0;
2777 }
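/*
 * Shape of the generated dispatcher for four sorted targets
 * f0 < f1 < f2 < f3 (addresses hypothetical, jg targets 16-byte
 * aligned by emit_align()):
 *
 *	cmp rdx, f1 ; jg .upper		// pivot of range 0..3
 *	cmp rdx, f0 ; jg .leaf1		// pivot of range 0..1
 *	cmp rdx, f0 ; je f0 ; jmp *rdx	// leaf 0
 * .leaf1:
 *	cmp rdx, f1 ; je f1 ; jmp *rdx	// leaf 1
 * .upper:
 *	cmp rdx, f2 ; jg .leaf3		// pivot of range 2..3
 *	cmp rdx, f2 ; je f2 ; jmp *rdx	// leaf 2
 * .leaf3:
 *	cmp rdx, f3 ; je f3 ; jmp *rdx	// leaf 3
 *
 * The trailing indirect jmp through rdx handles addresses not
 * present in the table.
 */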
2778 
2779 static int cmp_ips(const void *a, const void *b)
2780 {
2781 	const s64 *ipa = a;
2782 	const s64 *ipb = b;
2783 
2784 	if (*ipa > *ipb)
2785 		return 1;
2786 	if (*ipa < *ipb)
2787 		return -1;
2788 	return 0;
2789 }
2790 
2791 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
2792 {
2793 	u8 *prog = buf;
2794 
2795 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2796 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
2797 }
2798 
2799 struct x64_jit_data {
2800 	struct bpf_binary_header *rw_header;
2801 	struct bpf_binary_header *header;
2802 	int *addrs;
2803 	u8 *image;
2804 	int proglen;
2805 	struct jit_context ctx;
2806 };
2807 
2808 #define MAX_PASSES 20
2809 #define PADDING_PASSES (MAX_PASSES - 5)
2810 
2811 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2812 {
2813 	struct bpf_binary_header *rw_header = NULL;
2814 	struct bpf_binary_header *header = NULL;
2815 	struct bpf_prog *tmp, *orig_prog = prog;
2816 	struct x64_jit_data *jit_data;
2817 	int proglen, oldproglen = 0;
2818 	struct jit_context ctx = {};
2819 	bool tmp_blinded = false;
2820 	bool extra_pass = false;
2821 	bool padding = false;
2822 	u8 *rw_image = NULL;
2823 	u8 *image = NULL;
2824 	int *addrs;
2825 	int pass;
2826 	int i;
2827 
2828 	if (!prog->jit_requested)
2829 		return orig_prog;
2830 
2831 	tmp = bpf_jit_blind_constants(prog);
2832 	/*
2833 	 * If blinding was requested and we failed during blinding,
2834 	 * we must fall back to the interpreter.
2835 	 */
2836 	if (IS_ERR(tmp))
2837 		return orig_prog;
2838 	if (tmp != prog) {
2839 		tmp_blinded = true;
2840 		prog = tmp;
2841 	}
2842 
2843 	jit_data = prog->aux->jit_data;
2844 	if (!jit_data) {
2845 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2846 		if (!jit_data) {
2847 			prog = orig_prog;
2848 			goto out;
2849 		}
2850 		prog->aux->jit_data = jit_data;
2851 	}
2852 	addrs = jit_data->addrs;
2853 	if (addrs) {
2854 		ctx = jit_data->ctx;
2855 		oldproglen = jit_data->proglen;
2856 		image = jit_data->image;
2857 		header = jit_data->header;
2858 		rw_header = jit_data->rw_header;
2859 		rw_image = (void *)rw_header + ((void *)image - (void *)header);
2860 		extra_pass = true;
2861 		padding = true;
2862 		goto skip_init_addrs;
2863 	}
2864 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2865 	if (!addrs) {
2866 		prog = orig_prog;
2867 		goto out_addrs;
2868 	}
2869 
2870 	/*
2871 	 * Before the first pass, make a rough estimate of addrs[]:
2872 	 * each BPF instruction is translated to less than 64 bytes.
2873 	 */
2874 	for (proglen = 0, i = 0; i <= prog->len; i++) {
2875 		proglen += 64;
2876 		addrs[i] = proglen;
2877 	}
2878 	ctx.cleanup_addr = proglen;
2879 skip_init_addrs:
2880 
2881 	/*
2882 	 * The JITed image shrinks with every pass and the loop iterates
2883 	 * until the image stops shrinking. Very large BPF programs
2884 	 * may only converge on the last pass. In such a case, do one
2885 	 * more pass to emit the final image.
2886 	 */
2887 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
2888 		if (!padding && pass >= PADDING_PASSES)
2889 			padding = true;
2890 		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
2891 		if (proglen <= 0) {
2892 out_image:
2893 			image = NULL;
2894 			if (header) {
2895 				bpf_arch_text_copy(&header->size, &rw_header->size,
2896 						   sizeof(rw_header->size));
2897 				bpf_jit_binary_pack_free(header, rw_header);
2898 			}
2899 			/* Fall back to interpreter mode */
2900 			prog = orig_prog;
2901 			if (extra_pass) {
2902 				prog->bpf_func = NULL;
2903 				prog->jited = 0;
2904 				prog->jited_len = 0;
2905 			}
2906 			goto out_addrs;
2907 		}
2908 		if (image) {
2909 			if (proglen != oldproglen) {
2910 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2911 				       proglen, oldproglen);
2912 				goto out_image;
2913 			}
2914 			break;
2915 		}
2916 		if (proglen == oldproglen) {
2917 			/*
2918 			 * The number of entries in extable is the number of BPF_LDX
2919 			 * insns that access kernel memory via "pointer to BTF type".
2920 			 * The verifier changed their opcode from LDX|MEM|size
2921 			 * to LDX|PROBE_MEM|size to make JITing easier.
2922 			 */
2923 			u32 align = __alignof__(struct exception_table_entry);
2924 			u32 extable_size = prog->aux->num_exentries *
2925 				sizeof(struct exception_table_entry);
2926 
2927 			/* allocate module memory for x86 insns and extable */
2928 			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
2929 							   &image, align, &rw_header, &rw_image,
2930 							   jit_fill_hole);
2931 			if (!header) {
2932 				prog = orig_prog;
2933 				goto out_addrs;
2934 			}
2935 			prog->aux->extable = (void *) image + roundup(proglen, align);
2936 		}
2937 		oldproglen = proglen;
2938 		cond_resched();
2939 	}
2940 
2941 	if (bpf_jit_enable > 1)
2942 		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
2943 
2944 	if (image) {
2945 		if (!prog->is_func || extra_pass) {
2946 			/*
2947 			 * bpf_jit_binary_pack_finalize fails in two scenarios:
2948 			 *   1) header is not pointing to proper module memory;
2949 			 *   2) the arch doesn't support bpf_arch_text_copy().
2950 			 *
2951 			 * Both cases are serious bugs and justify WARN_ON.
2952 			 */
2953 			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
2954 				/* header has been freed */
2955 				header = NULL;
2956 				goto out_image;
2957 			}
2958 
2959 			bpf_tail_call_direct_fixup(prog);
2960 		} else {
2961 			jit_data->addrs = addrs;
2962 			jit_data->ctx = ctx;
2963 			jit_data->proglen = proglen;
2964 			jit_data->image = image;
2965 			jit_data->header = header;
2966 			jit_data->rw_header = rw_header;
2967 		}
2968 		prog->bpf_func = (void *)image;
2969 		prog->jited = 1;
2970 		prog->jited_len = proglen;
2971 	} else {
2972 		prog = orig_prog;
2973 	}
2974 
2975 	if (!image || !prog->is_func || extra_pass) {
2976 		if (image)
2977 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
2978 out_addrs:
2979 		kvfree(addrs);
2980 		kfree(jit_data);
2981 		prog->aux->jit_data = NULL;
2982 	}
2983 out:
2984 	if (tmp_blinded)
2985 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2986 					   tmp : orig_prog);
2987 	return prog;
2988 }
2989 
2990 bool bpf_jit_supports_kfunc_call(void)
2991 {
2992 	return true;
2993 }
2994 
2995 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
2996 {
2997 	if (text_poke_copy(dst, src, len) == NULL)
2998 		return ERR_PTR(-EINVAL);
2999 	return dst;
3000 }
3001 
3002 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3003 bool bpf_jit_supports_subprog_tailcalls(void)
3004 {
3005 	return true;
3006 }
3007 
3008 void bpf_jit_free(struct bpf_prog *prog)
3009 {
3010 	if (prog->jited) {
3011 		struct x64_jit_data *jit_data = prog->aux->jit_data;
3012 		struct bpf_binary_header *hdr;
3013 
3014 		/*
3015 		 * If we fail the final pass of JIT (from jit_subprogs),
3016 		 * the program may not be finalized yet. Call finalize here
3017 		 * before freeing it.
3018 		 */
3019 		if (jit_data) {
3020 			bpf_jit_binary_pack_finalize(prog, jit_data->header,
3021 						     jit_data->rw_header);
3022 			kvfree(jit_data->addrs);
3023 			kfree(jit_data);
3024 		}
3025 		hdr = bpf_jit_binary_pack_hdr(prog);
3026 		bpf_jit_binary_pack_free(hdr, NULL);
3027 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3028 	}
3029 
3030 	bpf_prog_unlock_free(prog);
3031 }
3032 
3033 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3034 			       struct bpf_prog *new, struct bpf_prog *old)
3035 {
3036 	u8 *old_addr, *new_addr, *old_bypass_addr;
3037 	int ret;
3038 
3039 	old_bypass_addr = old ? NULL : poke->bypass_addr;
3040 	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
3041 	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
3042 
3043 	/*
3044 	 * On program loading or teardown, the program's kallsym entry
3045 	 * might not be in place, so we use __bpf_arch_text_poke to skip
3046 	 * the kallsyms check.
3047 	 */
3048 	if (new) {
3049 		ret = __bpf_arch_text_poke(poke->tailcall_target,
3050 					   BPF_MOD_JUMP,
3051 					   old_addr, new_addr);
3052 		BUG_ON(ret < 0);
3053 		if (!old) {
3054 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3055 						   BPF_MOD_JUMP,
3056 						   poke->bypass_addr,
3057 						   NULL);
3058 			BUG_ON(ret < 0);
3059 		}
3060 	} else {
3061 		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3062 					   BPF_MOD_JUMP,
3063 					   old_bypass_addr,
3064 					   poke->bypass_addr);
3065 		BUG_ON(ret < 0);
3066 		/* let other CPUs finish the execution of the program
3067 		 * so that they cannot be exposed to an invalid nop,
3068 		 * stack unwind or nop state
3069 		 */
3070 		if (!ret)
3071 			synchronize_rcu();
3072 		ret = __bpf_arch_text_poke(poke->tailcall_target,
3073 					   BPF_MOD_JUMP,
3074 					   old_addr, NULL);
3075 		BUG_ON(ret < 0);
3076 	}
3077 }
3078