// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}
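
/*
 * emit_code() packs up to four opcode bytes little-endian into a u32, so
 * the EMIT*() macros below take bytes in instruction order; e.g.
 * EMIT3(0x48, 0x89, 0xE5) writes the byte sequence 48 89 E5, i.e.
 * 'mov rbp, rsp'. Note that the len == 3 case still stores a full u32,
 * one byte past 'len'; the JIT's temp buffer reserves BPF_INSN_SAFETY
 * slack, so this overshoot is presumably harmless by construction.
 */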

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
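
/*
 * Worked example: EMIT_mov(BPF_REG_1, BPF_REG_2) emits 48 89 F7, i.e.
 * 'mov rdi, rsi': REX.W prefix 0x48 (neither register is extended),
 * opcode 0x89 (MOV r/m64, r64), ModRM 0xF7 = 0xC0 + rdi(7) + (rsi(6) << 3).
 */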

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 conditional jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is not used, since when used as a base address
 * register in load/store instructions it always needs an extra byte
 * of encoding, and it is callee-saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
 * which need an extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding.
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}
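
/*
 * Background: without a REX prefix, 8-bit register encodings 4..7 select
 * ah/ch/dh/bh instead of spl/bpl/sil/dil. Byte stores from rdi/rsi/rbp
 * (BPF_REG_1/BPF_REG_2/BPF_REG_FP) therefore need the REX byte even
 * though the underlying registers are "legacy" ones.
 */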

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}
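
/*
 * In REX terms: the 0x48 base used throughout is REX.W (64-bit operand
 * size); add_1mod()/add_2mod() set bit 0 (REX.B, extending the r/m or
 * opcode register field) and bit 2 (REX.R, extending the ModRM reg field)
 * whenever the respective register is one of r8..r15.
 */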

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
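
/*
 * Worked example: moving R11 (AUX_REG) into rax uses
 * add_2mod(0x48, BPF_REG_0, AUX_REG) = 0x4C (REX.W + REX.R) and
 * add_2reg(0xC0, BPF_REG_0, AUX_REG) = 0xC0 + 0 + (3 << 3) = 0xD8,
 * giving 4C 89 D8, i.e. 'mov rax, r11'.
 */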

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}
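
/*
 * 0xcc is the one-byte INT3 (breakpoint) instruction: a stray jump into
 * unused space of the JIT image traps instead of executing whatever bytes
 * happen to be left there.
 */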

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	11
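/*
 * The 11 skipped bytes are everything emit_prologue() below generates up
 * to (but not including) the 'sub rsp' instruction: the 5-byte patchable
 * nop, the 2-byte tail-call-count setup (xor eax, eax or nop2),
 * 'push rbp' (1 byte) and 'mov rbp, rsp' (3 bytes).
 */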

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}
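
/*
 * The emitted instruction is opcode + rel32, with rel32 taken relative to
 * the end of the 5-byte instruction (ip + X86_PATCH_SIZE); 0xE8 is
 * 'call rel32' and 0xE9 is 'jmp rel32', as used by the two wrappers below.
 */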

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

#ifdef CONFIG_RETPOLINE
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else
#endif
	EMIT2(0xFF, 0xE0 + reg);

	*pprog = prog;
}
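
/*
 * 0xFF /4 with ModRM 0xE0 + reg encodes 'jmp <reg>' for the low eight
 * GPRs (no REX is emitted here; the only caller passes 1, i.e. rcx).
 * In the AMD retpoline mode an lfence before the indirect jump suffices,
 * otherwise the jump is redirected through the kernel's
 * __x86_indirect_thunk_array.
 */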

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into the next BPF program:
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when the image is not live yet
			 * and still not locked as read-only. Once the poke
			 * location is active (poke->tailcall_target_stable),
			 * any parallel bpf_arch_text_poke() might still occur
			 * on the read-write image until we finally lock it as
			 * read-only. Both modifications of the given image are
			 * done under text_mutex to avoid interference.
			 */
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting a plain u32, where the sign bit must not be
		 * propagated, LLVM tends to use the imm64 load over a direct
		 * mov32, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
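
/*
 * Examples: 0x00000000ffffffff passes is_uimm32() and becomes the 5-byte
 * 'mov eax, 0xffffffff' (a write to a 32-bit register zero-extends into
 * the upper half), while 0x0000000100000000 does not and needs the
 * 10-byte 'movabs rax, 0x100000000'.
 */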

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * the special case of x86 R13, which always needs an offset,
		 * is not worth the hassle.
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}
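
/*
 * Example: an off of 8 yields ModRM base 0x40 (mod=01) plus a single
 * displacement byte 0x08, while an off of 0x1000 falls back to base 0x80
 * (mod=10) with a 4-byte displacement.
 */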

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}
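
/*
 * All four sizes leave dst_reg zero-extended: movzx handles the byte/word
 * cases explicitly, and a 32-bit 'mov' implicitly clears the upper 32
 * bits, matching BPF semantics for sub-64-bit LDX.
 */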

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_SUB:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}
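
/*
 * The escape opcodes above decode as: 0F C1 = xadd (fetch-and-add),
 * 87 = xchg (implicitly locked when it has a memory operand, so the lock
 * prefix is redundant there), 0F B1 = cmpxchg, which compares against
 * rax -- matching BPF_CMPXCHG's use of r0 as the expected-value operand.
 */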

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
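
/*
 * The fixup word is packed as (pt_regs offset of the destination
 * register << 8) | (length of the faulting load), so the handler above
 * can both zero the destination via the reg2pt_regs[] offset and resume
 * at the following instruction.
 */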

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
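
/*
 * INSN_SZ_DIFF is how many bytes shorter the current encoding of insn i
 * came out compared to the previous pass (addrs[] still holds last pass's
 * offsets); with jmp_padding the gap is filled with nops so that jump
 * offsets computed earlier stay valid.
 */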
870b52f00e6SAlexei Starovoitov static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
87193c5aeccSGary Lin 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
872b52f00e6SAlexei Starovoitov {
873ebf7d1f5SMaciej Fijalkowski 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
874b52f00e6SAlexei Starovoitov 	struct bpf_insn *insn = bpf_prog->insnsi;
875ebf7d1f5SMaciej Fijalkowski 	bool callee_regs_used[4] = {};
876b52f00e6SAlexei Starovoitov 	int insn_cnt = bpf_prog->len;
877ebf7d1f5SMaciej Fijalkowski 	bool tail_call_seen = false;
878b52f00e6SAlexei Starovoitov 	bool seen_exit = false;
879b52f00e6SAlexei Starovoitov 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
880ced50fc4SJiri Olsa 	int i, excnt = 0;
88193c5aeccSGary Lin 	int ilen, proglen = 0;
882b52f00e6SAlexei Starovoitov 	u8 *prog = temp;
88391c960b0SBrendan Jackman 	int err;
884b52f00e6SAlexei Starovoitov 
885ebf7d1f5SMaciej Fijalkowski 	detect_reg_usage(insn, insn_cnt, callee_regs_used,
886ebf7d1f5SMaciej Fijalkowski 			 &tail_call_seen);
887ebf7d1f5SMaciej Fijalkowski 
888ebf7d1f5SMaciej Fijalkowski 	/* tail call's presence in current prog implies it is reachable */
889ebf7d1f5SMaciej Fijalkowski 	tail_call_reachable |= tail_call_seen;
890ebf7d1f5SMaciej Fijalkowski 
89108691752SDaniel Borkmann 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
892ebf7d1f5SMaciej Fijalkowski 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
893ebf7d1f5SMaciej Fijalkowski 		      bpf_prog->aux->func_idx != 0);
894ebf7d1f5SMaciej Fijalkowski 	push_callee_regs(&prog, callee_regs_used);
89593c5aeccSGary Lin 
89693c5aeccSGary Lin 	ilen = prog - temp;
89793c5aeccSGary Lin 	if (image)
89893c5aeccSGary Lin 		memcpy(image + proglen, temp, ilen);
89993c5aeccSGary Lin 	proglen += ilen;
90093c5aeccSGary Lin 	addrs[0] = proglen;
90193c5aeccSGary Lin 	prog = temp;
902b52f00e6SAlexei Starovoitov 
9037c2e988fSAlexei Starovoitov 	for (i = 1; i <= insn_cnt; i++, insn++) {
904e430f34eSAlexei Starovoitov 		const s32 imm32 = insn->imm;
905e430f34eSAlexei Starovoitov 		u32 dst_reg = insn->dst_reg;
906e430f34eSAlexei Starovoitov 		u32 src_reg = insn->src_reg;
9076fe8b9c1SDaniel Borkmann 		u8 b2 = 0, b3 = 0;
9084c5de127SAlexei Starovoitov 		u8 *start_of_ldx;
90962258278SAlexei Starovoitov 		s64 jmp_offset;
91062258278SAlexei Starovoitov 		u8 jmp_cond;
91162258278SAlexei Starovoitov 		u8 *func;
91293c5aeccSGary Lin 		int nops;
91362258278SAlexei Starovoitov 
91462258278SAlexei Starovoitov 		switch (insn->code) {
91562258278SAlexei Starovoitov 			/* ALU */
91662258278SAlexei Starovoitov 		case BPF_ALU | BPF_ADD | BPF_X:
91762258278SAlexei Starovoitov 		case BPF_ALU | BPF_SUB | BPF_X:
91862258278SAlexei Starovoitov 		case BPF_ALU | BPF_AND | BPF_X:
91962258278SAlexei Starovoitov 		case BPF_ALU | BPF_OR | BPF_X:
92062258278SAlexei Starovoitov 		case BPF_ALU | BPF_XOR | BPF_X:
92162258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_ADD | BPF_X:
92262258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_SUB | BPF_X:
92362258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_AND | BPF_X:
92462258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_OR | BPF_X:
92562258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_XOR | BPF_X:
92674007cfcSBrendan Jackman 			maybe_emit_mod(&prog, dst_reg, src_reg,
92774007cfcSBrendan Jackman 				       BPF_CLASS(insn->code) == BPF_ALU64);
928e5f02cacSBrendan Jackman 			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
929e430f34eSAlexei Starovoitov 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
9300a14842fSEric Dumazet 			break;
93162258278SAlexei Starovoitov 
93262258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_MOV | BPF_X:
93362258278SAlexei Starovoitov 		case BPF_ALU | BPF_MOV | BPF_X:
9344c38e2f3SDaniel Borkmann 			emit_mov_reg(&prog,
9354c38e2f3SDaniel Borkmann 				     BPF_CLASS(insn->code) == BPF_ALU64,
9364c38e2f3SDaniel Borkmann 				     dst_reg, src_reg);
93762258278SAlexei Starovoitov 			break;
93862258278SAlexei Starovoitov 
939e430f34eSAlexei Starovoitov 			/* neg dst */
94062258278SAlexei Starovoitov 		case BPF_ALU | BPF_NEG:
94162258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_NEG:
9426364d7d7SJie Meng 			maybe_emit_1mod(&prog, dst_reg,
9436364d7d7SJie Meng 					BPF_CLASS(insn->code) == BPF_ALU64);
944e430f34eSAlexei Starovoitov 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
94562258278SAlexei Starovoitov 			break;
94662258278SAlexei Starovoitov 
94762258278SAlexei Starovoitov 		case BPF_ALU | BPF_ADD | BPF_K:
94862258278SAlexei Starovoitov 		case BPF_ALU | BPF_SUB | BPF_K:
94962258278SAlexei Starovoitov 		case BPF_ALU | BPF_AND | BPF_K:
95062258278SAlexei Starovoitov 		case BPF_ALU | BPF_OR | BPF_K:
95162258278SAlexei Starovoitov 		case BPF_ALU | BPF_XOR | BPF_K:
95262258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_ADD | BPF_K:
95362258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_SUB | BPF_K:
95462258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_AND | BPF_K:
95562258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_OR | BPF_K:
95662258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_XOR | BPF_K:
9576364d7d7SJie Meng 			maybe_emit_1mod(&prog, dst_reg,
9586364d7d7SJie Meng 					BPF_CLASS(insn->code) == BPF_ALU64);
95962258278SAlexei Starovoitov 
			/*
			 * b3 holds the 'normal' opcode; b2 is the short form,
			 * valid only when dst is eax/rax.
			 */
96462258278SAlexei Starovoitov 			switch (BPF_OP(insn->code)) {
965de0a444dSDaniel Borkmann 			case BPF_ADD:
966de0a444dSDaniel Borkmann 				b3 = 0xC0;
967de0a444dSDaniel Borkmann 				b2 = 0x05;
968de0a444dSDaniel Borkmann 				break;
969de0a444dSDaniel Borkmann 			case BPF_SUB:
970de0a444dSDaniel Borkmann 				b3 = 0xE8;
971de0a444dSDaniel Borkmann 				b2 = 0x2D;
972de0a444dSDaniel Borkmann 				break;
973de0a444dSDaniel Borkmann 			case BPF_AND:
974de0a444dSDaniel Borkmann 				b3 = 0xE0;
975de0a444dSDaniel Borkmann 				b2 = 0x25;
976de0a444dSDaniel Borkmann 				break;
977de0a444dSDaniel Borkmann 			case BPF_OR:
978de0a444dSDaniel Borkmann 				b3 = 0xC8;
979de0a444dSDaniel Borkmann 				b2 = 0x0D;
980de0a444dSDaniel Borkmann 				break;
981de0a444dSDaniel Borkmann 			case BPF_XOR:
982de0a444dSDaniel Borkmann 				b3 = 0xF0;
983de0a444dSDaniel Borkmann 				b2 = 0x35;
984de0a444dSDaniel Borkmann 				break;
98562258278SAlexei Starovoitov 			}
98662258278SAlexei Starovoitov 
987e430f34eSAlexei Starovoitov 			if (is_imm8(imm32))
988e430f34eSAlexei Starovoitov 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
989de0a444dSDaniel Borkmann 			else if (is_axreg(dst_reg))
990de0a444dSDaniel Borkmann 				EMIT1_off32(b2, imm32);
99162258278SAlexei Starovoitov 			else
992e430f34eSAlexei Starovoitov 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
99362258278SAlexei Starovoitov 			break;
99462258278SAlexei Starovoitov 
99562258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_MOV | BPF_K:
99662258278SAlexei Starovoitov 		case BPF_ALU | BPF_MOV | BPF_K:
9976fe8b9c1SDaniel Borkmann 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
9986fe8b9c1SDaniel Borkmann 				       dst_reg, imm32);
99962258278SAlexei Starovoitov 			break;
100062258278SAlexei Starovoitov 
100102ab695bSAlexei Starovoitov 		case BPF_LD | BPF_IMM | BPF_DW:
10026fe8b9c1SDaniel Borkmann 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
100302ab695bSAlexei Starovoitov 			insn++;
100402ab695bSAlexei Starovoitov 			i++;
100502ab695bSAlexei Starovoitov 			break;
100602ab695bSAlexei Starovoitov 
1007e430f34eSAlexei Starovoitov 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
100862258278SAlexei Starovoitov 		case BPF_ALU | BPF_MOD | BPF_X:
100962258278SAlexei Starovoitov 		case BPF_ALU | BPF_DIV | BPF_X:
101062258278SAlexei Starovoitov 		case BPF_ALU | BPF_MOD | BPF_K:
101162258278SAlexei Starovoitov 		case BPF_ALU | BPF_DIV | BPF_K:
101262258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_MOD | BPF_X:
101362258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_DIV | BPF_X:
101462258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_MOD | BPF_K:
101557a610f1SJie Meng 		case BPF_ALU64 | BPF_DIV | BPF_K: {
10164c38e2f3SDaniel Borkmann 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
10174c38e2f3SDaniel Borkmann 
1018d806a0cfSDaniel Borkmann 			if (dst_reg != BPF_REG_0)
101962258278SAlexei Starovoitov 				EMIT1(0x50); /* push rax */
1020d806a0cfSDaniel Borkmann 			if (dst_reg != BPF_REG_3)
102162258278SAlexei Starovoitov 				EMIT1(0x52); /* push rdx */
102262258278SAlexei Starovoitov 
102357a610f1SJie Meng 			if (BPF_SRC(insn->code) == BPF_X) {
102457a610f1SJie Meng 				if (src_reg == BPF_REG_0 ||
102557a610f1SJie Meng 				    src_reg == BPF_REG_3) {
102662258278SAlexei Starovoitov 					/* mov r11, src_reg */
102762258278SAlexei Starovoitov 					EMIT_mov(AUX_REG, src_reg);
102857a610f1SJie Meng 					src_reg = AUX_REG;
102957a610f1SJie Meng 				}
103057a610f1SJie Meng 			} else {
103162258278SAlexei Starovoitov 				/* mov r11, imm32 */
103262258278SAlexei Starovoitov 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
103357a610f1SJie Meng 				src_reg = AUX_REG;
103457a610f1SJie Meng 			}
103562258278SAlexei Starovoitov 
103657a610f1SJie Meng 			if (dst_reg != BPF_REG_0)
103762258278SAlexei Starovoitov 				/* mov rax, dst_reg */
103857a610f1SJie Meng 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
103962258278SAlexei Starovoitov 
104062258278SAlexei Starovoitov 			/*
104162258278SAlexei Starovoitov 			 * xor edx, edx
104262258278SAlexei Starovoitov 			 * equivalent to 'xor rdx, rdx', but one byte less
104362258278SAlexei Starovoitov 			 */
104462258278SAlexei Starovoitov 			EMIT2(0x31, 0xd2);
104562258278SAlexei Starovoitov 
104657a610f1SJie Meng 			/* div src_reg */
10476364d7d7SJie Meng 			maybe_emit_1mod(&prog, src_reg, is64);
104857a610f1SJie Meng 			EMIT2(0xF7, add_1reg(0xF0, src_reg));
104962258278SAlexei Starovoitov 
105057a610f1SJie Meng 			if (BPF_OP(insn->code) == BPF_MOD &&
105157a610f1SJie Meng 			    dst_reg != BPF_REG_3)
105257a610f1SJie Meng 				/* mov dst_reg, rdx */
105357a610f1SJie Meng 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
105457a610f1SJie Meng 			else if (BPF_OP(insn->code) == BPF_DIV &&
105557a610f1SJie Meng 				 dst_reg != BPF_REG_0)
105657a610f1SJie Meng 				/* mov dst_reg, rax */
105757a610f1SJie Meng 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
105862258278SAlexei Starovoitov 
1059d806a0cfSDaniel Borkmann 			if (dst_reg != BPF_REG_3)
106062258278SAlexei Starovoitov 				EMIT1(0x5A); /* pop rdx */
106157a610f1SJie Meng 			if (dst_reg != BPF_REG_0)
106262258278SAlexei Starovoitov 				EMIT1(0x58); /* pop rax */
106362258278SAlexei Starovoitov 			break;
10644c38e2f3SDaniel Borkmann 		}
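
		/*
		 * Worked example (illustrative sketch, not emitted verbatim):
		 * BPF_ALU64 | BPF_DIV | BPF_K with dst_reg == BPF_REG_1
		 * (mapped to rdi) and imm32 == 5 yields roughly:
		 *
		 *   50                      push rax
		 *   52                      push rdx
		 *   49 C7 C3 05 00 00 00    mov  r11, 5
		 *   48 89 F8                mov  rax, rdi
		 *   31 D2                   xor  edx, edx
		 *   49 F7 F3                div  r11
		 *   48 89 C7                mov  rdi, rax
		 *   5A                      pop  rdx
		 *   58                      pop  rax
		 */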
106562258278SAlexei Starovoitov 
106662258278SAlexei Starovoitov 		case BPF_ALU | BPF_MUL | BPF_K:
106762258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_MUL | BPF_K:
10686364d7d7SJie Meng 			maybe_emit_mod(&prog, dst_reg, dst_reg,
10696364d7d7SJie Meng 				       BPF_CLASS(insn->code) == BPF_ALU64);
107062258278SAlexei Starovoitov 
1071c0354077SJie Meng 			if (is_imm8(imm32))
1072c0354077SJie Meng 				/* imul dst_reg, dst_reg, imm8 */
1073c0354077SJie Meng 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1074c0354077SJie Meng 				      imm32);
107562258278SAlexei Starovoitov 			else
1076c0354077SJie Meng 				/* imul dst_reg, dst_reg, imm32 */
1077c0354077SJie Meng 				EMIT2_off32(0x69,
1078c0354077SJie Meng 					    add_2reg(0xC0, dst_reg, dst_reg),
1079c0354077SJie Meng 					    imm32);
108062258278SAlexei Starovoitov 			break;
1081c0354077SJie Meng 
1082c0354077SJie Meng 		case BPF_ALU | BPF_MUL | BPF_X:
1083c0354077SJie Meng 		case BPF_ALU64 | BPF_MUL | BPF_X:
10846364d7d7SJie Meng 			maybe_emit_mod(&prog, src_reg, dst_reg,
10856364d7d7SJie Meng 				       BPF_CLASS(insn->code) == BPF_ALU64);
1086c0354077SJie Meng 
1087c0354077SJie Meng 			/* imul dst_reg, src_reg */
1088c0354077SJie Meng 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1089c0354077SJie Meng 			break;
1090c0354077SJie Meng 
1091a2c7a983SIngo Molnar 			/* Shifts */
109262258278SAlexei Starovoitov 		case BPF_ALU | BPF_LSH | BPF_K:
109362258278SAlexei Starovoitov 		case BPF_ALU | BPF_RSH | BPF_K:
109462258278SAlexei Starovoitov 		case BPF_ALU | BPF_ARSH | BPF_K:
109562258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_LSH | BPF_K:
109662258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_RSH | BPF_K:
109762258278SAlexei Starovoitov 		case BPF_ALU64 | BPF_ARSH | BPF_K:
10986364d7d7SJie Meng 			maybe_emit_1mod(&prog, dst_reg,
10996364d7d7SJie Meng 					BPF_CLASS(insn->code) == BPF_ALU64);
110062258278SAlexei Starovoitov 
1101e5f02cacSBrendan Jackman 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
110288e69a1fSDaniel Borkmann 			if (imm32 == 1)
110388e69a1fSDaniel Borkmann 				EMIT2(0xD1, add_1reg(b3, dst_reg));
110488e69a1fSDaniel Borkmann 			else
1105e430f34eSAlexei Starovoitov 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
110662258278SAlexei Starovoitov 			break;
110762258278SAlexei Starovoitov 
110872b603eeSAlexei Starovoitov 		case BPF_ALU | BPF_LSH | BPF_X:
110972b603eeSAlexei Starovoitov 		case BPF_ALU | BPF_RSH | BPF_X:
111072b603eeSAlexei Starovoitov 		case BPF_ALU | BPF_ARSH | BPF_X:
111172b603eeSAlexei Starovoitov 		case BPF_ALU64 | BPF_LSH | BPF_X:
111272b603eeSAlexei Starovoitov 		case BPF_ALU64 | BPF_RSH | BPF_X:
111372b603eeSAlexei Starovoitov 		case BPF_ALU64 | BPF_ARSH | BPF_X:
111472b603eeSAlexei Starovoitov 
1115a2c7a983SIngo Molnar 			/* Check for bad case when dst_reg == rcx */
111672b603eeSAlexei Starovoitov 			if (dst_reg == BPF_REG_4) {
111772b603eeSAlexei Starovoitov 				/* mov r11, dst_reg */
111872b603eeSAlexei Starovoitov 				EMIT_mov(AUX_REG, dst_reg);
111972b603eeSAlexei Starovoitov 				dst_reg = AUX_REG;
112072b603eeSAlexei Starovoitov 			}
112172b603eeSAlexei Starovoitov 
112272b603eeSAlexei Starovoitov 			if (src_reg != BPF_REG_4) { /* common case */
112372b603eeSAlexei Starovoitov 				EMIT1(0x51); /* push rcx */
112472b603eeSAlexei Starovoitov 
112572b603eeSAlexei Starovoitov 				/* mov rcx, src_reg */
112672b603eeSAlexei Starovoitov 				EMIT_mov(BPF_REG_4, src_reg);
112772b603eeSAlexei Starovoitov 			}
112872b603eeSAlexei Starovoitov 
112972b603eeSAlexei Starovoitov 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
11306364d7d7SJie Meng 			maybe_emit_1mod(&prog, dst_reg,
11316364d7d7SJie Meng 					BPF_CLASS(insn->code) == BPF_ALU64);
113272b603eeSAlexei Starovoitov 
1133e5f02cacSBrendan Jackman 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
113472b603eeSAlexei Starovoitov 			EMIT2(0xD3, add_1reg(b3, dst_reg));
113572b603eeSAlexei Starovoitov 
113672b603eeSAlexei Starovoitov 			if (src_reg != BPF_REG_4)
113772b603eeSAlexei Starovoitov 				EMIT1(0x59); /* pop rcx */
113872b603eeSAlexei Starovoitov 
113972b603eeSAlexei Starovoitov 			if (insn->dst_reg == BPF_REG_4)
114072b603eeSAlexei Starovoitov 				/* mov dst_reg, r11 */
114172b603eeSAlexei Starovoitov 				EMIT_mov(insn->dst_reg, AUX_REG);
114272b603eeSAlexei Starovoitov 			break;
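
		/*
		 * Illustrative sketch: with dst_reg == BPF_REG_4 (rcx) and
		 * src_reg == BPF_REG_1 (rdi), BPF_ALU64 | BPF_LSH | BPF_X
		 * becomes:
		 *
		 *   mov r11, rcx    // free rcx, shift r11 instead
		 *   push rcx
		 *   mov rcx, rdi    // shift amount must live in cl
		 *   shl r11, cl
		 *   pop rcx
		 *   mov rcx, r11    // move result back into dst_reg
		 */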
114372b603eeSAlexei Starovoitov 
114462258278SAlexei Starovoitov 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1145e430f34eSAlexei Starovoitov 			switch (imm32) {
114662258278SAlexei Starovoitov 			case 16:
1147a2c7a983SIngo Molnar 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
114862258278SAlexei Starovoitov 				EMIT1(0x66);
1149e430f34eSAlexei Starovoitov 				if (is_ereg(dst_reg))
115062258278SAlexei Starovoitov 					EMIT1(0x41);
1151e430f34eSAlexei Starovoitov 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1152343f845bSAlexei Starovoitov 
1153a2c7a983SIngo Molnar 				/* Emit 'movzwl eax, ax' */
1154343f845bSAlexei Starovoitov 				if (is_ereg(dst_reg))
1155343f845bSAlexei Starovoitov 					EMIT3(0x45, 0x0F, 0xB7);
1156343f845bSAlexei Starovoitov 				else
1157343f845bSAlexei Starovoitov 					EMIT2(0x0F, 0xB7);
1158343f845bSAlexei Starovoitov 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
115962258278SAlexei Starovoitov 				break;
116062258278SAlexei Starovoitov 			case 32:
1161a2c7a983SIngo Molnar 				/* Emit 'bswap eax' to swap lower 4 bytes */
1162e430f34eSAlexei Starovoitov 				if (is_ereg(dst_reg))
116362258278SAlexei Starovoitov 					EMIT2(0x41, 0x0F);
116462258278SAlexei Starovoitov 				else
116562258278SAlexei Starovoitov 					EMIT1(0x0F);
1166e430f34eSAlexei Starovoitov 				EMIT1(add_1reg(0xC8, dst_reg));
116762258278SAlexei Starovoitov 				break;
116862258278SAlexei Starovoitov 			case 64:
1169a2c7a983SIngo Molnar 				/* Emit 'bswap rax' to swap 8 bytes */
1170e430f34eSAlexei Starovoitov 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1171e430f34eSAlexei Starovoitov 				      add_1reg(0xC8, dst_reg));
117262258278SAlexei Starovoitov 				break;
117362258278SAlexei Starovoitov 			}
117462258278SAlexei Starovoitov 			break;
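
			/*
			 * For example (sketch): with dst_reg == BPF_REG_1 (rdi),
			 * imm32 == 32 emits 'bswap edi' (0F CF) and imm32 == 64
			 * emits 'bswap rdi' (48 0F CF).
			 */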
117562258278SAlexei Starovoitov 
117662258278SAlexei Starovoitov 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1177343f845bSAlexei Starovoitov 			switch (imm32) {
1178343f845bSAlexei Starovoitov 			case 16:
1179a2c7a983SIngo Molnar 				/*
1180a2c7a983SIngo Molnar 				 * Emit 'movzwl eax, ax' to zero-extend 16 bits
1181343f845bSAlexei Starovoitov 				 * into 64 bits
1182343f845bSAlexei Starovoitov 				 */
1183343f845bSAlexei Starovoitov 				if (is_ereg(dst_reg))
1184343f845bSAlexei Starovoitov 					EMIT3(0x45, 0x0F, 0xB7);
1185343f845bSAlexei Starovoitov 				else
1186343f845bSAlexei Starovoitov 					EMIT2(0x0F, 0xB7);
1187343f845bSAlexei Starovoitov 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1188343f845bSAlexei Starovoitov 				break;
1189343f845bSAlexei Starovoitov 			case 32:
1190a2c7a983SIngo Molnar 				/* Emit 'mov eax, eax' to clear the upper 32 bits */
1191343f845bSAlexei Starovoitov 				if (is_ereg(dst_reg))
1192343f845bSAlexei Starovoitov 					EMIT1(0x45);
1193343f845bSAlexei Starovoitov 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1194343f845bSAlexei Starovoitov 				break;
1195343f845bSAlexei Starovoitov 			case 64:
1196343f845bSAlexei Starovoitov 				/* nop */
1197343f845bSAlexei Starovoitov 				break;
1198343f845bSAlexei Starovoitov 			}
119962258278SAlexei Starovoitov 			break;
120062258278SAlexei Starovoitov 
1201f5e81d11SDaniel Borkmann 			/* speculation barrier */
1202f5e81d11SDaniel Borkmann 		case BPF_ST | BPF_NOSPEC:
1203f5e81d11SDaniel Borkmann 			if (boot_cpu_has(X86_FEATURE_XMM2))
120487c87ecdSPeter Zijlstra 				EMIT_LFENCE();
1205f5e81d11SDaniel Borkmann 			break;
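
			/*
			 * The barrier is LFENCE (0F AE E8); LFENCE is an SSE2
			 * instruction, hence the X86_FEATURE_XMM2 check, and
			 * nothing is emitted on CPUs without SSE2.
			 */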
1206f5e81d11SDaniel Borkmann 
1207e430f34eSAlexei Starovoitov 			/* ST: *(u8*)(dst_reg + off) = imm */
120862258278SAlexei Starovoitov 		case BPF_ST | BPF_MEM | BPF_B:
1209e430f34eSAlexei Starovoitov 			if (is_ereg(dst_reg))
121062258278SAlexei Starovoitov 				EMIT2(0x41, 0xC6);
121162258278SAlexei Starovoitov 			else
121262258278SAlexei Starovoitov 				EMIT1(0xC6);
121362258278SAlexei Starovoitov 			goto st;
121462258278SAlexei Starovoitov 		case BPF_ST | BPF_MEM | BPF_H:
1215e430f34eSAlexei Starovoitov 			if (is_ereg(dst_reg))
121662258278SAlexei Starovoitov 				EMIT3(0x66, 0x41, 0xC7);
121762258278SAlexei Starovoitov 			else
121862258278SAlexei Starovoitov 				EMIT2(0x66, 0xC7);
121962258278SAlexei Starovoitov 			goto st;
122062258278SAlexei Starovoitov 		case BPF_ST | BPF_MEM | BPF_W:
1221e430f34eSAlexei Starovoitov 			if (is_ereg(dst_reg))
122262258278SAlexei Starovoitov 				EMIT2(0x41, 0xC7);
122362258278SAlexei Starovoitov 			else
122462258278SAlexei Starovoitov 				EMIT1(0xC7);
122562258278SAlexei Starovoitov 			goto st;
122662258278SAlexei Starovoitov 		case BPF_ST | BPF_MEM | BPF_DW:
1227e430f34eSAlexei Starovoitov 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
122862258278SAlexei Starovoitov 
122962258278SAlexei Starovoitov st:			if (is_imm8(insn->off))
1230e430f34eSAlexei Starovoitov 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
123162258278SAlexei Starovoitov 			else
1232e430f34eSAlexei Starovoitov 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
123362258278SAlexei Starovoitov 
1234e430f34eSAlexei Starovoitov 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
123562258278SAlexei Starovoitov 			break;
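
			/*
			 * Encoding sketch: BPF_ST | BPF_MEM | BPF_W with
			 * dst_reg == BPF_REG_1 (rdi), off == 16 and imm == 0x1234
			 * becomes 'mov dword ptr [rdi+0x10], 0x1234', i.e.
			 * C7 47 10 34 12 00 00.
			 */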
123662258278SAlexei Starovoitov 
1237e430f34eSAlexei Starovoitov 			/* STX: *(u8*)(dst_reg + off) = src_reg */
123862258278SAlexei Starovoitov 		case BPF_STX | BPF_MEM | BPF_B:
123962258278SAlexei Starovoitov 		case BPF_STX | BPF_MEM | BPF_H:
124062258278SAlexei Starovoitov 		case BPF_STX | BPF_MEM | BPF_W:
124162258278SAlexei Starovoitov 		case BPF_STX | BPF_MEM | BPF_DW:
12423b2744e6SAlexei Starovoitov 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
124362258278SAlexei Starovoitov 			break;
124462258278SAlexei Starovoitov 
1245e430f34eSAlexei Starovoitov 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
124662258278SAlexei Starovoitov 		case BPF_LDX | BPF_MEM | BPF_B:
12473dec541bSAlexei Starovoitov 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
124862258278SAlexei Starovoitov 		case BPF_LDX | BPF_MEM | BPF_H:
12493dec541bSAlexei Starovoitov 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
125062258278SAlexei Starovoitov 		case BPF_LDX | BPF_MEM | BPF_W:
12513dec541bSAlexei Starovoitov 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
125262258278SAlexei Starovoitov 		case BPF_LDX | BPF_MEM | BPF_DW:
12533dec541bSAlexei Starovoitov 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
12544c5de127SAlexei Starovoitov 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
12554c5de127SAlexei Starovoitov 				/* test src_reg, src_reg */
12564c5de127SAlexei Starovoitov 				maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
12574c5de127SAlexei Starovoitov 				EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
12584c5de127SAlexei Starovoitov 				/* jne start_of_ldx */
12594c5de127SAlexei Starovoitov 				EMIT2(X86_JNE, 0);
12604c5de127SAlexei Starovoitov 				/* xor dst_reg, dst_reg */
12614c5de127SAlexei Starovoitov 				emit_mov_imm32(&prog, false, dst_reg, 0);
12624c5de127SAlexei Starovoitov 				/* jmp byte_after_ldx */
12634c5de127SAlexei Starovoitov 				EMIT2(0xEB, 0);
12644c5de127SAlexei Starovoitov 
12654c5de127SAlexei Starovoitov 				/* populate jmp_offset for JNE above */
12664c5de127SAlexei Starovoitov 				temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
12674c5de127SAlexei Starovoitov 				start_of_ldx = prog;
12684c5de127SAlexei Starovoitov 			}
12693b2744e6SAlexei Starovoitov 			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
12703dec541bSAlexei Starovoitov 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
12713dec541bSAlexei Starovoitov 				struct exception_table_entry *ex;
1272328aac5eSRavi Bangoria 				u8 *_insn = image + proglen + (start_of_ldx - temp);
12733dec541bSAlexei Starovoitov 				s64 delta;
12743dec541bSAlexei Starovoitov 
12754c5de127SAlexei Starovoitov 				/* populate jmp_offset for JMP above */
12764c5de127SAlexei Starovoitov 				start_of_ldx[-1] = prog - start_of_ldx;
12774c5de127SAlexei Starovoitov 
12783dec541bSAlexei Starovoitov 				if (!bpf_prog->aux->extable)
12793dec541bSAlexei Starovoitov 					break;
12803dec541bSAlexei Starovoitov 
12813dec541bSAlexei Starovoitov 				if (excnt >= bpf_prog->aux->num_exentries) {
12823dec541bSAlexei Starovoitov 					pr_err("ex gen bug\n");
12833dec541bSAlexei Starovoitov 					return -EFAULT;
12843dec541bSAlexei Starovoitov 				}
12853dec541bSAlexei Starovoitov 				ex = &bpf_prog->aux->extable[excnt++];
12863dec541bSAlexei Starovoitov 
12873dec541bSAlexei Starovoitov 				delta = _insn - (u8 *)&ex->insn;
12883dec541bSAlexei Starovoitov 				if (!is_simm32(delta)) {
12893dec541bSAlexei Starovoitov 					pr_err("extable->insn doesn't fit into 32-bit\n");
12903dec541bSAlexei Starovoitov 					return -EFAULT;
12913dec541bSAlexei Starovoitov 				}
12923dec541bSAlexei Starovoitov 				ex->insn = delta;
12933dec541bSAlexei Starovoitov 
129446d28947SThomas Gleixner 				ex->type = EX_TYPE_BPF;
12953dec541bSAlexei Starovoitov 
12963dec541bSAlexei Starovoitov 				if (dst_reg > BPF_REG_9) {
12973dec541bSAlexei Starovoitov 					pr_err("verifier error\n");
12983dec541bSAlexei Starovoitov 					return -EFAULT;
12993dec541bSAlexei Starovoitov 				}
13003dec541bSAlexei Starovoitov 				/*
13013dec541bSAlexei Starovoitov 				 * Compute size of x86 insn and its target dest x86 register.
13023dec541bSAlexei Starovoitov 				 * ex_handler_bpf() will use lower 8 bits to adjust
13033dec541bSAlexei Starovoitov 				 * pt_regs->ip to jump over this x86 instruction
13043dec541bSAlexei Starovoitov 				 * and upper bits to figure out which pt_regs to zero out.
13053dec541bSAlexei Starovoitov 				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
13063dec541bSAlexei Starovoitov 				 * of 4 bytes will be skipped and rbx will be zero-initialized.
13073dec541bSAlexei Starovoitov 				 */
13083dec541bSAlexei Starovoitov 				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
13093dec541bSAlexei Starovoitov 			}
131062258278SAlexei Starovoitov 			break;
131162258278SAlexei Starovoitov 
131291c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_W:
131391c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1314981f94c3SBrendan Jackman 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
1315981f94c3SBrendan Jackman 			    insn->imm == (BPF_OR | BPF_FETCH) ||
1316981f94c3SBrendan Jackman 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
1317981f94c3SBrendan Jackman 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1318b29dd96bSBrendan Jackman 				u32 real_src_reg = src_reg;
1319ced18582SJohan Almbladh 				u32 real_dst_reg = dst_reg;
1320ced18582SJohan Almbladh 				u8 *branch_target;
1321981f94c3SBrendan Jackman 
1322981f94c3SBrendan Jackman 				/*
1323981f94c3SBrendan Jackman 				 * Can't be implemented with a single x86 insn.
1324981f94c3SBrendan Jackman 				 * Need to do a CMPXCHG loop.
1325981f94c3SBrendan Jackman 				 */
1326981f94c3SBrendan Jackman 
1327981f94c3SBrendan Jackman 				/* Will need RAX as a CMPXCHG operand so save R0 */
1328981f94c3SBrendan Jackman 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1329b29dd96bSBrendan Jackman 				if (src_reg == BPF_REG_0)
1330b29dd96bSBrendan Jackman 					real_src_reg = BPF_REG_AX;
1331ced18582SJohan Almbladh 				if (dst_reg == BPF_REG_0)
1332ced18582SJohan Almbladh 					real_dst_reg = BPF_REG_AX;
1333b29dd96bSBrendan Jackman 
1334981f94c3SBrendan Jackman 				branch_target = prog;
1335981f94c3SBrendan Jackman 				/* Load old value */
1336981f94c3SBrendan Jackman 				emit_ldx(&prog, BPF_SIZE(insn->code),
1337ced18582SJohan Almbladh 					 BPF_REG_0, real_dst_reg, insn->off);
1338981f94c3SBrendan Jackman 				/*
1339981f94c3SBrendan Jackman 				 * Perform the (commutative) operation locally,
1340981f94c3SBrendan Jackman 				 * put the result in the AUX_REG.
1341981f94c3SBrendan Jackman 				 */
1342981f94c3SBrendan Jackman 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1343b29dd96bSBrendan Jackman 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1344981f94c3SBrendan Jackman 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1345b29dd96bSBrendan Jackman 				      add_2reg(0xC0, AUX_REG, real_src_reg));
1346981f94c3SBrendan Jackman 				/* Attempt to swap in new value */
1347981f94c3SBrendan Jackman 				err = emit_atomic(&prog, BPF_CMPXCHG,
1348ced18582SJohan Almbladh 						  real_dst_reg, AUX_REG,
1349ced18582SJohan Almbladh 						  insn->off,
1350981f94c3SBrendan Jackman 						  BPF_SIZE(insn->code));
1351981f94c3SBrendan Jackman 				if (WARN_ON(err))
1352981f94c3SBrendan Jackman 					return err;
1353981f94c3SBrendan Jackman 				/*
1354981f94c3SBrendan Jackman 				 * ZF tells us whether we won the race. If it's
1355981f94c3SBrendan Jackman 				 * cleared we need to try again.
1356981f94c3SBrendan Jackman 				 */
1357981f94c3SBrendan Jackman 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
1358981f94c3SBrendan Jackman 				/* Return the pre-modification value */
1359b29dd96bSBrendan Jackman 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1360981f94c3SBrendan Jackman 				/* Restore R0 after clobbering RAX */
1361981f94c3SBrendan Jackman 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1362981f94c3SBrendan Jackman 				break;
1363981f94c3SBrendan Jackman 			}
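
			/*
			 * Shape of the loop emitted above (sketch), e.g. for
			 * BPF_XOR | BPF_FETCH on a u64:
			 *
			 *   mov r10, rax                 // save R0
			 * again:
			 *   mov rax, [dst_reg + off]     // load old value
			 *   mov r11, rax
			 *   xor r11, src_reg             // compute new value
			 *   lock cmpxchg [dst_reg + off], r11
			 *   jne again                    // lost the race, retry
			 *   mov src_reg, rax             // return old value
			 *   mov rax, r10                 // restore R0
			 */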
1364981f94c3SBrendan Jackman 
136591c960b0SBrendan Jackman 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
136691c960b0SBrendan Jackman 					  insn->off, BPF_SIZE(insn->code));
136791c960b0SBrendan Jackman 			if (err)
136891c960b0SBrendan Jackman 				return err;
136962258278SAlexei Starovoitov 			break;
137062258278SAlexei Starovoitov 
137162258278SAlexei Starovoitov 			/* call */
137262258278SAlexei Starovoitov 		case BPF_JMP | BPF_CALL:
1373e430f34eSAlexei Starovoitov 			func = (u8 *) __bpf_call_base + imm32;
1374ebf7d1f5SMaciej Fijalkowski 			if (tail_call_reachable) {
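				/*
				 * Tail-call-reachable progs keep the tail-call
				 * count in a stack slot; reload it into rax so
				 * the callee's prologue can pick it up:
				 * 'mov rax, qword ptr [rbp - (stack_depth + 8)]'
				 * is 7 bytes, hence the '+ 7' on the call site
				 * address below.
				 */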
1375ebf7d1f5SMaciej Fijalkowski 				EMIT3_off32(0x48, 0x8B, 0x85,
1376ebf7d1f5SMaciej Fijalkowski 					    -(bpf_prog->aux->stack_depth + 8));
1377ebf7d1f5SMaciej Fijalkowski 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1378ebf7d1f5SMaciej Fijalkowski 					return -EINVAL;
1379ebf7d1f5SMaciej Fijalkowski 			} else {
13803b2744e6SAlexei Starovoitov 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1381f3c2af7bSAlexei Starovoitov 					return -EINVAL;
1382ebf7d1f5SMaciej Fijalkowski 			}
138362258278SAlexei Starovoitov 			break;
138462258278SAlexei Starovoitov 
138571189fa9SAlexei Starovoitov 		case BPF_JMP | BPF_TAIL_CALL:
1386428d5df1SDaniel Borkmann 			if (imm32)
1387428d5df1SDaniel Borkmann 				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1388dceba081SPeter Zijlstra 							  &prog, image + addrs[i - 1],
1389ebf7d1f5SMaciej Fijalkowski 							  callee_regs_used,
1390dceba081SPeter Zijlstra 							  bpf_prog->aux->stack_depth,
1391dceba081SPeter Zijlstra 							  ctx);
1392428d5df1SDaniel Borkmann 			else
1393ebf7d1f5SMaciej Fijalkowski 				emit_bpf_tail_call_indirect(&prog,
1394ebf7d1f5SMaciej Fijalkowski 							    callee_regs_used,
1395dceba081SPeter Zijlstra 							    bpf_prog->aux->stack_depth,
1396dceba081SPeter Zijlstra 							    image + addrs[i - 1],
1397dceba081SPeter Zijlstra 							    ctx);
1398b52f00e6SAlexei Starovoitov 			break;
1399b52f00e6SAlexei Starovoitov 
140062258278SAlexei Starovoitov 			/* cond jump */
140162258278SAlexei Starovoitov 		case BPF_JMP | BPF_JEQ | BPF_X:
140262258278SAlexei Starovoitov 		case BPF_JMP | BPF_JNE | BPF_X:
140362258278SAlexei Starovoitov 		case BPF_JMP | BPF_JGT | BPF_X:
140452afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_X:
140562258278SAlexei Starovoitov 		case BPF_JMP | BPF_JGE | BPF_X:
140652afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_X:
140762258278SAlexei Starovoitov 		case BPF_JMP | BPF_JSGT | BPF_X:
140852afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_X:
140962258278SAlexei Starovoitov 		case BPF_JMP | BPF_JSGE | BPF_X:
141052afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_X:
14113f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_X:
14123f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_X:
14133f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_X:
14143f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_X:
14153f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_X:
14163f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_X:
14173f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_X:
14183f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_X:
14193f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_X:
14203f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_X:
1421e430f34eSAlexei Starovoitov 			/* cmp dst_reg, src_reg */
142274007cfcSBrendan Jackman 			maybe_emit_mod(&prog, dst_reg, src_reg,
142374007cfcSBrendan Jackman 				       BPF_CLASS(insn->code) == BPF_JMP);
14243f5d6525SJiong Wang 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
142562258278SAlexei Starovoitov 			goto emit_cond_jmp;
142662258278SAlexei Starovoitov 
142762258278SAlexei Starovoitov 		case BPF_JMP | BPF_JSET | BPF_X:
14283f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_X:
1429e430f34eSAlexei Starovoitov 			/* test dst_reg, src_reg */
143074007cfcSBrendan Jackman 			maybe_emit_mod(&prog, dst_reg, src_reg,
143174007cfcSBrendan Jackman 				       BPF_CLASS(insn->code) == BPF_JMP);
14323f5d6525SJiong Wang 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
143362258278SAlexei Starovoitov 			goto emit_cond_jmp;
143462258278SAlexei Starovoitov 
143562258278SAlexei Starovoitov 		case BPF_JMP | BPF_JSET | BPF_K:
14363f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_K:
1437e430f34eSAlexei Starovoitov 			/* test dst_reg, imm32 */
14386364d7d7SJie Meng 			maybe_emit_1mod(&prog, dst_reg,
14396364d7d7SJie Meng 					BPF_CLASS(insn->code) == BPF_JMP);
1440e430f34eSAlexei Starovoitov 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
144162258278SAlexei Starovoitov 			goto emit_cond_jmp;
144262258278SAlexei Starovoitov 
144362258278SAlexei Starovoitov 		case BPF_JMP | BPF_JEQ | BPF_K:
144462258278SAlexei Starovoitov 		case BPF_JMP | BPF_JNE | BPF_K:
144562258278SAlexei Starovoitov 		case BPF_JMP | BPF_JGT | BPF_K:
144652afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_K:
144762258278SAlexei Starovoitov 		case BPF_JMP | BPF_JGE | BPF_K:
144852afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_K:
144962258278SAlexei Starovoitov 		case BPF_JMP | BPF_JSGT | BPF_K:
145052afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_K:
145162258278SAlexei Starovoitov 		case BPF_JMP | BPF_JSGE | BPF_K:
145252afc51eSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_K:
14533f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_K:
14543f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_K:
14553f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_K:
14563f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_K:
14573f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_K:
14583f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_K:
14593f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_K:
14603f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_K:
14613f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_K:
14623f5d6525SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_K:
146338f51c07SDaniel Borkmann 			/* test dst_reg, dst_reg to save one extra byte */
146438f51c07SDaniel Borkmann 			if (imm32 == 0) {
146574007cfcSBrendan Jackman 				maybe_emit_mod(&prog, dst_reg, dst_reg,
146674007cfcSBrendan Jackman 					       BPF_CLASS(insn->code) == BPF_JMP);
146738f51c07SDaniel Borkmann 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
146838f51c07SDaniel Borkmann 				goto emit_cond_jmp;
146938f51c07SDaniel Borkmann 			}
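
			/*
			 * For example: 'test rdi, rdi' is 48 85 FF (3 bytes)
			 * whereas 'cmp rdi, 0' would be 48 83 FF 00 (4 bytes).
			 */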
147038f51c07SDaniel Borkmann 
1471e430f34eSAlexei Starovoitov 			/* cmp dst_reg, imm8/32 */
14726364d7d7SJie Meng 			maybe_emit_1mod(&prog, dst_reg,
14736364d7d7SJie Meng 					BPF_CLASS(insn->code) == BPF_JMP);
147462258278SAlexei Starovoitov 
1475e430f34eSAlexei Starovoitov 			if (is_imm8(imm32))
1476e430f34eSAlexei Starovoitov 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
147762258278SAlexei Starovoitov 			else
1478e430f34eSAlexei Starovoitov 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
147962258278SAlexei Starovoitov 
1480a2c7a983SIngo Molnar emit_cond_jmp:		/* Convert BPF opcode to x86 */
148162258278SAlexei Starovoitov 			switch (BPF_OP(insn->code)) {
148262258278SAlexei Starovoitov 			case BPF_JEQ:
148362258278SAlexei Starovoitov 				jmp_cond = X86_JE;
148462258278SAlexei Starovoitov 				break;
148562258278SAlexei Starovoitov 			case BPF_JSET:
148662258278SAlexei Starovoitov 			case BPF_JNE:
148762258278SAlexei Starovoitov 				jmp_cond = X86_JNE;
148862258278SAlexei Starovoitov 				break;
148962258278SAlexei Starovoitov 			case BPF_JGT:
149062258278SAlexei Starovoitov 				/* GT is unsigned '>', JA in x86 */
149162258278SAlexei Starovoitov 				jmp_cond = X86_JA;
149262258278SAlexei Starovoitov 				break;
149352afc51eSDaniel Borkmann 			case BPF_JLT:
149452afc51eSDaniel Borkmann 				/* LT is unsigned '<', JB in x86 */
149552afc51eSDaniel Borkmann 				jmp_cond = X86_JB;
149652afc51eSDaniel Borkmann 				break;
149762258278SAlexei Starovoitov 			case BPF_JGE:
149862258278SAlexei Starovoitov 				/* GE is unsigned '>=', JAE in x86 */
149962258278SAlexei Starovoitov 				jmp_cond = X86_JAE;
150062258278SAlexei Starovoitov 				break;
150152afc51eSDaniel Borkmann 			case BPF_JLE:
150252afc51eSDaniel Borkmann 				/* LE is unsigned '<=', JBE in x86 */
150352afc51eSDaniel Borkmann 				jmp_cond = X86_JBE;
150452afc51eSDaniel Borkmann 				break;
150562258278SAlexei Starovoitov 			case BPF_JSGT:
1506a2c7a983SIngo Molnar 				/* Signed '>', GT in x86 */
150762258278SAlexei Starovoitov 				jmp_cond = X86_JG;
150862258278SAlexei Starovoitov 				break;
150952afc51eSDaniel Borkmann 			case BPF_JSLT:
1510a2c7a983SIngo Molnar 				/* Signed '<', LT in x86 */
151152afc51eSDaniel Borkmann 				jmp_cond = X86_JL;
151252afc51eSDaniel Borkmann 				break;
151362258278SAlexei Starovoitov 			case BPF_JSGE:
1514a2c7a983SIngo Molnar 				/* Signed '>=', GE in x86 */
151562258278SAlexei Starovoitov 				jmp_cond = X86_JGE;
151662258278SAlexei Starovoitov 				break;
151752afc51eSDaniel Borkmann 			case BPF_JSLE:
1518a2c7a983SIngo Molnar 				/* Signed '<=', LE in x86 */
151952afc51eSDaniel Borkmann 				jmp_cond = X86_JLE;
152052afc51eSDaniel Borkmann 				break;
1521a2c7a983SIngo Molnar 			default: /* to silence GCC warning */
152262258278SAlexei Starovoitov 				return -EFAULT;
152362258278SAlexei Starovoitov 			}
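			/*
			 * addrs[x] is the image offset of the end of BPF insn x,
			 * i.e. the start of insn x + 1. The BPF target is insn
			 * i + off + 1, and an x86 relative jump is relative to
			 * the instruction following it, so the displacement is
			 * simply addrs[i + insn->off] - addrs[i].
			 */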
152462258278SAlexei Starovoitov 			jmp_offset = addrs[i + insn->off] - addrs[i];
152562258278SAlexei Starovoitov 			if (is_imm8(jmp_offset)) {
152693c5aeccSGary Lin 				if (jmp_padding) {
152793c5aeccSGary Lin 					/* To keep the jmp_offset valid, the extra bytes are
1528d9f6e12fSIngo Molnar 					 * padded before the jump insn, so we subtract the
152993c5aeccSGary Lin 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
153093c5aeccSGary Lin 					 *
153193c5aeccSGary Lin 					 * If the previous pass already emits an imm8
153293c5aeccSGary Lin 					 * jmp_cond, then this BPF insn won't shrink, so
153393c5aeccSGary Lin 					 * "nops" is 0.
153493c5aeccSGary Lin 					 *
153593c5aeccSGary Lin 					 * On the other hand, if the previous pass emits an
153693c5aeccSGary Lin 					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
153793c5aeccSGary Lin 					 * keep the image from shrinking further.
153893c5aeccSGary Lin 					 *
153993c5aeccSGary Lin 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
154093c5aeccSGary Lin 					 *     is 2 bytes, so the size difference is 4 bytes.
154193c5aeccSGary Lin 					 */
154293c5aeccSGary Lin 					nops = INSN_SZ_DIFF - 2;
154393c5aeccSGary Lin 					if (nops != 0 && nops != 4) {
154493c5aeccSGary Lin 						pr_err("unexpected jmp_cond padding: %d bytes\n",
154593c5aeccSGary Lin 						       nops);
154693c5aeccSGary Lin 						return -EFAULT;
154793c5aeccSGary Lin 					}
1548ced50fc4SJiri Olsa 					emit_nops(&prog, nops);
154993c5aeccSGary Lin 				}
155062258278SAlexei Starovoitov 				EMIT2(jmp_cond, jmp_offset);
155162258278SAlexei Starovoitov 			} else if (is_simm32(jmp_offset)) {
155262258278SAlexei Starovoitov 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
15533b58908aSEric Dumazet 			} else {
155462258278SAlexei Starovoitov 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
155562258278SAlexei Starovoitov 				return -EFAULT;
15563b58908aSEric Dumazet 			}
155762258278SAlexei Starovoitov 
15583b58908aSEric Dumazet 			break;
155962258278SAlexei Starovoitov 
156062258278SAlexei Starovoitov 		case BPF_JMP | BPF_JA:
15611612a981SGianluca Borello 			if (insn->off == -1)
15621612a981SGianluca Borello 				/* -1 jmp instructions will always jump
15631612a981SGianluca Borello 				 * backwards two bytes. Explicitly handling
15641612a981SGianluca Borello 				 * this case avoids wasting too many passes
15651612a981SGianluca Borello 				 * when there are long sequences of replaced
15661612a981SGianluca Borello 				 * dead code.
15671612a981SGianluca Borello 				 */
15681612a981SGianluca Borello 				jmp_offset = -2;
15691612a981SGianluca Borello 			else
157062258278SAlexei Starovoitov 				jmp_offset = addrs[i + insn->off] - addrs[i];
15711612a981SGianluca Borello 
157293c5aeccSGary Lin 			if (!jmp_offset) {
157393c5aeccSGary Lin 				/*
157493c5aeccSGary Lin 				 * If jmp_padding is enabled, the extra nops will
157593c5aeccSGary Lin 				 * be inserted. Otherwise, optimize out nop jumps.
157693c5aeccSGary Lin 				 */
157793c5aeccSGary Lin 				if (jmp_padding) {
157893c5aeccSGary Lin 					/* There are 3 possible conditions.
157993c5aeccSGary Lin 					 * (1) This BPF_JA is already optimized out in
158093c5aeccSGary Lin 					 *     the previous run, so there is no need
158193c5aeccSGary Lin 					 *     to pad any extra byte (0 byte).
158293c5aeccSGary Lin 					 * (2) The previous pass emits an imm8 jmp,
158393c5aeccSGary Lin 					 *     so we pad 2 bytes to match the previous
158493c5aeccSGary Lin 					 *     insn size.
158593c5aeccSGary Lin 					 * (3) Similarly, the previous pass emits an
158693c5aeccSGary Lin 					 *     imm32 jmp, and 5 bytes is padded.
158793c5aeccSGary Lin 					 */
158893c5aeccSGary Lin 					nops = INSN_SZ_DIFF;
158993c5aeccSGary Lin 					if (nops != 0 && nops != 2 && nops != 5) {
159093c5aeccSGary Lin 						pr_err("unexpected nop jump padding: %d bytes\n",
159193c5aeccSGary Lin 						       nops);
159293c5aeccSGary Lin 						return -EFAULT;
159393c5aeccSGary Lin 					}
1594ced50fc4SJiri Olsa 					emit_nops(&prog, nops);
159593c5aeccSGary Lin 				}
159662258278SAlexei Starovoitov 				break;
159793c5aeccSGary Lin 			}
159862258278SAlexei Starovoitov emit_jmp:
159962258278SAlexei Starovoitov 			if (is_imm8(jmp_offset)) {
160093c5aeccSGary Lin 				if (jmp_padding) {
160193c5aeccSGary Lin 					/* To avoid breaking jmp_offset, the extra bytes
160293c5aeccSGary Lin 					 * are padded before the actual jmp insn, so
1603d9f6e12fSIngo Molnar 					 * 2 bytes is subtracted from INSN_SZ_DIFF.
160493c5aeccSGary Lin 					 *
160593c5aeccSGary Lin 					 * If the previous pass already emits an imm8
160693c5aeccSGary Lin 					 * jmp, there is nothing to pad (0 byte).
160793c5aeccSGary Lin 					 *
160893c5aeccSGary Lin 					 * If it emits an imm32 jmp (5 bytes) previously
160993c5aeccSGary Lin 					 * and now an imm8 jmp (2 bytes), then we pad
161093c5aeccSGary Lin 					 * (5 - 2 = 3) bytes to stop the image from
161193c5aeccSGary Lin 					 * shrinking further.
161293c5aeccSGary Lin 					 */
161393c5aeccSGary Lin 					nops = INSN_SZ_DIFF - 2;
161493c5aeccSGary Lin 					if (nops != 0 && nops != 3) {
161593c5aeccSGary Lin 						pr_err("unexpected jump padding: %d bytes\n",
161693c5aeccSGary Lin 						       nops);
161793c5aeccSGary Lin 						return -EFAULT;
161893c5aeccSGary Lin 					}
1619ced50fc4SJiri Olsa 					emit_nops(&prog, nops);
162093c5aeccSGary Lin 				}
162162258278SAlexei Starovoitov 				EMIT2(0xEB, jmp_offset);
162262258278SAlexei Starovoitov 			} else if (is_simm32(jmp_offset)) {
162362258278SAlexei Starovoitov 				EMIT1_off32(0xE9, jmp_offset);
162462258278SAlexei Starovoitov 			} else {
162562258278SAlexei Starovoitov 				pr_err("jmp gen bug %llx\n", jmp_offset);
162662258278SAlexei Starovoitov 				return -EFAULT;
16273b58908aSEric Dumazet 			}
162862258278SAlexei Starovoitov 			break;
162962258278SAlexei Starovoitov 
163062258278SAlexei Starovoitov 		case BPF_JMP | BPF_EXIT:
1631769e0de6SAlexei Starovoitov 			if (seen_exit) {
163262258278SAlexei Starovoitov 				jmp_offset = ctx->cleanup_addr - addrs[i];
163362258278SAlexei Starovoitov 				goto emit_jmp;
163462258278SAlexei Starovoitov 			}
1635769e0de6SAlexei Starovoitov 			seen_exit = true;
1636a2c7a983SIngo Molnar 			/* Update cleanup_addr */
163762258278SAlexei Starovoitov 			ctx->cleanup_addr = proglen;
1638ebf7d1f5SMaciej Fijalkowski 			pop_callee_regs(&prog, callee_regs_used);
163962258278SAlexei Starovoitov 			EMIT1(0xC9);         /* leave */
164062258278SAlexei Starovoitov 			EMIT1(0xC3);         /* ret */
16410a14842fSEric Dumazet 			break;
16420a14842fSEric Dumazet 
16430a14842fSEric Dumazet 		default:
1644a2c7a983SIngo Molnar 			/*
1645a2c7a983SIngo Molnar 			 * By design x86-64 JIT should support all BPF instructions.
164662258278SAlexei Starovoitov 			 * This error will be seen if new instruction was added
1647a2c7a983SIngo Molnar 			 * to the interpreter, but not to the JIT, or if there is
1648a2c7a983SIngo Molnar 			 * junk in bpf_prog.
164962258278SAlexei Starovoitov 			 */
165062258278SAlexei Starovoitov 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1651f3c2af7bSAlexei Starovoitov 			return -EINVAL;
16520a14842fSEric Dumazet 		}
165362258278SAlexei Starovoitov 
16540a14842fSEric Dumazet 		ilen = prog - temp;
1655e0ee9c12SAlexei Starovoitov 		if (ilen > BPF_MAX_INSN_SIZE) {
16569383191dSDaniel Borkmann 			pr_err("bpf_jit: fatal insn size error\n");
1657e0ee9c12SAlexei Starovoitov 			return -EFAULT;
1658e0ee9c12SAlexei Starovoitov 		}
1659e0ee9c12SAlexei Starovoitov 
16600a14842fSEric Dumazet 		if (image) {
1661e4d4d456SPiotr Krysiuk 			/*
1662e4d4d456SPiotr Krysiuk 			 * When populating the image, assert that:
1663e4d4d456SPiotr Krysiuk 			 *
1664e4d4d456SPiotr Krysiuk 			 *  i) We do not write beyond the allocated space, and
1665e4d4d456SPiotr Krysiuk 			 * ii) addrs[i] did not change from the prior run, in order
1666e4d4d456SPiotr Krysiuk 			 *     to validate assumptions made for computing branch
1667e4d4d456SPiotr Krysiuk 			 *     displacements.
1668e4d4d456SPiotr Krysiuk 			 */
1669e4d4d456SPiotr Krysiuk 			if (unlikely(proglen + ilen > oldproglen ||
1670e4d4d456SPiotr Krysiuk 				     proglen + ilen != addrs[i])) {
16719383191dSDaniel Borkmann 				pr_err("bpf_jit: fatal error\n");
1672f3c2af7bSAlexei Starovoitov 				return -EFAULT;
16730a14842fSEric Dumazet 			}
16740a14842fSEric Dumazet 			memcpy(image + proglen, temp, ilen);
16750a14842fSEric Dumazet 		}
16760a14842fSEric Dumazet 		proglen += ilen;
16770a14842fSEric Dumazet 		addrs[i] = proglen;
16780a14842fSEric Dumazet 		prog = temp;
16790a14842fSEric Dumazet 	}
16803dec541bSAlexei Starovoitov 
16813dec541bSAlexei Starovoitov 	if (image && excnt != bpf_prog->aux->num_exentries) {
16823dec541bSAlexei Starovoitov 		pr_err("extable is not populated\n");
16833dec541bSAlexei Starovoitov 		return -EFAULT;
16843dec541bSAlexei Starovoitov 	}
1685f3c2af7bSAlexei Starovoitov 	return proglen;
1686f3c2af7bSAlexei Starovoitov }
1687f3c2af7bSAlexei Starovoitov 
168885d33df3SMartin KaFai Lau static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1689fec56f58SAlexei Starovoitov 		      int stack_size)
1690fec56f58SAlexei Starovoitov {
1691fec56f58SAlexei Starovoitov 	int i;
1692fec56f58SAlexei Starovoitov 	/* Store function arguments to stack.
1693fec56f58SAlexei Starovoitov 	 * For a function that accepts two pointers the sequence will be:
1694fec56f58SAlexei Starovoitov 	 * mov QWORD PTR [rbp-0x10],rdi
1695fec56f58SAlexei Starovoitov 	 * mov QWORD PTR [rbp-0x8],rsi
1696fec56f58SAlexei Starovoitov 	 */
1697fec56f58SAlexei Starovoitov 	for (i = 0; i < min(nr_args, 6); i++)
1698fec56f58SAlexei Starovoitov 		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1699fec56f58SAlexei Starovoitov 			 BPF_REG_FP,
1700fec56f58SAlexei Starovoitov 			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1701fec56f58SAlexei Starovoitov 			 -(stack_size - i * 8));
1702fec56f58SAlexei Starovoitov }
1703fec56f58SAlexei Starovoitov 
170485d33df3SMartin KaFai Lau static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1705fec56f58SAlexei Starovoitov 			 int stack_size)
1706fec56f58SAlexei Starovoitov {
1707fec56f58SAlexei Starovoitov 	int i;
1708fec56f58SAlexei Starovoitov 
1709fec56f58SAlexei Starovoitov 	/* Restore function arguments from stack.
1710fec56f58SAlexei Starovoitov 	 * For a function that accepts two pointers the sequence will be:
1711fec56f58SAlexei Starovoitov 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1712fec56f58SAlexei Starovoitov 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1713fec56f58SAlexei Starovoitov 	 */
1714fec56f58SAlexei Starovoitov 	for (i = 0; i < min(nr_args, 6); i++)
1715fec56f58SAlexei Starovoitov 		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1716fec56f58SAlexei Starovoitov 			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1717fec56f58SAlexei Starovoitov 			 BPF_REG_FP,
1718fec56f58SAlexei Starovoitov 			 -(stack_size - i * 8));
1719fec56f58SAlexei Starovoitov }
1720fec56f58SAlexei Starovoitov 
17217e639208SKP Singh static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1722356ed649SHou Tao 			   struct bpf_prog *p, int stack_size, bool save_ret)
1723fec56f58SAlexei Starovoitov {
1724fec56f58SAlexei Starovoitov 	u8 *prog = *pprog;
1725ca06f55bSAlexei Starovoitov 	u8 *jmp_insn;
1726fec56f58SAlexei Starovoitov 
1727ca06f55bSAlexei Starovoitov 	/* arg1: mov rdi, progs[i] */
1728ca06f55bSAlexei Starovoitov 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1729f2dd3b39SAlexei Starovoitov 	if (emit_call(&prog,
1730f2dd3b39SAlexei Starovoitov 		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
1731f2dd3b39SAlexei Starovoitov 		      __bpf_prog_enter, prog))
1732fec56f58SAlexei Starovoitov 			return -EINVAL;
1733fec56f58SAlexei Starovoitov 	/* remember prog start time returned by __bpf_prog_enter */
1734fec56f58SAlexei Starovoitov 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1735fec56f58SAlexei Starovoitov 
1736ca06f55bSAlexei Starovoitov 	/* if (__bpf_prog_enter*(prog) == 0)
1737ca06f55bSAlexei Starovoitov 	 *	goto skip_exec_of_prog;
1738ca06f55bSAlexei Starovoitov 	 */
1739ca06f55bSAlexei Starovoitov 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
1740ca06f55bSAlexei Starovoitov 	/* emit 2 nops that will be replaced with JE insn */
1741ca06f55bSAlexei Starovoitov 	jmp_insn = prog;
1742ca06f55bSAlexei Starovoitov 	emit_nops(&prog, 2);
1743ca06f55bSAlexei Starovoitov 
1744fec56f58SAlexei Starovoitov 	/* arg1: lea rdi, [rbp - stack_size] */
1745fec56f58SAlexei Starovoitov 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1746fec56f58SAlexei Starovoitov 	/* arg2: progs[i]->insnsi for interpreter */
17477e639208SKP Singh 	if (!p->jited)
1748fec56f58SAlexei Starovoitov 		emit_mov_imm64(&prog, BPF_REG_2,
17497e639208SKP Singh 			       (long) p->insnsi >> 32,
17507e639208SKP Singh 			       (u32) (long) p->insnsi);
1751fec56f58SAlexei Starovoitov 	/* call JITed bpf program or interpreter */
17527e639208SKP Singh 	if (emit_call(&prog, p->bpf_func, prog))
1753fec56f58SAlexei Starovoitov 		return -EINVAL;
1754fec56f58SAlexei Starovoitov 
1755356ed649SHou Tao 	/*
1756356ed649SHou Tao 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1757ae240823SKP Singh 	 * value of the previous call, which is then passed on the
1758ae240823SKP Singh 	 * stack to the next BPF program.
1759356ed649SHou Tao 	 *
1760356ed649SHou Tao 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
1761356ed649SHou Tao 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1762ae240823SKP Singh 	 */
1763356ed649SHou Tao 	if (save_ret)
1764ae240823SKP Singh 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1765ae240823SKP Singh 
1766ca06f55bSAlexei Starovoitov 	/* replace 2 nops with JE insn, since jmp target is known */
1767ca06f55bSAlexei Starovoitov 	jmp_insn[0] = X86_JE;
1768ca06f55bSAlexei Starovoitov 	jmp_insn[1] = prog - jmp_insn - 2;
1769ca06f55bSAlexei Starovoitov 
1770fec56f58SAlexei Starovoitov 	/* arg1: mov rdi, progs[i] */
1771f2dd3b39SAlexei Starovoitov 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1772fec56f58SAlexei Starovoitov 	/* arg2: mov rsi, rbx <- start time in nsec */
1773fec56f58SAlexei Starovoitov 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1774f2dd3b39SAlexei Starovoitov 	if (emit_call(&prog,
1775f2dd3b39SAlexei Starovoitov 		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
1776f2dd3b39SAlexei Starovoitov 		      __bpf_prog_exit, prog))
1777fec56f58SAlexei Starovoitov 			return -EINVAL;
17787e639208SKP Singh 
17797e639208SKP Singh 	*pprog = prog;
17807e639208SKP Singh 	return 0;
17817e639208SKP Singh }
17827e639208SKP Singh 
17837e639208SKP Singh static void emit_align(u8 **pprog, u32 align)
17847e639208SKP Singh {
17857e639208SKP Singh 	u8 *target, *prog = *pprog;
17867e639208SKP Singh 
17877e639208SKP Singh 	target = PTR_ALIGN(prog, align);
17887e639208SKP Singh 	if (target != prog)
17897e639208SKP Singh 		emit_nops(&prog, target - prog);
17907e639208SKP Singh 
17917e639208SKP Singh 	*pprog = prog;
17927e639208SKP Singh }
17937e639208SKP Singh 
17947e639208SKP Singh static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
17957e639208SKP Singh {
17967e639208SKP Singh 	u8 *prog = *pprog;
17977e639208SKP Singh 	s64 offset;
17987e639208SKP Singh 
17997e639208SKP Singh 	offset = func - (ip + 2 + 4);
18007e639208SKP Singh 	if (!is_simm32(offset)) {
18017e639208SKP Singh 		pr_err("Target %p is out of range\n", func);
18027e639208SKP Singh 		return -EINVAL;
18037e639208SKP Singh 	}
18047e639208SKP Singh 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
18057e639208SKP Singh 	*pprog = prog;
18067e639208SKP Singh 	return 0;
18077e639208SKP Singh }
18087e639208SKP Singh 
18097e639208SKP Singh static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1810356ed649SHou Tao 		      struct bpf_tramp_progs *tp, int stack_size,
1811356ed649SHou Tao 		      bool save_ret)
18127e639208SKP Singh {
18137e639208SKP Singh 	int i;
18147e639208SKP Singh 	u8 *prog = *pprog;
18157e639208SKP Singh 
18167e639208SKP Singh 	for (i = 0; i < tp->nr_progs; i++) {
1817356ed649SHou Tao 		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
1818356ed649SHou Tao 				    save_ret))
18197e639208SKP Singh 			return -EINVAL;
1820fec56f58SAlexei Starovoitov 	}
1821fec56f58SAlexei Starovoitov 	*pprog = prog;
1822fec56f58SAlexei Starovoitov 	return 0;
1823fec56f58SAlexei Starovoitov }
1824fec56f58SAlexei Starovoitov 
1825ae240823SKP Singh static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1826ae240823SKP Singh 			      struct bpf_tramp_progs *tp, int stack_size,
1827ae240823SKP Singh 			      u8 **branches)
1828ae240823SKP Singh {
1829ae240823SKP Singh 	u8 *prog = *pprog;
1830ced50fc4SJiri Olsa 	int i;
1831ae240823SKP Singh 
1832ae240823SKP Singh 	/* The first fmod_ret program will receive a garbage return value.
1833ae240823SKP Singh 	 * Set this to 0 to avoid confusing the program.
1834ae240823SKP Singh 	 */
1835ae240823SKP Singh 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1836ae240823SKP Singh 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1837ae240823SKP Singh 	for (i = 0; i < tp->nr_progs; i++) {
1838ae240823SKP Singh 		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1839ae240823SKP Singh 			return -EINVAL;
1840ae240823SKP Singh 
184113fac1d8SAlexei Starovoitov 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
184213fac1d8SAlexei Starovoitov 		 * if (*(u64 *)(rbp - 8) !=  0)
1843ae240823SKP Singh 		 *	goto do_fexit;
1844ae240823SKP Singh 		 */
184513fac1d8SAlexei Starovoitov 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
184613fac1d8SAlexei Starovoitov 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1847ae240823SKP Singh 
1848ae240823SKP Singh 		/* Save the location of the branch and generate 6 nops
1849ae240823SKP Singh 		 * (4 bytes for an offset and 2 bytes for the jump). These nops
1850ae240823SKP Singh 		 * are replaced with a conditional jump once do_fexit (i.e. the
1851ae240823SKP Singh 		 * start of the fexit invocation) is finalized.
1852ae240823SKP Singh 		 */
1853ae240823SKP Singh 		branches[i] = prog;
1854ae240823SKP Singh 		emit_nops(&prog, 4 + 2);
1855ae240823SKP Singh 	}
1856ae240823SKP Singh 
1857ae240823SKP Singh 	*pprog = prog;
1858ae240823SKP Singh 	return 0;
1859ae240823SKP Singh }
1860ae240823SKP Singh 
1861356ed649SHou Tao static bool is_valid_bpf_tramp_flags(unsigned int flags)
1862356ed649SHou Tao {
1863356ed649SHou Tao 	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1864356ed649SHou Tao 	    (flags & BPF_TRAMP_F_SKIP_FRAME))
1865356ed649SHou Tao 		return false;
1866356ed649SHou Tao 
1867356ed649SHou Tao 	/*
1868356ed649SHou Tao 	 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
1869356ed649SHou Tao 	 * and it must be used alone.
1870356ed649SHou Tao 	 */
1871356ed649SHou Tao 	if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
1872356ed649SHou Tao 	    (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
1873356ed649SHou Tao 		return false;
1874356ed649SHou Tao 
1875356ed649SHou Tao 	return true;
1876356ed649SHou Tao }
1877356ed649SHou Tao 
1878fec56f58SAlexei Starovoitov /* Example:
1879fec56f58SAlexei Starovoitov  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1880fec56f58SAlexei Starovoitov  * its 'struct btf_func_model' will be nr_args=2
1881fec56f58SAlexei Starovoitov  * The assembly code when eth_type_trans is executing after trampoline:
1882fec56f58SAlexei Starovoitov  *
1883fec56f58SAlexei Starovoitov  * push rbp
1884fec56f58SAlexei Starovoitov  * mov rbp, rsp
1885fec56f58SAlexei Starovoitov  * sub rsp, 16                     // space for skb and dev
1886fec56f58SAlexei Starovoitov  * push rbx                        // temp regs to pass start time
1887fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1888fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1889fec56f58SAlexei Starovoitov  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1890fec56f58SAlexei Starovoitov  * mov rbx, rax                    // remember start time if bpf stats are enabled
1891fec56f58SAlexei Starovoitov  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1892fec56f58SAlexei Starovoitov  * call addr_of_jited_FENTRY_prog
1893fec56f58SAlexei Starovoitov  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1894fec56f58SAlexei Starovoitov  * mov rsi, rbx                    // prog start time
1895fec56f58SAlexei Starovoitov  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1896fec56f58SAlexei Starovoitov  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1897fec56f58SAlexei Starovoitov  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1898fec56f58SAlexei Starovoitov  * pop rbx
1899fec56f58SAlexei Starovoitov  * leave
1900fec56f58SAlexei Starovoitov  * ret
1901fec56f58SAlexei Starovoitov  *
1902fec56f58SAlexei Starovoitov  * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
1903fec56f58SAlexei Starovoitov  * replaced with 'call generated_bpf_trampoline'. When it returns
1904fec56f58SAlexei Starovoitov  * eth_type_trans will continue executing with original skb and dev pointers.
1905fec56f58SAlexei Starovoitov  *
1906fec56f58SAlexei Starovoitov  * The assembly code when eth_type_trans is called from trampoline:
1907fec56f58SAlexei Starovoitov  *
1908fec56f58SAlexei Starovoitov  * push rbp
1909fec56f58SAlexei Starovoitov  * mov rbp, rsp
1910fec56f58SAlexei Starovoitov  * sub rsp, 24                     // space for skb, dev, return value
1911fec56f58SAlexei Starovoitov  * push rbx                        // temp regs to pass start time
1912fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1913fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1914fec56f58SAlexei Starovoitov  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1915fec56f58SAlexei Starovoitov  * mov rbx, rax                    // remember start time if bpf stats are enabled
1916fec56f58SAlexei Starovoitov  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1917fec56f58SAlexei Starovoitov  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1918fec56f58SAlexei Starovoitov  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1919fec56f58SAlexei Starovoitov  * mov rsi, rbx                    // prog start time
1920fec56f58SAlexei Starovoitov  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1921fec56f58SAlexei Starovoitov  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1922fec56f58SAlexei Starovoitov  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1923fec56f58SAlexei Starovoitov  * call eth_type_trans+5           // execute body of eth_type_trans
1924fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 8], rax    // save return value
1925fec56f58SAlexei Starovoitov  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1926fec56f58SAlexei Starovoitov  * mov rbx, rax                    // remember start time if bpf stats are enabled
1927fec56f58SAlexei Starovoitov  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1928fec56f58SAlexei Starovoitov  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1929fec56f58SAlexei Starovoitov  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1930fec56f58SAlexei Starovoitov  * mov rsi, rbx                    // prog start time
1931fec56f58SAlexei Starovoitov  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1932fec56f58SAlexei Starovoitov  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1933fec56f58SAlexei Starovoitov  * pop rbx
1934fec56f58SAlexei Starovoitov  * leave
1935fec56f58SAlexei Starovoitov  * add rsp, 8                      // skip eth_type_trans's frame
1936fec56f58SAlexei Starovoitov  * ret                             // return to its caller
1937fec56f58SAlexei Starovoitov  */
1938e21aa341SAlexei Starovoitov int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
193985d33df3SMartin KaFai Lau 				const struct btf_func_model *m, u32 flags,
194088fd9e53SKP Singh 				struct bpf_tramp_progs *tprogs,
1941fec56f58SAlexei Starovoitov 				void *orig_call)
1942fec56f58SAlexei Starovoitov {
1943ced50fc4SJiri Olsa 	int ret, i, nr_args = m->nr_args;
1944fec56f58SAlexei Starovoitov 	int stack_size = nr_args * 8;
194588fd9e53SKP Singh 	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
194688fd9e53SKP Singh 	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1947ae240823SKP Singh 	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1948ae240823SKP Singh 	u8 **branches = NULL;
1949fec56f58SAlexei Starovoitov 	u8 *prog;
1950356ed649SHou Tao 	bool save_ret;
1951fec56f58SAlexei Starovoitov 
1952fec56f58SAlexei Starovoitov 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1953fec56f58SAlexei Starovoitov 	if (nr_args > 6)
1954fec56f58SAlexei Starovoitov 		return -ENOTSUPP;
1955fec56f58SAlexei Starovoitov 
1956356ed649SHou Tao 	if (!is_valid_bpf_tramp_flags(flags))
1957fec56f58SAlexei Starovoitov 		return -EINVAL;
1958fec56f58SAlexei Starovoitov 
1959356ed649SHou Tao 	/* room for return value of orig_call or fentry prog */
1960356ed649SHou Tao 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
1961356ed649SHou Tao 	if (save_ret)
1962356ed649SHou Tao 		stack_size += 8;
1963fec56f58SAlexei Starovoitov 
19647e6f3cd8SJiri Olsa 	if (flags & BPF_TRAMP_F_IP_ARG)
19657e6f3cd8SJiri Olsa 		stack_size += 8; /* room for IP address argument */
19667e6f3cd8SJiri Olsa 
1967fec56f58SAlexei Starovoitov 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
1968fec56f58SAlexei Starovoitov 		/* skip patched call instruction and point orig_call to actual
1969fec56f58SAlexei Starovoitov 		 * body of the kernel function.
1970fec56f58SAlexei Starovoitov 		 */
19714b3da77bSDaniel Borkmann 		orig_call += X86_PATCH_SIZE;
1972fec56f58SAlexei Starovoitov 
1973fec56f58SAlexei Starovoitov 	prog = image;
1974fec56f58SAlexei Starovoitov 
1975fec56f58SAlexei Starovoitov 	EMIT1(0x55);		 /* push rbp */
1976fec56f58SAlexei Starovoitov 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1977fec56f58SAlexei Starovoitov 	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
1978fec56f58SAlexei Starovoitov 	EMIT1(0x53);		 /* push rbx */
1979fec56f58SAlexei Starovoitov 
19807e6f3cd8SJiri Olsa 	if (flags & BPF_TRAMP_F_IP_ARG) {
19817e6f3cd8SJiri Olsa 		/* Store IP address of the traced function:
19827e6f3cd8SJiri Olsa 		 * mov rax, QWORD PTR [rbp + 8]
19837e6f3cd8SJiri Olsa 		 * sub rax, X86_PATCH_SIZE
19847e6f3cd8SJiri Olsa 		 * mov QWORD PTR [rbp - stack_size], rax
19857e6f3cd8SJiri Olsa 		 */
19867e6f3cd8SJiri Olsa 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
19877e6f3cd8SJiri Olsa 		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
19887e6f3cd8SJiri Olsa 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size);
19897e6f3cd8SJiri Olsa 
19907e6f3cd8SJiri Olsa 		/* Continue with stack_size for regs storage, stack will
19917e6f3cd8SJiri Olsa 		 * be correctly restored with 'leave' instruction.
19927e6f3cd8SJiri Olsa 		 */
19937e6f3cd8SJiri Olsa 		stack_size -= 8;
19947e6f3cd8SJiri Olsa 	}
19957e6f3cd8SJiri Olsa 
1996fec56f58SAlexei Starovoitov 	save_regs(m, &prog, nr_args, stack_size);
1997fec56f58SAlexei Starovoitov 
1998e21aa341SAlexei Starovoitov 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
1999e21aa341SAlexei Starovoitov 		/* arg1: mov rdi, im */
2000e21aa341SAlexei Starovoitov 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2001e21aa341SAlexei Starovoitov 		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
2002e21aa341SAlexei Starovoitov 			ret = -EINVAL;
2003e21aa341SAlexei Starovoitov 			goto cleanup;
2004e21aa341SAlexei Starovoitov 		}
2005e21aa341SAlexei Starovoitov 	}
2006e21aa341SAlexei Starovoitov 
200788fd9e53SKP Singh 	if (fentry->nr_progs)
2008356ed649SHou Tao 		if (invoke_bpf(m, &prog, fentry, stack_size,
2009356ed649SHou Tao 			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
2010fec56f58SAlexei Starovoitov 			return -EINVAL;
2011fec56f58SAlexei Starovoitov 
2012ae240823SKP Singh 	if (fmod_ret->nr_progs) {
2013ae240823SKP Singh 		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
2014ae240823SKP Singh 				   GFP_KERNEL);
2015ae240823SKP Singh 		if (!branches)
2016ae240823SKP Singh 			return -ENOMEM;
2017ae240823SKP Singh 
2018ae240823SKP Singh 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
2019ae240823SKP Singh 				       branches)) {
2020ae240823SKP Singh 			ret = -EINVAL;
2021ae240823SKP Singh 			goto cleanup;
2022ae240823SKP Singh 		}
2023ae240823SKP Singh 	}
2024ae240823SKP Singh 
2025fec56f58SAlexei Starovoitov 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2026fec56f58SAlexei Starovoitov 		restore_regs(m, &prog, nr_args, stack_size);
2027fec56f58SAlexei Starovoitov 
2028fec56f58SAlexei Starovoitov 		/* call original function */
2029ae240823SKP Singh 		if (emit_call(&prog, orig_call, prog)) {
2030ae240823SKP Singh 			ret = -EINVAL;
2031ae240823SKP Singh 			goto cleanup;
2032ae240823SKP Singh 		}
2033fec56f58SAlexei Starovoitov 		/* remember return value in a stack for bpf prog to access */
2034fec56f58SAlexei Starovoitov 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2035e21aa341SAlexei Starovoitov 		im->ip_after_call = prog;
2036b1f480bcSIngo Molnar 		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2037b9082970SStanislav Fomichev 		prog += X86_PATCH_SIZE;
2038fec56f58SAlexei Starovoitov 	}
2039fec56f58SAlexei Starovoitov 
2040ae240823SKP Singh 	if (fmod_ret->nr_progs) {
2041ae240823SKP Singh 		/* From Intel 64 and IA-32 Architectures Optimization
2042ae240823SKP Singh 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2043ae240823SKP Singh 		 * Coding Rule 11: All branch targets should be 16-byte
2044ae240823SKP Singh 		 * aligned.
2045ae240823SKP Singh 		 */
2046ae240823SKP Singh 		emit_align(&prog, 16);
2047ae240823SKP Singh 		/* Update the branches saved in invoke_bpf_mod_ret with the
2048ae240823SKP Singh 		 * aligned address of do_fexit.
2049ae240823SKP Singh 		 */
2050ae240823SKP Singh 		for (i = 0; i < fmod_ret->nr_progs; i++)
2051ae240823SKP Singh 			emit_cond_near_jump(&branches[i], prog, branches[i],
2052ae240823SKP Singh 					    X86_JNE);
2053ae240823SKP Singh 	}
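	/*
	 * Note on the fixup above (a sketch of the encoding, matching the
	 * near-jump form used by emit_bpf_dispatcher() below): each
	 * branches[i] points at a placeholder that emit_cond_near_jump()
	 * rewrites in place as 0x0F, X86_JNE + 0x10, rel32, with rel32
	 * relative to the end of the 6-byte instruction.
	 */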
2054ae240823SKP Singh 
205588fd9e53SKP Singh 	if (fexit->nr_progs)
2056356ed649SHou Tao 		if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
2057ae240823SKP Singh 			ret = -EINVAL;
2058ae240823SKP Singh 			goto cleanup;
2059ae240823SKP Singh 		}
2060fec56f58SAlexei Starovoitov 
2061fec56f58SAlexei Starovoitov 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
2062fec56f58SAlexei Starovoitov 		restore_regs(m, &prog, nr_args, stack_size);
2063fec56f58SAlexei Starovoitov 
2064ae240823SKP Singh 	/* This needs to be done regardless. If there were fmod_ret programs,
2065ae240823SKP Singh 	 * the return value is only updated on the stack and still needs to be
2066ae240823SKP Singh 	 * restored to R0.
2067ae240823SKP Singh 	 */
2068e21aa341SAlexei Starovoitov 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2069e21aa341SAlexei Starovoitov 		im->ip_epilogue = prog;
2070e21aa341SAlexei Starovoitov 		/* arg1: mov rdi, im */
2071e21aa341SAlexei Starovoitov 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2072e21aa341SAlexei Starovoitov 		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2073e21aa341SAlexei Starovoitov 			ret = -EINVAL;
2074e21aa341SAlexei Starovoitov 			goto cleanup;
2075e21aa341SAlexei Starovoitov 		}
2076e21aa341SAlexei Starovoitov 	}
2077356ed649SHou Tao 	/* restore the return value of orig_call or the fentry prog back into RAX */
2078356ed649SHou Tao 	if (save_ret)
2079356ed649SHou Tao 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2080fec56f58SAlexei Starovoitov 
2081fec56f58SAlexei Starovoitov 	EMIT1(0x5B); /* pop rbx */
2082fec56f58SAlexei Starovoitov 	EMIT1(0xC9); /* leave */
2083fec56f58SAlexei Starovoitov 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
2084fec56f58SAlexei Starovoitov 		/* skip our return address and return to parent */
2085fec56f58SAlexei Starovoitov 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2086fec56f58SAlexei Starovoitov 	EMIT1(0xC3); /* ret */
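	/* With BPF_TRAMP_F_SKIP_FRAME, the return address just discarded by
	 * the 'add rsp, 8' above is the one pushed by the patched call at
	 * the traced function's entry, so this ret returns straight to the
	 * traced function's caller.
	 */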
208785d33df3SMartin KaFai Lau 	/* Make sure the trampoline generation logic doesn't overflow */
2088ae240823SKP Singh 	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2089ae240823SKP Singh 		ret = -EFAULT;
2090ae240823SKP Singh 		goto cleanup;
2091ae240823SKP Singh 	}
2092ae240823SKP Singh 	ret = prog - (u8 *)image;
2093ae240823SKP Singh 
2094ae240823SKP Singh cleanup:
2095ae240823SKP Singh 	kfree(branches);
2096ae240823SKP Singh 	return ret;
2097fec56f58SAlexei Starovoitov }
2098fec56f58SAlexei Starovoitov 
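/*
 * emit_bpf_dispatcher() emits a binary search over the sorted IPs in
 * progs[a..b]: inner nodes compare rdx against the pivot IP and 'jg' into
 * the upper half; each leaf compares against one target and 'je's to it,
 * falling through to an indirect jump for IPs outside the table.
 * Illustrative shape for four sorted targets f0 < f1 < f2 < f3 (a sketch,
 * not the exact emitted bytes):
 *
 *	cmp rdx, f1;  jg  2f
 *	cmp rdx, f0;  jg  1f
 *	cmp rdx, f0;  je  f0;  jmp *rdx
 * 1:	cmp rdx, f1;  je  f1;  jmp *rdx
 * 2:	cmp rdx, f2;  jg  3f
 *	cmp rdx, f2;  je  f2;  jmp *rdx
 * 3:	cmp rdx, f3;  je  f3;  jmp *rdx
 */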
209975ccbef6SBjörn Töpel static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
210075ccbef6SBjörn Töpel {
21017e639208SKP Singh 	u8 *jg_reloc, *prog = *pprog;
2102ced50fc4SJiri Olsa 	int pivot, err, jg_bytes = 1;
210375ccbef6SBjörn Töpel 	s64 jg_offset;
210475ccbef6SBjörn Töpel 
210575ccbef6SBjörn Töpel 	if (a == b) {
210675ccbef6SBjörn Töpel 		/* Leaf node of the recursion, i.e. no longer a range
210775ccbef6SBjörn Töpel 		 * of indices.
210875ccbef6SBjörn Töpel 		 */
210975ccbef6SBjörn Töpel 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
211075ccbef6SBjörn Töpel 		if (!is_simm32(progs[a]))
211175ccbef6SBjörn Töpel 			return -1;
211275ccbef6SBjörn Töpel 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
211375ccbef6SBjörn Töpel 			    progs[a]);
211475ccbef6SBjörn Töpel 		err = emit_cond_near_jump(&prog,	/* je func */
211575ccbef6SBjörn Töpel 					  (void *)progs[a], prog,
211675ccbef6SBjörn Töpel 					  X86_JE);
211775ccbef6SBjörn Töpel 		if (err)
211875ccbef6SBjörn Töpel 			return err;
211975ccbef6SBjörn Töpel 
212087c87ecdSPeter Zijlstra 		emit_indirect_jump(&prog, 2 /* rdx */, prog);
212175ccbef6SBjörn Töpel 
212275ccbef6SBjörn Töpel 		*pprog = prog;
212375ccbef6SBjörn Töpel 		return 0;
212475ccbef6SBjörn Töpel 	}
212575ccbef6SBjörn Töpel 
212675ccbef6SBjörn Töpel 	/* Not a leaf node, so we pivot, and recursively descend into
212775ccbef6SBjörn Töpel 	 * the lower and upper ranges.
212875ccbef6SBjörn Töpel 	 */
212975ccbef6SBjörn Töpel 	pivot = (b - a) / 2;
213075ccbef6SBjörn Töpel 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
213175ccbef6SBjörn Töpel 	if (!is_simm32(progs[a + pivot]))
213275ccbef6SBjörn Töpel 		return -1;
213375ccbef6SBjörn Töpel 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
213475ccbef6SBjörn Töpel 
213575ccbef6SBjörn Töpel 	if (pivot > 2) {				/* jg upper_part */
213675ccbef6SBjörn Töpel 		/* Require near jump. */
213775ccbef6SBjörn Töpel 		jg_bytes = 4;
213875ccbef6SBjörn Töpel 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
213975ccbef6SBjörn Töpel 	} else {
214075ccbef6SBjörn Töpel 		EMIT2(X86_JG, 0);
214175ccbef6SBjörn Töpel 	}
214275ccbef6SBjörn Töpel 	jg_reloc = prog;
214375ccbef6SBjörn Töpel 
214475ccbef6SBjörn Töpel 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
214575ccbef6SBjörn Töpel 				  progs);
214675ccbef6SBjörn Töpel 	if (err)
214775ccbef6SBjörn Töpel 		return err;
214875ccbef6SBjörn Töpel 
2149116eb788SBjörn Töpel 	/* From Intel 64 and IA-32 Architectures Optimization
2150116eb788SBjörn Töpel 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2151116eb788SBjörn Töpel 	 * Coding Rule 11: All branch targets should be 16-byte
2152116eb788SBjörn Töpel 	 * aligned.
2153116eb788SBjörn Töpel 	 */
21547e639208SKP Singh 	emit_align(&prog, 16);
215575ccbef6SBjörn Töpel 	jg_offset = prog - jg_reloc;
215675ccbef6SBjörn Töpel 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
215775ccbef6SBjörn Töpel 
215875ccbef6SBjörn Töpel 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
215975ccbef6SBjörn Töpel 				  b, progs);
216075ccbef6SBjörn Töpel 	if (err)
216175ccbef6SBjörn Töpel 		return err;
216275ccbef6SBjörn Töpel 
216375ccbef6SBjörn Töpel 	*pprog = prog;
216475ccbef6SBjörn Töpel 	return 0;
216575ccbef6SBjörn Töpel }
216675ccbef6SBjörn Töpel 
216775ccbef6SBjörn Töpel static int cmp_ips(const void *a, const void *b)
216875ccbef6SBjörn Töpel {
216975ccbef6SBjörn Töpel 	const s64 *ipa = a;
217075ccbef6SBjörn Töpel 	const s64 *ipb = b;
217175ccbef6SBjörn Töpel 
217275ccbef6SBjörn Töpel 	if (*ipa > *ipb)
217375ccbef6SBjörn Töpel 		return 1;
217475ccbef6SBjörn Töpel 	if (*ipa < *ipb)
217575ccbef6SBjörn Töpel 		return -1;
217675ccbef6SBjörn Töpel 	return 0;
217775ccbef6SBjörn Töpel }
217875ccbef6SBjörn Töpel 
217975ccbef6SBjörn Töpel int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
218075ccbef6SBjörn Töpel {
218175ccbef6SBjörn Töpel 	u8 *prog = image;
218275ccbef6SBjörn Töpel 
218375ccbef6SBjörn Töpel 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
218475ccbef6SBjörn Töpel 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
218575ccbef6SBjörn Töpel }
218675ccbef6SBjörn Töpel 
21871c2a088aSAlexei Starovoitov struct x64_jit_data {
21881c2a088aSAlexei Starovoitov 	struct bpf_binary_header *header;
21891c2a088aSAlexei Starovoitov 	int *addrs;
21901c2a088aSAlexei Starovoitov 	u8 *image;
21911c2a088aSAlexei Starovoitov 	int proglen;
21921c2a088aSAlexei Starovoitov 	struct jit_context ctx;
21931c2a088aSAlexei Starovoitov };
21941c2a088aSAlexei Starovoitov 
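/*
 * do_jit() normally converges in a handful of shrinking passes. If the
 * image is still changing size after PADDING_PASSES, the remaining passes
 * run with padding enabled, nop-padding branches to a fixed width so that
 * addresses stop moving and the loop can settle (a summary of the padding
 * handling in bpf_int_jit_compile() below).
 */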
219593c5aeccSGary Lin #define MAX_PASSES 20
219693c5aeccSGary Lin #define PADDING_PASSES (MAX_PASSES - 5)
219793c5aeccSGary Lin 
2198d1c55ab5SDaniel Borkmann struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
219962258278SAlexei Starovoitov {
2200f3c2af7bSAlexei Starovoitov 	struct bpf_binary_header *header = NULL;
2201959a7579SDaniel Borkmann 	struct bpf_prog *tmp, *orig_prog = prog;
22021c2a088aSAlexei Starovoitov 	struct x64_jit_data *jit_data;
2203f3c2af7bSAlexei Starovoitov 	int proglen, oldproglen = 0;
2204f3c2af7bSAlexei Starovoitov 	struct jit_context ctx = {};
2205959a7579SDaniel Borkmann 	bool tmp_blinded = false;
22061c2a088aSAlexei Starovoitov 	bool extra_pass = false;
220793c5aeccSGary Lin 	bool padding = false;
2208f3c2af7bSAlexei Starovoitov 	u8 *image = NULL;
2209f3c2af7bSAlexei Starovoitov 	int *addrs;
2210f3c2af7bSAlexei Starovoitov 	int pass;
2211f3c2af7bSAlexei Starovoitov 	int i;
2212f3c2af7bSAlexei Starovoitov 
221360b58afcSAlexei Starovoitov 	if (!prog->jit_requested)
2214959a7579SDaniel Borkmann 		return orig_prog;
2215959a7579SDaniel Borkmann 
2216959a7579SDaniel Borkmann 	tmp = bpf_jit_blind_constants(prog);
2217a2c7a983SIngo Molnar 	/*
2218a2c7a983SIngo Molnar 	 * If blinding was requested and we failed during blinding,
2219959a7579SDaniel Borkmann 	 * we must fall back to the interpreter.
2220959a7579SDaniel Borkmann 	 */
2221959a7579SDaniel Borkmann 	if (IS_ERR(tmp))
2222959a7579SDaniel Borkmann 		return orig_prog;
2223959a7579SDaniel Borkmann 	if (tmp != prog) {
2224959a7579SDaniel Borkmann 		tmp_blinded = true;
2225959a7579SDaniel Borkmann 		prog = tmp;
2226959a7579SDaniel Borkmann 	}
2227f3c2af7bSAlexei Starovoitov 
22281c2a088aSAlexei Starovoitov 	jit_data = prog->aux->jit_data;
22291c2a088aSAlexei Starovoitov 	if (!jit_data) {
22301c2a088aSAlexei Starovoitov 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
22311c2a088aSAlexei Starovoitov 		if (!jit_data) {
22321c2a088aSAlexei Starovoitov 			prog = orig_prog;
22331c2a088aSAlexei Starovoitov 			goto out;
22341c2a088aSAlexei Starovoitov 		}
22351c2a088aSAlexei Starovoitov 		prog->aux->jit_data = jit_data;
22361c2a088aSAlexei Starovoitov 	}
22371c2a088aSAlexei Starovoitov 	addrs = jit_data->addrs;
22381c2a088aSAlexei Starovoitov 	if (addrs) {
22391c2a088aSAlexei Starovoitov 		ctx = jit_data->ctx;
22401c2a088aSAlexei Starovoitov 		oldproglen = jit_data->proglen;
22411c2a088aSAlexei Starovoitov 		image = jit_data->image;
22421c2a088aSAlexei Starovoitov 		header = jit_data->header;
22431c2a088aSAlexei Starovoitov 		extra_pass = true;
224493c5aeccSGary Lin 		padding = true;
22451c2a088aSAlexei Starovoitov 		goto skip_init_addrs;
22461c2a088aSAlexei Starovoitov 	}
2247de920fc6SYonghong Song 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2248959a7579SDaniel Borkmann 	if (!addrs) {
2249959a7579SDaniel Borkmann 		prog = orig_prog;
22501c2a088aSAlexei Starovoitov 		goto out_addrs;
2251959a7579SDaniel Borkmann 	}
2252f3c2af7bSAlexei Starovoitov 
2253a2c7a983SIngo Molnar 	/*
2254a2c7a983SIngo Molnar 	 * Before the first pass, make a rough estimate of addrs[]:
2255a2c7a983SIngo Molnar 	 * each BPF instruction is translated to less than 64 bytes.
2256f3c2af7bSAlexei Starovoitov 	 */
22577c2e988fSAlexei Starovoitov 	for (proglen = 0, i = 0; i <= prog->len; i++) {
2258f3c2af7bSAlexei Starovoitov 		proglen += 64;
2259f3c2af7bSAlexei Starovoitov 		addrs[i] = proglen;
2260f3c2af7bSAlexei Starovoitov 	}
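	/*
	 * Worked example of the estimate above: for prog->len == 3 this
	 * yields addrs[0..3] = {64, 128, 192, 256}, i.e. 64 bytes of
	 * worst-case room per instruction; later passes shrink these
	 * offsets toward the real encoding.
	 */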
2261f3c2af7bSAlexei Starovoitov 	ctx.cleanup_addr = proglen;
22621c2a088aSAlexei Starovoitov skip_init_addrs:
2263f3c2af7bSAlexei Starovoitov 
2264a2c7a983SIngo Molnar 	/*
2265a2c7a983SIngo Molnar 	 * The JITed image shrinks with every pass and the loop iterates
2266a2c7a983SIngo Molnar 	 * until the image stops shrinking. Very large BPF programs may
22673f7352bfSAlexei Starovoitov 	 * only converge on the last pass; in such a case, do one more
2268a2c7a983SIngo Molnar 	 * pass to emit the final image.
22693f7352bfSAlexei Starovoitov 	 */
227093c5aeccSGary Lin 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
227193c5aeccSGary Lin 		if (!padding && pass >= PADDING_PASSES)
227293c5aeccSGary Lin 			padding = true;
227393c5aeccSGary Lin 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
2274f3c2af7bSAlexei Starovoitov 		if (proglen <= 0) {
22753aab8884SDaniel Borkmann out_image:
2276f3c2af7bSAlexei Starovoitov 			image = NULL;
2277f3c2af7bSAlexei Starovoitov 			if (header)
2278738cbe72SDaniel Borkmann 				bpf_jit_binary_free(header);
2279959a7579SDaniel Borkmann 			prog = orig_prog;
2280959a7579SDaniel Borkmann 			goto out_addrs;
2281f3c2af7bSAlexei Starovoitov 		}
22820a14842fSEric Dumazet 		if (image) {
2283e0ee9c12SAlexei Starovoitov 			if (proglen != oldproglen) {
2284f3c2af7bSAlexei Starovoitov 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2285f3c2af7bSAlexei Starovoitov 				       proglen, oldproglen);
22863aab8884SDaniel Borkmann 				goto out_image;
2287e0ee9c12SAlexei Starovoitov 			}
22880a14842fSEric Dumazet 			break;
22890a14842fSEric Dumazet 		}
22900a14842fSEric Dumazet 		if (proglen == oldproglen) {
22913dec541bSAlexei Starovoitov 			/*
22923dec541bSAlexei Starovoitov 			 * The number of entries in extable is the number of BPF_LDX
22933dec541bSAlexei Starovoitov 			 * insns that access kernel memory via "pointer to BTF type".
22943dec541bSAlexei Starovoitov 			 * The verifier changed their opcode from LDX|MEM|size
22953dec541bSAlexei Starovoitov 			 * to LDX|PROBE_MEM|size to make JITing easier.
22963dec541bSAlexei Starovoitov 			 */
22973dec541bSAlexei Starovoitov 			u32 align = __alignof__(struct exception_table_entry);
22983dec541bSAlexei Starovoitov 			u32 extable_size = prog->aux->num_exentries *
22993dec541bSAlexei Starovoitov 				sizeof(struct exception_table_entry);
23003dec541bSAlexei Starovoitov 
23013dec541bSAlexei Starovoitov 			/* allocate module memory for x86 insns and extable */
23023dec541bSAlexei Starovoitov 			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
23033dec541bSAlexei Starovoitov 						      &image, align, jit_fill_hole);
2304959a7579SDaniel Borkmann 			if (!header) {
2305959a7579SDaniel Borkmann 				prog = orig_prog;
2306959a7579SDaniel Borkmann 				goto out_addrs;
2307959a7579SDaniel Borkmann 			}
23083dec541bSAlexei Starovoitov 			prog->aux->extable = (void *) image + roundup(proglen, align);
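			/*
			 * Resulting layout inside the binary allocation, per
			 * the two statements above:
			 *
			 *	image                            x86 insns
			 *	image + proglen                  (pad to align)
			 *	image + roundup(proglen, align)  extable
			 */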
23090a14842fSEric Dumazet 		}
23100a14842fSEric Dumazet 		oldproglen = proglen;
23116007b080SDaniel Borkmann 		cond_resched();
23120a14842fSEric Dumazet 	}
231379617801SDaniel Borkmann 
23140a14842fSEric Dumazet 	if (bpf_jit_enable > 1)
2315485d6511SDaniel Borkmann 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
23160a14842fSEric Dumazet 
23170a14842fSEric Dumazet 	if (image) {
23181c2a088aSAlexei Starovoitov 		if (!prog->is_func || extra_pass) {
2319428d5df1SDaniel Borkmann 			bpf_tail_call_direct_fixup(prog);
23209d876e79SDaniel Borkmann 			bpf_jit_binary_lock_ro(header);
23211c2a088aSAlexei Starovoitov 		} else {
23221c2a088aSAlexei Starovoitov 			jit_data->addrs = addrs;
23231c2a088aSAlexei Starovoitov 			jit_data->ctx = ctx;
23241c2a088aSAlexei Starovoitov 			jit_data->proglen = proglen;
23251c2a088aSAlexei Starovoitov 			jit_data->image = image;
23261c2a088aSAlexei Starovoitov 			jit_data->header = header;
23271c2a088aSAlexei Starovoitov 		}
2328f3c2af7bSAlexei Starovoitov 		prog->bpf_func = (void *)image;
2329a91263d5SDaniel Borkmann 		prog->jited = 1;
2330783d28ddSMartin KaFai Lau 		prog->jited_len = proglen;
23319d5ecb09SDaniel Borkmann 	} else {
23329d5ecb09SDaniel Borkmann 		prog = orig_prog;
23330a14842fSEric Dumazet 	}
2334959a7579SDaniel Borkmann 
233539f56ca9SDaniel Borkmann 	if (!image || !prog->is_func || extra_pass) {
2336c454a46bSMartin KaFai Lau 		if (image)
23377c2e988fSAlexei Starovoitov 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
2338959a7579SDaniel Borkmann out_addrs:
2339de920fc6SYonghong Song 		kvfree(addrs);
23401c2a088aSAlexei Starovoitov 		kfree(jit_data);
23411c2a088aSAlexei Starovoitov 		prog->aux->jit_data = NULL;
23421c2a088aSAlexei Starovoitov 	}
2343959a7579SDaniel Borkmann out:
2344959a7579SDaniel Borkmann 	if (tmp_blinded)
2345959a7579SDaniel Borkmann 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2346959a7579SDaniel Borkmann 					   tmp : orig_prog);
2347d1c55ab5SDaniel Borkmann 	return prog;
23480a14842fSEric Dumazet }
2349e6ac2450SMartin KaFai Lau 
2350e6ac2450SMartin KaFai Lau bool bpf_jit_supports_kfunc_call(void)
2351e6ac2450SMartin KaFai Lau {
2352e6ac2450SMartin KaFai Lau 	return true;
2353e6ac2450SMartin KaFai Lau }
2354