xref: /openbmc/linux/arch/x86/net/bpf_jit_comp.c (revision a48acad789ff33d90e079311ed0323e5e5fc5cbd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/set_memory.h>
16 #include <asm/nospec-branch.h>
17 #include <asm/text-patching.h>
18 
19 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
20 {
21 	if (len == 1)
22 		*ptr = bytes;
23 	else if (len == 2)
24 		*(u16 *)ptr = bytes;
25 	else {
26 		*(u32 *)ptr = bytes;
27 		barrier();
28 	}
29 	return ptr + len;
30 }
31 
32 #define EMIT(bytes, len) \
33 	do { prog = emit_code(prog, bytes, len); } while (0)
34 
35 #define EMIT1(b1)		EMIT(b1, 1)
36 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
37 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
38 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
39 
40 #define EMIT1_off32(b1, off) \
41 	do { EMIT1(b1); EMIT(off, 4); } while (0)
42 #define EMIT2_off32(b1, b2, off) \
43 	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
44 #define EMIT3_off32(b1, b2, b3, off) \
45 	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
46 #define EMIT4_off32(b1, b2, b3, b4, off) \
47 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
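/*
 * Illustrative note (annotation, not part of the original source): the
 * EMITn() macros pack up to four opcode bytes little-endian into a u32,
 * which emit_code() then stores into the output buffer. For example,
 * assuming 'prog' points into a scratch buffer:
 *
 *	EMIT3(0x48, 0x89, 0xE5);	// writes 48 89 E5 = mov rbp, rsp
 *	EMIT1_off32(0xE8, rel);		// writes E8 + 4-byte rel32 = call
 *
 * EMIT3() passes 0x00E58948 with len = 3; emit_code() does a full 4-byte
 * store in that case and the spare byte is overwritten by the next EMIT
 * (part of why the BPF_INSN_SAFETY slack below exists).
 */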
48 
49 #ifdef CONFIG_X86_KERNEL_IBT
50 #define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
51 #else
52 #define EMIT_ENDBR()
53 #endif
54 
55 static bool is_imm8(int value)
56 {
57 	return value <= 127 && value >= -128;
58 }
59 
60 static bool is_simm32(s64 value)
61 {
62 	return value == (s64)(s32)value;
63 }
64 
65 static bool is_uimm32(u64 value)
66 {
67 	return value == (u64)(u32)value;
68 }
69 
70 /* mov dst, src */
71 #define EMIT_mov(DST, SRC)								 \
72 	do {										 \
73 		if (DST != SRC)								 \
74 			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
75 	} while (0)
76 
77 static int bpf_size_to_x86_bytes(int bpf_size)
78 {
79 	if (bpf_size == BPF_W)
80 		return 4;
81 	else if (bpf_size == BPF_H)
82 		return 2;
83 	else if (bpf_size == BPF_B)
84 		return 1;
85 	else if (bpf_size == BPF_DW)
86 		return 4; /* imm32 */
87 	else
88 		return 0;
89 }
90 
91 /*
92  * List of x86 conditional jump opcodes (. + s8)
93  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
94  */
95 #define X86_JB  0x72
96 #define X86_JAE 0x73
97 #define X86_JE  0x74
98 #define X86_JNE 0x75
99 #define X86_JBE 0x76
100 #define X86_JA  0x77
101 #define X86_JL  0x7C
102 #define X86_JGE 0x7D
103 #define X86_JLE 0x7E
104 #define X86_JG  0x7F
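/*
 * Illustrative note (annotation, not part of the original source): the
 * short form is a 2-byte insn, e.g. 74 05 = je .+5. Adding 0x10 plus the
 * 0x0f escape gives the 6-byte rel32 form, e.g. 0f 84 xx xx xx xx.
 * do_jit() picks between the two based on is_imm8(jmp_offset).
 */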
105 
106 /* Pick a register outside of BPF range for JIT internal work */
107 #define AUX_REG (MAX_BPF_JIT_REG + 1)
108 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
109 
110 /*
111  * The following table maps BPF registers to x86-64 registers.
112  *
113  * x86-64 register R12 is not used, since when used as a base
114  * register in load/store instructions it always needs an extra
115  * byte of encoding, and it is callee-saved.
116  *
117  * x86-64 register R9 is not used by BPF programs, but can be used by the
118  * BPF trampoline. x86-64 register R10 is used for blinding (if enabled).
119  */
120 static const int reg2hex[] = {
121 	[BPF_REG_0] = 0,  /* RAX */
122 	[BPF_REG_1] = 7,  /* RDI */
123 	[BPF_REG_2] = 6,  /* RSI */
124 	[BPF_REG_3] = 2,  /* RDX */
125 	[BPF_REG_4] = 1,  /* RCX */
126 	[BPF_REG_5] = 0,  /* R8  */
127 	[BPF_REG_6] = 3,  /* RBX callee saved */
128 	[BPF_REG_7] = 5,  /* R13 callee saved */
129 	[BPF_REG_8] = 6,  /* R14 callee saved */
130 	[BPF_REG_9] = 7,  /* R15 callee saved */
131 	[BPF_REG_FP] = 5, /* RBP readonly */
132 	[BPF_REG_AX] = 2, /* R10 temp register */
133 	[AUX_REG] = 3,    /* R11 temp register */
134 	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
135 };
136 
137 static const int reg2pt_regs[] = {
138 	[BPF_REG_0] = offsetof(struct pt_regs, ax),
139 	[BPF_REG_1] = offsetof(struct pt_regs, di),
140 	[BPF_REG_2] = offsetof(struct pt_regs, si),
141 	[BPF_REG_3] = offsetof(struct pt_regs, dx),
142 	[BPF_REG_4] = offsetof(struct pt_regs, cx),
143 	[BPF_REG_5] = offsetof(struct pt_regs, r8),
144 	[BPF_REG_6] = offsetof(struct pt_regs, bx),
145 	[BPF_REG_7] = offsetof(struct pt_regs, r13),
146 	[BPF_REG_8] = offsetof(struct pt_regs, r14),
147 	[BPF_REG_9] = offsetof(struct pt_regs, r15),
148 };
149 
150 /*
151  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
152  * which need an extra byte of encoding.
153  * rax,rcx,...,rbp have simpler encodings.
154  */
155 static bool is_ereg(u32 reg)
156 {
157 	return (1 << reg) & (BIT(BPF_REG_5) |
158 			     BIT(AUX_REG) |
159 			     BIT(BPF_REG_7) |
160 			     BIT(BPF_REG_8) |
161 			     BIT(BPF_REG_9) |
162 			     BIT(X86_REG_R9) |
163 			     BIT(BPF_REG_AX));
164 }
165 
166 /*
167  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
168  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
169  * of encoding. al,cl,dl,bl have simpler encoding.
170  */
171 static bool is_ereg_8l(u32 reg)
172 {
173 	return is_ereg(reg) ||
174 	    (1 << reg) & (BIT(BPF_REG_1) |
175 			  BIT(BPF_REG_2) |
176 			  BIT(BPF_REG_FP));
177 }
178 
179 static bool is_axreg(u32 reg)
180 {
181 	return reg == BPF_REG_0;
182 }
183 
184 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
185 static u8 add_1mod(u8 byte, u32 reg)
186 {
187 	if (is_ereg(reg))
188 		byte |= 1;
189 	return byte;
190 }
191 
192 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
193 {
194 	if (is_ereg(r1))
195 		byte |= 1;
196 	if (is_ereg(r2))
197 		byte |= 4;
198 	return byte;
199 }
200 
201 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
202 static u8 add_1reg(u8 byte, u32 dst_reg)
203 {
204 	return byte + reg2hex[dst_reg];
205 }
206 
207 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
208 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
209 {
210 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
211 }
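/*
 * Worked example (annotation, not part of the original source): for
 * EMIT_mov(BPF_REG_1, BPF_REG_2), i.e. "mov rdi, rsi":
 *
 *	add_2mod(0x48, BPF_REG_1, BPF_REG_2) = 0x48	(neither is an ereg)
 *	add_2reg(0xC0, BPF_REG_1, BPF_REG_2) = 0xC0 + 7 + (6 << 3) = 0xF7
 *
 * so the emitted bytes are 48 89 F7 = mov rdi, rsi.
 */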
212 
213 /* Some 1-byte opcodes for binary ALU operations */
214 static u8 simple_alu_opcodes[] = {
215 	[BPF_ADD] = 0x01,
216 	[BPF_SUB] = 0x29,
217 	[BPF_AND] = 0x21,
218 	[BPF_OR] = 0x09,
219 	[BPF_XOR] = 0x31,
220 	[BPF_LSH] = 0xE0,
221 	[BPF_RSH] = 0xE8,
222 	[BPF_ARSH] = 0xF8,
223 };
224 
225 static void jit_fill_hole(void *area, unsigned int size)
226 {
227 	/* Fill whole space with INT3 instructions */
228 	memset(area, 0xcc, size);
229 }
230 
231 int bpf_arch_text_invalidate(void *dst, size_t len)
232 {
233 	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
234 }
235 
236 struct jit_context {
237 	int cleanup_addr; /* Epilogue code offset */
238 
239 	/*
240 	 * Program-specific offsets of labels in the code; these rely on the
241 	 * JIT doing at least 2 passes, recording the position on the first
242 	 * pass so that the correct offset can be generated on the second pass.
243 	 */
244 	int tail_call_direct_label;
245 	int tail_call_indirect_label;
246 };
247 
248 /* Maximum number of bytes emitted while JITing one eBPF insn */
249 #define BPF_MAX_INSN_SIZE	128
250 #define BPF_INSN_SAFETY		64
251 
252 /* Number of bytes emit_patch() needs to generate instructions */
253 #define X86_PATCH_SIZE		5
254 /* Number of bytes that will be skipped on tailcall */
255 #define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)
256 
257 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
258 {
259 	u8 *prog = *pprog;
260 
261 	if (callee_regs_used[0])
262 		EMIT1(0x53);         /* push rbx */
263 	if (callee_regs_used[1])
264 		EMIT2(0x41, 0x55);   /* push r13 */
265 	if (callee_regs_used[2])
266 		EMIT2(0x41, 0x56);   /* push r14 */
267 	if (callee_regs_used[3])
268 		EMIT2(0x41, 0x57);   /* push r15 */
269 	*pprog = prog;
270 }
271 
272 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
273 {
274 	u8 *prog = *pprog;
275 
276 	if (callee_regs_used[3])
277 		EMIT2(0x41, 0x5F);   /* pop r15 */
278 	if (callee_regs_used[2])
279 		EMIT2(0x41, 0x5E);   /* pop r14 */
280 	if (callee_regs_used[1])
281 		EMIT2(0x41, 0x5D);   /* pop r13 */
282 	if (callee_regs_used[0])
283 		EMIT1(0x5B);         /* pop rbx */
284 	*pprog = prog;
285 }
286 
287 /*
288  * Emit x86-64 prologue code for BPF program.
289  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
290  * while jumping to another program
291  */
292 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
293 			  bool tail_call_reachable, bool is_subprog)
294 {
295 	u8 *prog = *pprog;
296 
297 	/* BPF trampoline can be made to work without these nops,
298 	 * but let's waste 5 bytes for now and optimize later
299 	 */
300 	EMIT_ENDBR();
301 	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
302 	prog += X86_PATCH_SIZE;
303 	if (!ebpf_from_cbpf) {
304 		if (tail_call_reachable && !is_subprog)
305 			EMIT2(0x31, 0xC0); /* xor eax, eax */
306 		else
307 			EMIT2(0x66, 0x90); /* nop2 */
308 	}
309 	EMIT1(0x55);             /* push rbp */
310 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
311 
312 	/* X86_TAIL_CALL_OFFSET is here */
313 	EMIT_ENDBR();
314 
315 	/* sub rsp, rounded_stack_depth */
316 	if (stack_depth)
317 		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
318 	if (tail_call_reachable)
319 		EMIT1(0x50);         /* push rax */
320 	*pprog = prog;
321 }
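/*
 * Byte accounting (annotation, not part of the original source): for a
 * native eBPF program the prologue above emits
 *
 *	endbr (ENDBR_INSN_SIZE) + 5-byte nop + 2 (xor eax,eax or nop2)
 *	+ 1 (push rbp) + 3 (mov rbp, rsp) = 11 + ENDBR_INSN_SIZE bytes
 *
 * before the second ENDBR, which is why a tail call that jumps to
 * prog->bpf_func + X86_TAIL_CALL_OFFSET lands exactly on that ENDBR.
 */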
322 
323 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
324 {
325 	u8 *prog = *pprog;
326 	s64 offset;
327 
328 	offset = func - (ip + X86_PATCH_SIZE);
329 	if (!is_simm32(offset)) {
330 		pr_err("Target call %p is out of range\n", func);
331 		return -ERANGE;
332 	}
333 	EMIT1_off32(opcode, offset);
334 	*pprog = prog;
335 	return 0;
336 }
337 
338 static int emit_call(u8 **pprog, void *func, void *ip)
339 {
340 	return emit_patch(pprog, func, ip, 0xE8);
341 }
342 
343 static int emit_jump(u8 **pprog, void *func, void *ip)
344 {
345 	return emit_patch(pprog, func, ip, 0xE9);
346 }
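/*
 * Illustrative example (annotation, not part of the original source):
 * emit_call()/emit_jump() produce the 5-byte rel32 forms, with the
 * displacement measured from the end of the instruction:
 *
 *	offset = func - (ip + X86_PATCH_SIZE);
 *	EMIT1_off32(0xE8, offset);	// call func  (E8 + rel32)
 *	EMIT1_off32(0xE9, offset);	// jmp  func  (E9 + rel32)
 *
 * which only works while the target is within +/- 2 GB (is_simm32()).
 */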
347 
348 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
349 				void *old_addr, void *new_addr)
350 {
351 	const u8 *nop_insn = x86_nops[5];
352 	u8 old_insn[X86_PATCH_SIZE];
353 	u8 new_insn[X86_PATCH_SIZE];
354 	u8 *prog;
355 	int ret;
356 
357 	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
358 	if (old_addr) {
359 		prog = old_insn;
360 		ret = t == BPF_MOD_CALL ?
361 		      emit_call(&prog, old_addr, ip) :
362 		      emit_jump(&prog, old_addr, ip);
363 		if (ret)
364 			return ret;
365 	}
366 
367 	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
368 	if (new_addr) {
369 		prog = new_insn;
370 		ret = t == BPF_MOD_CALL ?
371 		      emit_call(&prog, new_addr, ip) :
372 		      emit_jump(&prog, new_addr, ip);
373 		if (ret)
374 			return ret;
375 	}
376 
377 	ret = -EBUSY;
378 	mutex_lock(&text_mutex);
379 	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
380 		goto out;
381 	ret = 1;
382 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
383 		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
384 		ret = 0;
385 	}
386 out:
387 	mutex_unlock(&text_mutex);
388 	return ret;
389 }
390 
391 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
392 		       void *old_addr, void *new_addr)
393 {
394 	if (!is_kernel_text((long)ip) &&
395 	    !is_bpf_text_address((long)ip))
396 		/* BPF poking in modules is not supported */
397 		return -EINVAL;
398 
399 	/*
400 	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
401 	 * with an ENDBR instruction.
402 	 */
403 	if (is_endbr(*(u32 *)ip))
404 		ip += ENDBR_INSN_SIZE;
405 
406 	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
407 }
408 
409 #define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)
410 
411 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
412 {
413 	u8 *prog = *pprog;
414 
415 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
416 		EMIT_LFENCE();
417 		EMIT2(0xFF, 0xE0 + reg);
418 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
419 		OPTIMIZER_HIDE_VAR(reg);
420 		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
421 	} else {
422 		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
423 		if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
424 			EMIT1(0xCC);		/* int3 */
425 	}
426 
427 	*pprog = prog;
428 }
429 
430 static void emit_return(u8 **pprog, u8 *ip)
431 {
432 	u8 *prog = *pprog;
433 
434 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
435 		emit_jump(&prog, &__x86_return_thunk, ip);
436 	} else {
437 		EMIT1(0xC3);		/* ret */
438 		if (IS_ENABLED(CONFIG_SLS))
439 			EMIT1(0xCC);	/* int3 */
440 	}
441 
442 	*pprog = prog;
443 }
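/*
 * Illustrative note (annotation, not part of the original source): with
 * return thunks enabled this emits "E9 rel32" (jmp __x86_return_thunk),
 * otherwise a bare "C3" ret, optionally followed by "CC" (int3) as an
 * SLS speculation stopper.
 */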
444 
445 /*
446  * Generate the following code:
447  *
448  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
449  *   if (index >= array->map.max_entries)
450  *     goto out;
451  *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
452  *     goto out;
453  *   prog = array->ptrs[index];
454  *   if (prog == NULL)
455  *     goto out;
456  *   goto *(prog->bpf_func + prologue_size);
457  * out:
458  */
459 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
460 					u32 stack_depth, u8 *ip,
461 					struct jit_context *ctx)
462 {
463 	int tcc_off = -4 - round_up(stack_depth, 8);
464 	u8 *prog = *pprog, *start = *pprog;
465 	int offset;
466 
467 	/*
468 	 * rdi - pointer to ctx
469 	 * rsi - pointer to bpf_array
470 	 * rdx - index in bpf_array
471 	 */
472 
473 	/*
474 	 * if (index >= array->map.max_entries)
475 	 *	goto out;
476 	 */
477 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
478 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
479 	      offsetof(struct bpf_array, map.max_entries));
480 
481 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
482 	EMIT2(X86_JBE, offset);                   /* jbe out */
483 
484 	/*
485 	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
486 	 *	goto out;
487 	 */
488 	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp + tcc_off] */
489 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
490 
491 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
492 	EMIT2(X86_JAE, offset);                   /* jae out */
493 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
494 	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp + tcc_off], eax */
495 
496 	/* prog = array->ptrs[index]; */
497 	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
498 		    offsetof(struct bpf_array, ptrs));
499 
500 	/*
501 	 * if (prog == NULL)
502 	 *	goto out;
503 	 */
504 	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
505 
506 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
507 	EMIT2(X86_JE, offset);                    /* je out */
508 
509 	pop_callee_regs(&prog, callee_regs_used);
510 
511 	EMIT1(0x58);                              /* pop rax */
512 	if (stack_depth)
513 		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
514 			    round_up(stack_depth, 8));
515 
516 	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
517 	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
518 	      offsetof(struct bpf_prog, bpf_func));
519 	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
520 	      X86_TAIL_CALL_OFFSET);
521 	/*
522 	 * Now we're ready to jump into next BPF program
523 	 * rdi == ctx (1st arg)
524 	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
525 	 */
526 	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
527 
528 	/* out: */
529 	ctx->tail_call_indirect_label = prog - start;
530 	*pprog = prog;
531 }
532 
533 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
534 				      u8 **pprog, u8 *ip,
535 				      bool *callee_regs_used, u32 stack_depth,
536 				      struct jit_context *ctx)
537 {
538 	int tcc_off = -4 - round_up(stack_depth, 8);
539 	u8 *prog = *pprog, *start = *pprog;
540 	int offset;
541 
542 	/*
543 	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
544 	 *	goto out;
545 	 */
546 	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp + tcc_off] */
547 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
548 
549 	offset = ctx->tail_call_direct_label - (prog + 2 - start);
550 	EMIT2(X86_JAE, offset);                       /* jae out */
551 	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
552 	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp + tcc_off], eax */
553 
554 	poke->tailcall_bypass = ip + (prog - start);
555 	poke->adj_off = X86_TAIL_CALL_OFFSET;
556 	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
557 	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
558 
559 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
560 		  poke->tailcall_bypass);
561 
562 	pop_callee_regs(&prog, callee_regs_used);
563 	EMIT1(0x58);                                  /* pop rax */
564 	if (stack_depth)
565 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
566 
567 	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
568 	prog += X86_PATCH_SIZE;
569 
570 	/* out: */
571 	ctx->tail_call_direct_label = prog - start;
572 
573 	*pprog = prog;
574 }
575 
576 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
577 {
578 	struct bpf_jit_poke_descriptor *poke;
579 	struct bpf_array *array;
580 	struct bpf_prog *target;
581 	int i, ret;
582 
583 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
584 		poke = &prog->aux->poke_tab[i];
585 		if (poke->aux && poke->aux != prog->aux)
586 			continue;
587 
588 		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
589 
590 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
591 			continue;
592 
593 		array = container_of(poke->tail_call.map, struct bpf_array, map);
594 		mutex_lock(&array->aux->poke_mutex);
595 		target = array->ptrs[poke->tail_call.key];
596 		if (target) {
597 			ret = __bpf_arch_text_poke(poke->tailcall_target,
598 						   BPF_MOD_JUMP, NULL,
599 						   (u8 *)target->bpf_func +
600 						   poke->adj_off);
601 			BUG_ON(ret < 0);
602 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
603 						   BPF_MOD_JUMP,
604 						   (u8 *)poke->tailcall_target +
605 						   X86_PATCH_SIZE, NULL);
606 			BUG_ON(ret < 0);
607 		}
608 		WRITE_ONCE(poke->tailcall_target_stable, true);
609 		mutex_unlock(&array->aux->poke_mutex);
610 	}
611 }
612 
613 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
614 			   u32 dst_reg, const u32 imm32)
615 {
616 	u8 *prog = *pprog;
617 	u8 b1, b2, b3;
618 
619 	/*
620 	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
621 	 * (which zero-extends imm32) to save 2 bytes.
622 	 */
623 	if (sign_propagate && (s32)imm32 < 0) {
624 		/* 'mov %rax, imm32' sign extends imm32 */
625 		b1 = add_1mod(0x48, dst_reg);
626 		b2 = 0xC7;
627 		b3 = 0xC0;
628 		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
629 		goto done;
630 	}
631 
632 	/*
633 	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
634 	 * to save 3 bytes.
635 	 */
636 	if (imm32 == 0) {
637 		if (is_ereg(dst_reg))
638 			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
639 		b2 = 0x31; /* xor */
640 		b3 = 0xC0;
641 		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
642 		goto done;
643 	}
644 
645 	/* mov %eax, imm32 */
646 	if (is_ereg(dst_reg))
647 		EMIT1(add_1mod(0x40, dst_reg));
648 	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
649 done:
650 	*pprog = prog;
651 }
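/*
 * Size comparison behind the optimizations above (annotation, not part of
 * the original source), shown for dst_reg = rax:
 *
 *	48 C7 C0 imm32		mov rax, imm32 (sign-extended)	7 bytes
 *	B8 imm32		mov eax, imm32 (zero-extended)	5 bytes
 *	31 C0			xor eax, eax			2 bytes
 *
 * hence the 2- and 3-byte savings mentioned in the comments.
 */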
652 
653 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
654 			   const u32 imm32_hi, const u32 imm32_lo)
655 {
656 	u8 *prog = *pprog;
657 
658 	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
659 		/*
660 		 * For emitting a plain u32, where the sign bit must not be
661 		 * propagated, LLVM tends to load imm64 over mov32
662 		 * directly, so save a couple of bytes by just doing
663 		 * 'mov %eax, imm32' instead.
664 		 */
665 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
666 	} else {
667 		/* movabsq rax, imm64 */
668 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
669 		EMIT(imm32_lo, 4);
670 		EMIT(imm32_hi, 4);
671 	}
672 
673 	*pprog = prog;
674 }
675 
676 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
677 {
678 	u8 *prog = *pprog;
679 
680 	if (is64) {
681 		/* mov dst, src */
682 		EMIT_mov(dst_reg, src_reg);
683 	} else {
684 		/* mov32 dst, src */
685 		if (is_ereg(dst_reg) || is_ereg(src_reg))
686 			EMIT1(add_2mod(0x40, dst_reg, src_reg));
687 		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
688 	}
689 
690 	*pprog = prog;
691 }
692 
693 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
694 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
695 {
696 	u8 *prog = *pprog;
697 
698 	if (is_imm8(off)) {
699 		/* 1-byte signed displacement.
700 		 *
701 		 * If off == 0 we could skip this and save one extra byte, but
702 		 * the special case of x86 R13, which always needs an offset, is
703 		 * not worth the hassle
704 		 */
705 		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
706 	} else {
707 		/* 4-byte signed displacement */
708 		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
709 	}
710 	*pprog = prog;
711 }
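/*
 * Illustrative note (annotation, not part of the original source): the two
 * forms differ only in the ModRM mod bits and displacement width, e.g.
 * with ptr_reg = BPF_REG_1 (rdi) and val_reg = BPF_REG_0 (rax):
 *
 *	off = 0x14:	47 14			[rdi + 0x14]   (disp8)
 *	off = 0x1234:	87 34 12 00 00		[rdi + 0x1234] (disp32)
 */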
712 
713 /*
714  * Emit a REX byte if it will be necessary to address these registers
715  */
716 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
717 {
718 	u8 *prog = *pprog;
719 
720 	if (is64)
721 		EMIT1(add_2mod(0x48, dst_reg, src_reg));
722 	else if (is_ereg(dst_reg) || is_ereg(src_reg))
723 		EMIT1(add_2mod(0x40, dst_reg, src_reg));
724 	*pprog = prog;
725 }
726 
727 /*
728  * Similar version of maybe_emit_mod() for a single register
729  */
730 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
731 {
732 	u8 *prog = *pprog;
733 
734 	if (is64)
735 		EMIT1(add_1mod(0x48, reg));
736 	else if (is_ereg(reg))
737 		EMIT1(add_1mod(0x40, reg));
738 	*pprog = prog;
739 }
740 
741 /* LDX: dst_reg = *(u8*)(src_reg + off) */
742 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
743 {
744 	u8 *prog = *pprog;
745 
746 	switch (size) {
747 	case BPF_B:
748 		/* Emit 'movzx rax, byte ptr [rax + off]' */
749 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
750 		break;
751 	case BPF_H:
752 		/* Emit 'movzx rax, word ptr [rax + off]' */
753 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
754 		break;
755 	case BPF_W:
756 		/* Emit 'mov eax, dword ptr [rax+0x14]' */
757 		if (is_ereg(dst_reg) || is_ereg(src_reg))
758 			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
759 		else
760 			EMIT1(0x8B);
761 		break;
762 	case BPF_DW:
763 		/* Emit 'mov rax, qword ptr [rax+0x14]' */
764 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
765 		break;
766 	}
767 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
768 	*pprog = prog;
769 }
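/*
 * Worked example (annotation, not part of the original source):
 * emit_ldx(&prog, BPF_B, BPF_REG_0, BPF_REG_1, 0) emits
 *
 *	48 0F B6 47 00		movzx rax, byte ptr [rdi + 0]
 *
 * i.e. a zero-extending load, matching BPF's rule that sub-word LDX
 * clears the upper bits of the destination register.
 */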
770 
771 /* STX: *(u8*)(dst_reg + off) = src_reg */
772 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
773 {
774 	u8 *prog = *pprog;
775 
776 	switch (size) {
777 	case BPF_B:
778 		/* Emit 'mov byte ptr [rax + off], al' */
779 		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
780 			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
781 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
782 		else
783 			EMIT1(0x88);
784 		break;
785 	case BPF_H:
786 		if (is_ereg(dst_reg) || is_ereg(src_reg))
787 			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
788 		else
789 			EMIT2(0x66, 0x89);
790 		break;
791 	case BPF_W:
792 		if (is_ereg(dst_reg) || is_ereg(src_reg))
793 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
794 		else
795 			EMIT1(0x89);
796 		break;
797 	case BPF_DW:
798 		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
799 		break;
800 	}
801 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
802 	*pprog = prog;
803 }
804 
805 static int emit_atomic(u8 **pprog, u8 atomic_op,
806 		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
807 {
808 	u8 *prog = *pprog;
809 
810 	EMIT1(0xF0); /* lock prefix */
811 
812 	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
813 
814 	/* emit opcode */
815 	switch (atomic_op) {
816 	case BPF_ADD:
817 	case BPF_AND:
818 	case BPF_OR:
819 	case BPF_XOR:
820 		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
821 		EMIT1(simple_alu_opcodes[atomic_op]);
822 		break;
823 	case BPF_ADD | BPF_FETCH:
824 		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
825 		EMIT2(0x0F, 0xC1);
826 		break;
827 	case BPF_XCHG:
828 		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
829 		EMIT1(0x87);
830 		break;
831 	case BPF_CMPXCHG:
832 		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
833 		EMIT2(0x0F, 0xB1);
834 		break;
835 	default:
836 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
837 		return -EFAULT;
838 	}
839 
840 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
841 
842 	*pprog = prog;
843 	return 0;
844 }
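/*
 * Illustrative example (annotation, not part of the original source):
 * emit_atomic(&prog, BPF_ADD, BPF_REG_1, BPF_REG_0, 0, BPF_DW) emits
 *
 *	F0 48 01 47 00		lock add qword ptr [rdi + 0], rax
 *
 * i.e. the lock prefix, an optional REX for 64-bit or extended registers,
 * the ALU/xadd/xchg/cmpxchg opcode, then the usual ModRM + displacement
 * suffix.
 */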
845 
846 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
847 {
848 	u32 reg = x->fixup >> 8;
849 
850 	/* jump over faulting load and clear dest register */
851 	*(unsigned long *)((void *)regs + reg) = 0;
852 	regs->ip += x->fixup & 0xff;
853 	return true;
854 }
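/*
 * Worked example (annotation, not part of the original source): for a
 * faulting "mov rbx, qword ptr [rax + 0x14]" (4 bytes, dst_reg = BPF_REG_6)
 * do_jit() stores
 *
 *	ex->fixup = 4 | (offsetof(struct pt_regs, bx) << 8);
 *
 * so this handler zeroes regs->bx and advances regs->ip by 4 bytes,
 * making the load behave as if it had returned 0.
 */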
855 
856 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
857 			     bool *regs_used, bool *tail_call_seen)
858 {
859 	int i;
860 
861 	for (i = 1; i <= insn_cnt; i++, insn++) {
862 		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
863 			*tail_call_seen = true;
864 		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
865 			regs_used[0] = true;
866 		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
867 			regs_used[1] = true;
868 		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
869 			regs_used[2] = true;
870 		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
871 			regs_used[3] = true;
872 	}
873 }
874 
875 static void emit_nops(u8 **pprog, int len)
876 {
877 	u8 *prog = *pprog;
878 	int i, noplen;
879 
880 	while (len > 0) {
881 		noplen = len;
882 
883 		if (noplen > ASM_NOP_MAX)
884 			noplen = ASM_NOP_MAX;
885 
886 		for (i = 0; i < noplen; i++)
887 			EMIT1(x86_nops[noplen][i]);
888 		len -= noplen;
889 	}
890 
891 	*pprog = prog;
892 }
893 
894 /* emit the 3-byte VEX prefix
895  *
896  * r: same as rex.r, extra bit for ModRM reg field
897  * x: same as rex.x, extra bit for SIB index field
898  * b: same as rex.b, extra bit for ModRM r/m, or SIB base
899  * m: opcode map select, encoding escape bytes e.g. 0x0f38
900  * w: same as rex.w (32 bit or 64 bit) or opcode specific
901  * src_reg2: additional source reg (encoded as BPF reg)
902  * l: vector length (128 bit or 256 bit) or reserved
903  * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
904  */
905 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
906 		      bool w, u8 src_reg2, bool l, u8 pp)
907 {
908 	u8 *prog = *pprog;
909 	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
910 	u8 b1, b2;
911 	u8 vvvv = reg2hex[src_reg2];
912 
913 	/* reg2hex gives only the lower 3 bits of vvvv */
914 	if (is_ereg(src_reg2))
915 		vvvv |= 1 << 3;
916 
917 	/*
918 	 * 2nd byte of 3-byte VEX prefix
919 	 * ~ means bit inverted encoding
920 	 *
921 	 *    7                           0
922 	 *  +---+---+---+---+---+---+---+---+
923 	 *  |~R |~X |~B |         m         |
924 	 *  +---+---+---+---+---+---+---+---+
925 	 */
926 	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
927 	/*
928 	 * 3rd byte of 3-byte VEX prefix
929 	 *
930 	 *    7                           0
931 	 *  +---+---+---+---+---+---+---+---+
932 	 *  | W |     ~vvvv     | L |   pp  |
933 	 *  +---+---+---+---+---+---+---+---+
934 	 */
935 	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
936 
937 	EMIT3(b0, b1, b2);
938 	*pprog = prog;
939 }
940 
941 /* emit BMI2 shift instruction */
942 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
943 {
944 	u8 *prog = *pprog;
945 	bool r = is_ereg(dst_reg);
946 	u8 m = 2; /* escape code 0f38 */
947 
948 	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
949 	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
950 	*pprog = prog;
951 }
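/*
 * Worked example (annotation, not part of the original source):
 * emit_shiftx(&prog, BPF_REG_0, BPF_REG_2, true, 1), where op 1 selects
 * the 0x66 prefix (shlx), encodes "shlx rax, rax, rsi" as
 *
 *	C4 E2 C9 F7 C0
 *
 * C4 = 3-byte VEX escape, E2 = inverted R/X/B + map 0F38, C9 = W=1,
 * ~vvvv = ~rsi, L=0, pp=66, followed by the F7 opcode and a
 * register-direct ModRM with dst_reg in both the reg and r/m fields
 * (the shift count is carried in vvvv).
 */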
952 
953 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
954 
955 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
956 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
957 {
958 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
959 	struct bpf_insn *insn = bpf_prog->insnsi;
960 	bool callee_regs_used[4] = {};
961 	int insn_cnt = bpf_prog->len;
962 	bool tail_call_seen = false;
963 	bool seen_exit = false;
964 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
965 	int i, excnt = 0;
966 	int ilen, proglen = 0;
967 	u8 *prog = temp;
968 	int err;
969 
970 	detect_reg_usage(insn, insn_cnt, callee_regs_used,
971 			 &tail_call_seen);
972 
973 	/* tail call's presence in current prog implies it is reachable */
974 	tail_call_reachable |= tail_call_seen;
975 
976 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
977 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
978 		      bpf_prog->aux->func_idx != 0);
979 	push_callee_regs(&prog, callee_regs_used);
980 
981 	ilen = prog - temp;
982 	if (rw_image)
983 		memcpy(rw_image + proglen, temp, ilen);
984 	proglen += ilen;
985 	addrs[0] = proglen;
986 	prog = temp;
987 
988 	for (i = 1; i <= insn_cnt; i++, insn++) {
989 		const s32 imm32 = insn->imm;
990 		u32 dst_reg = insn->dst_reg;
991 		u32 src_reg = insn->src_reg;
992 		u8 b2 = 0, b3 = 0;
993 		u8 *start_of_ldx;
994 		s64 jmp_offset;
995 		u8 jmp_cond;
996 		u8 *func;
997 		int nops;
998 
999 		switch (insn->code) {
1000 			/* ALU */
1001 		case BPF_ALU | BPF_ADD | BPF_X:
1002 		case BPF_ALU | BPF_SUB | BPF_X:
1003 		case BPF_ALU | BPF_AND | BPF_X:
1004 		case BPF_ALU | BPF_OR | BPF_X:
1005 		case BPF_ALU | BPF_XOR | BPF_X:
1006 		case BPF_ALU64 | BPF_ADD | BPF_X:
1007 		case BPF_ALU64 | BPF_SUB | BPF_X:
1008 		case BPF_ALU64 | BPF_AND | BPF_X:
1009 		case BPF_ALU64 | BPF_OR | BPF_X:
1010 		case BPF_ALU64 | BPF_XOR | BPF_X:
1011 			maybe_emit_mod(&prog, dst_reg, src_reg,
1012 				       BPF_CLASS(insn->code) == BPF_ALU64);
1013 			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1014 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1015 			break;
1016 
1017 		case BPF_ALU64 | BPF_MOV | BPF_X:
1018 		case BPF_ALU | BPF_MOV | BPF_X:
1019 			emit_mov_reg(&prog,
1020 				     BPF_CLASS(insn->code) == BPF_ALU64,
1021 				     dst_reg, src_reg);
1022 			break;
1023 
1024 			/* neg dst */
1025 		case BPF_ALU | BPF_NEG:
1026 		case BPF_ALU64 | BPF_NEG:
1027 			maybe_emit_1mod(&prog, dst_reg,
1028 					BPF_CLASS(insn->code) == BPF_ALU64);
1029 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1030 			break;
1031 
1032 		case BPF_ALU | BPF_ADD | BPF_K:
1033 		case BPF_ALU | BPF_SUB | BPF_K:
1034 		case BPF_ALU | BPF_AND | BPF_K:
1035 		case BPF_ALU | BPF_OR | BPF_K:
1036 		case BPF_ALU | BPF_XOR | BPF_K:
1037 		case BPF_ALU64 | BPF_ADD | BPF_K:
1038 		case BPF_ALU64 | BPF_SUB | BPF_K:
1039 		case BPF_ALU64 | BPF_AND | BPF_K:
1040 		case BPF_ALU64 | BPF_OR | BPF_K:
1041 		case BPF_ALU64 | BPF_XOR | BPF_K:
1042 			maybe_emit_1mod(&prog, dst_reg,
1043 					BPF_CLASS(insn->code) == BPF_ALU64);
1044 
1045 			/*
1046 			 * b3 holds the 'normal' opcode; b2 is the short form, only
1047 			 * valid when dst is eax/rax.
1048 			 */
1049 			switch (BPF_OP(insn->code)) {
1050 			case BPF_ADD:
1051 				b3 = 0xC0;
1052 				b2 = 0x05;
1053 				break;
1054 			case BPF_SUB:
1055 				b3 = 0xE8;
1056 				b2 = 0x2D;
1057 				break;
1058 			case BPF_AND:
1059 				b3 = 0xE0;
1060 				b2 = 0x25;
1061 				break;
1062 			case BPF_OR:
1063 				b3 = 0xC8;
1064 				b2 = 0x0D;
1065 				break;
1066 			case BPF_XOR:
1067 				b3 = 0xF0;
1068 				b2 = 0x35;
1069 				break;
1070 			}
1071 
1072 			if (is_imm8(imm32))
1073 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1074 			else if (is_axreg(dst_reg))
1075 				EMIT1_off32(b2, imm32);
1076 			else
1077 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1078 			break;
1079 
1080 		case BPF_ALU64 | BPF_MOV | BPF_K:
1081 		case BPF_ALU | BPF_MOV | BPF_K:
1082 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1083 				       dst_reg, imm32);
1084 			break;
1085 
1086 		case BPF_LD | BPF_IMM | BPF_DW:
1087 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1088 			insn++;
1089 			i++;
1090 			break;
1091 
1092 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1093 		case BPF_ALU | BPF_MOD | BPF_X:
1094 		case BPF_ALU | BPF_DIV | BPF_X:
1095 		case BPF_ALU | BPF_MOD | BPF_K:
1096 		case BPF_ALU | BPF_DIV | BPF_K:
1097 		case BPF_ALU64 | BPF_MOD | BPF_X:
1098 		case BPF_ALU64 | BPF_DIV | BPF_X:
1099 		case BPF_ALU64 | BPF_MOD | BPF_K:
1100 		case BPF_ALU64 | BPF_DIV | BPF_K: {
1101 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1102 
1103 			if (dst_reg != BPF_REG_0)
1104 				EMIT1(0x50); /* push rax */
1105 			if (dst_reg != BPF_REG_3)
1106 				EMIT1(0x52); /* push rdx */
1107 
1108 			if (BPF_SRC(insn->code) == BPF_X) {
1109 				if (src_reg == BPF_REG_0 ||
1110 				    src_reg == BPF_REG_3) {
1111 					/* mov r11, src_reg */
1112 					EMIT_mov(AUX_REG, src_reg);
1113 					src_reg = AUX_REG;
1114 				}
1115 			} else {
1116 				/* mov r11, imm32 */
1117 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1118 				src_reg = AUX_REG;
1119 			}
1120 
1121 			if (dst_reg != BPF_REG_0)
1122 				/* mov rax, dst_reg */
1123 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1124 
1125 			/*
1126 			 * xor edx, edx
1127 			 * equivalent to 'xor rdx, rdx', but one byte less
1128 			 */
1129 			EMIT2(0x31, 0xd2);
1130 
1131 			/* div src_reg */
1132 			maybe_emit_1mod(&prog, src_reg, is64);
1133 			EMIT2(0xF7, add_1reg(0xF0, src_reg));
1134 
1135 			if (BPF_OP(insn->code) == BPF_MOD &&
1136 			    dst_reg != BPF_REG_3)
1137 				/* mov dst_reg, rdx */
1138 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1139 			else if (BPF_OP(insn->code) == BPF_DIV &&
1140 				 dst_reg != BPF_REG_0)
1141 				/* mov dst_reg, rax */
1142 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1143 
1144 			if (dst_reg != BPF_REG_3)
1145 				EMIT1(0x5A); /* pop rdx */
1146 			if (dst_reg != BPF_REG_0)
1147 				EMIT1(0x58); /* pop rax */
1148 			break;
1149 		}
1150 
1151 		case BPF_ALU | BPF_MUL | BPF_K:
1152 		case BPF_ALU64 | BPF_MUL | BPF_K:
1153 			maybe_emit_mod(&prog, dst_reg, dst_reg,
1154 				       BPF_CLASS(insn->code) == BPF_ALU64);
1155 
1156 			if (is_imm8(imm32))
1157 				/* imul dst_reg, dst_reg, imm8 */
1158 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1159 				      imm32);
1160 			else
1161 				/* imul dst_reg, dst_reg, imm32 */
1162 				EMIT2_off32(0x69,
1163 					    add_2reg(0xC0, dst_reg, dst_reg),
1164 					    imm32);
1165 			break;
1166 
1167 		case BPF_ALU | BPF_MUL | BPF_X:
1168 		case BPF_ALU64 | BPF_MUL | BPF_X:
1169 			maybe_emit_mod(&prog, src_reg, dst_reg,
1170 				       BPF_CLASS(insn->code) == BPF_ALU64);
1171 
1172 			/* imul dst_reg, src_reg */
1173 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1174 			break;
1175 
1176 			/* Shifts */
1177 		case BPF_ALU | BPF_LSH | BPF_K:
1178 		case BPF_ALU | BPF_RSH | BPF_K:
1179 		case BPF_ALU | BPF_ARSH | BPF_K:
1180 		case BPF_ALU64 | BPF_LSH | BPF_K:
1181 		case BPF_ALU64 | BPF_RSH | BPF_K:
1182 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1183 			maybe_emit_1mod(&prog, dst_reg,
1184 					BPF_CLASS(insn->code) == BPF_ALU64);
1185 
1186 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1187 			if (imm32 == 1)
1188 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1189 			else
1190 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1191 			break;
1192 
1193 		case BPF_ALU | BPF_LSH | BPF_X:
1194 		case BPF_ALU | BPF_RSH | BPF_X:
1195 		case BPF_ALU | BPF_ARSH | BPF_X:
1196 		case BPF_ALU64 | BPF_LSH | BPF_X:
1197 		case BPF_ALU64 | BPF_RSH | BPF_X:
1198 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1199 			/* BMI2 shifts aren't better when shift count is already in rcx */
1200 			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1201 				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1202 				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1203 				u8 op;
1204 
1205 				switch (BPF_OP(insn->code)) {
1206 				case BPF_LSH:
1207 					op = 1; /* prefix 0x66 */
1208 					break;
1209 				case BPF_RSH:
1210 					op = 3; /* prefix 0xf2 */
1211 					break;
1212 				case BPF_ARSH:
1213 					op = 2; /* prefix 0xf3 */
1214 					break;
1215 				}
1216 
1217 				emit_shiftx(&prog, dst_reg, src_reg, w, op);
1218 
1219 				break;
1220 			}
1221 
1222 			if (src_reg != BPF_REG_4) { /* common case */
1223 				/* Check for bad case when dst_reg == rcx */
1224 				if (dst_reg == BPF_REG_4) {
1225 					/* mov r11, dst_reg */
1226 					EMIT_mov(AUX_REG, dst_reg);
1227 					dst_reg = AUX_REG;
1228 				} else {
1229 					EMIT1(0x51); /* push rcx */
1230 				}
1231 				/* mov rcx, src_reg */
1232 				EMIT_mov(BPF_REG_4, src_reg);
1233 			}
1234 
1235 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1236 			maybe_emit_1mod(&prog, dst_reg,
1237 					BPF_CLASS(insn->code) == BPF_ALU64);
1238 
1239 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1240 			EMIT2(0xD3, add_1reg(b3, dst_reg));
1241 
1242 			if (src_reg != BPF_REG_4) {
1243 				if (insn->dst_reg == BPF_REG_4)
1244 					/* mov dst_reg, r11 */
1245 					EMIT_mov(insn->dst_reg, AUX_REG);
1246 				else
1247 					EMIT1(0x59); /* pop rcx */
1248 			}
1249 
1250 			break;
1251 
1252 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1253 			switch (imm32) {
1254 			case 16:
1255 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
1256 				EMIT1(0x66);
1257 				if (is_ereg(dst_reg))
1258 					EMIT1(0x41);
1259 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1260 
1261 				/* Emit 'movzwl eax, ax' */
1262 				if (is_ereg(dst_reg))
1263 					EMIT3(0x45, 0x0F, 0xB7);
1264 				else
1265 					EMIT2(0x0F, 0xB7);
1266 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1267 				break;
1268 			case 32:
1269 				/* Emit 'bswap eax' to swap lower 4 bytes */
1270 				if (is_ereg(dst_reg))
1271 					EMIT2(0x41, 0x0F);
1272 				else
1273 					EMIT1(0x0F);
1274 				EMIT1(add_1reg(0xC8, dst_reg));
1275 				break;
1276 			case 64:
1277 				/* Emit 'bswap rax' to swap 8 bytes */
1278 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1279 				      add_1reg(0xC8, dst_reg));
1280 				break;
1281 			}
1282 			break;
1283 
1284 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1285 			switch (imm32) {
1286 			case 16:
1287 				/*
1288 				 * Emit 'movzwl eax, ax' to zero extend 16-bit
1289 				 * into 64 bit
1290 				 */
1291 				if (is_ereg(dst_reg))
1292 					EMIT3(0x45, 0x0F, 0xB7);
1293 				else
1294 					EMIT2(0x0F, 0xB7);
1295 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1296 				break;
1297 			case 32:
1298 				/* Emit 'mov eax, eax' to clear upper 32-bits */
1299 				if (is_ereg(dst_reg))
1300 					EMIT1(0x45);
1301 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1302 				break;
1303 			case 64:
1304 				/* nop */
1305 				break;
1306 			}
1307 			break;
1308 
1309 			/* speculation barrier */
1310 		case BPF_ST | BPF_NOSPEC:
1311 			EMIT_LFENCE();
1312 			break;
1313 
1314 			/* ST: *(u8*)(dst_reg + off) = imm */
1315 		case BPF_ST | BPF_MEM | BPF_B:
1316 			if (is_ereg(dst_reg))
1317 				EMIT2(0x41, 0xC6);
1318 			else
1319 				EMIT1(0xC6);
1320 			goto st;
1321 		case BPF_ST | BPF_MEM | BPF_H:
1322 			if (is_ereg(dst_reg))
1323 				EMIT3(0x66, 0x41, 0xC7);
1324 			else
1325 				EMIT2(0x66, 0xC7);
1326 			goto st;
1327 		case BPF_ST | BPF_MEM | BPF_W:
1328 			if (is_ereg(dst_reg))
1329 				EMIT2(0x41, 0xC7);
1330 			else
1331 				EMIT1(0xC7);
1332 			goto st;
1333 		case BPF_ST | BPF_MEM | BPF_DW:
1334 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1335 
1336 st:			if (is_imm8(insn->off))
1337 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
1338 			else
1339 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1340 
1341 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1342 			break;
1343 
1344 			/* STX: *(u8*)(dst_reg + off) = src_reg */
1345 		case BPF_STX | BPF_MEM | BPF_B:
1346 		case BPF_STX | BPF_MEM | BPF_H:
1347 		case BPF_STX | BPF_MEM | BPF_W:
1348 		case BPF_STX | BPF_MEM | BPF_DW:
1349 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1350 			break;
1351 
1352 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
1353 		case BPF_LDX | BPF_MEM | BPF_B:
1354 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1355 		case BPF_LDX | BPF_MEM | BPF_H:
1356 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1357 		case BPF_LDX | BPF_MEM | BPF_W:
1358 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1359 		case BPF_LDX | BPF_MEM | BPF_DW:
1360 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1361 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1362 				/* Though the verifier prevents negative insn->off in BPF_PROBE_MEM,
1363 				 * add abs(insn->off) to the limit to make sure that a negative
1364 				 * offset won't be an issue.
1365 				 * insn->off is s16, so it won't affect valid pointers.
1366 				 */
1367 				u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
1368 				u8 *end_of_jmp1, *end_of_jmp2;
1369 
1370 				/* Conservatively check that src_reg + insn->off is a kernel address:
1371 				 * 1. src_reg + insn->off >= limit
1372 				 * 2. src_reg + insn->off doesn't become a small positive value.
1373 				 * Cannot do src_reg + insn->off >= limit in one branch,
1374 				 * since it needs two spare registers, but JIT has only one.
1375 				 */
1376 
1377 				/* movabsq r11, limit */
1378 				EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1379 				EMIT((u32)limit, 4);
1380 				EMIT(limit >> 32, 4);
1381 				/* cmp src_reg, r11 */
1382 				maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1383 				EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1384 				/* if unsigned '<' goto end_of_jmp2 */
1385 				EMIT2(X86_JB, 0);
1386 				end_of_jmp1 = prog;
1387 
1388 				/* mov r11, src_reg */
1389 				emit_mov_reg(&prog, true, AUX_REG, src_reg);
1390 				/* add r11, insn->off */
1391 				maybe_emit_1mod(&prog, AUX_REG, true);
1392 				EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1393 				/* jmp if not carry to start_of_ldx
1394 				 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
1395 				 * that has to be rejected.
1396 				 */
1397 				EMIT2(0x73 /* JNC */, 0);
1398 				end_of_jmp2 = prog;
1399 
1400 				/* xor dst_reg, dst_reg */
1401 				emit_mov_imm32(&prog, false, dst_reg, 0);
1402 				/* jmp byte_after_ldx */
1403 				EMIT2(0xEB, 0);
1404 
1405 				/* populate jmp_offset for JB above to jump to xor dst_reg */
1406 				end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
1407 				/* populate jmp_offset for JNC above to jump to start_of_ldx */
1408 				start_of_ldx = prog;
1409 				end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
1410 			}
1411 			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1412 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1413 				struct exception_table_entry *ex;
1414 				u8 *_insn = image + proglen + (start_of_ldx - temp);
1415 				s64 delta;
1416 
1417 				/* populate jmp_offset for JMP above */
1418 				start_of_ldx[-1] = prog - start_of_ldx;
1419 
1420 				if (!bpf_prog->aux->extable)
1421 					break;
1422 
1423 				if (excnt >= bpf_prog->aux->num_exentries) {
1424 					pr_err("ex gen bug\n");
1425 					return -EFAULT;
1426 				}
1427 				ex = &bpf_prog->aux->extable[excnt++];
1428 
1429 				delta = _insn - (u8 *)&ex->insn;
1430 				if (!is_simm32(delta)) {
1431 					pr_err("extable->insn doesn't fit into 32-bit\n");
1432 					return -EFAULT;
1433 				}
1434 				/* switch ex to rw buffer for writes */
1435 				ex = (void *)rw_image + ((void *)ex - (void *)image);
1436 
1437 				ex->insn = delta;
1438 
1439 				ex->data = EX_TYPE_BPF;
1440 
1441 				if (dst_reg > BPF_REG_9) {
1442 					pr_err("verifier error\n");
1443 					return -EFAULT;
1444 				}
1445 				/*
1446 				 * Compute size of x86 insn and its target dest x86 register.
1447 				 * ex_handler_bpf() will use lower 8 bits to adjust
1448 				 * pt_regs->ip to jump over this x86 instruction
1449 				 * and upper bits to figure out which pt_regs to zero out.
1450 				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1451 				 * of 4 bytes will be ignored and rbx will be zero inited.
1452 				 */
1453 				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1454 			}
1455 			break;
1456 
1457 		case BPF_STX | BPF_ATOMIC | BPF_W:
1458 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1459 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
1460 			    insn->imm == (BPF_OR | BPF_FETCH) ||
1461 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
1462 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1463 				u32 real_src_reg = src_reg;
1464 				u32 real_dst_reg = dst_reg;
1465 				u8 *branch_target;
1466 
1467 				/*
1468 				 * Can't be implemented with a single x86 insn.
1469 				 * Need to do a CMPXCHG loop.
1470 				 */
1471 
1472 				/* Will need RAX as a CMPXCHG operand so save R0 */
1473 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1474 				if (src_reg == BPF_REG_0)
1475 					real_src_reg = BPF_REG_AX;
1476 				if (dst_reg == BPF_REG_0)
1477 					real_dst_reg = BPF_REG_AX;
1478 
1479 				branch_target = prog;
1480 				/* Load old value */
1481 				emit_ldx(&prog, BPF_SIZE(insn->code),
1482 					 BPF_REG_0, real_dst_reg, insn->off);
1483 				/*
1484 				 * Perform the (commutative) operation locally,
1485 				 * put the result in the AUX_REG.
1486 				 */
1487 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1488 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1489 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1490 				      add_2reg(0xC0, AUX_REG, real_src_reg));
1491 				/* Attempt to swap in new value */
1492 				err = emit_atomic(&prog, BPF_CMPXCHG,
1493 						  real_dst_reg, AUX_REG,
1494 						  insn->off,
1495 						  BPF_SIZE(insn->code));
1496 				if (WARN_ON(err))
1497 					return err;
1498 				/*
1499 				 * ZF tells us whether we won the race. If it's
1500 				 * cleared we need to try again.
1501 				 */
1502 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
1503 				/* Return the pre-modification value */
1504 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1505 				/* Restore R0 after clobbering RAX */
1506 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1507 				break;
1508 			}
1509 
1510 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1511 					  insn->off, BPF_SIZE(insn->code));
1512 			if (err)
1513 				return err;
1514 			break;
1515 
1516 			/* call */
1517 		case BPF_JMP | BPF_CALL:
1518 			func = (u8 *) __bpf_call_base + imm32;
1519 			if (tail_call_reachable) {
1520 				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1521 				EMIT3_off32(0x48, 0x8B, 0x85,
1522 					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
1523 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1524 					return -EINVAL;
1525 			} else {
1526 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1527 					return -EINVAL;
1528 			}
1529 			break;
1530 
1531 		case BPF_JMP | BPF_TAIL_CALL:
1532 			if (imm32)
1533 				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1534 							  &prog, image + addrs[i - 1],
1535 							  callee_regs_used,
1536 							  bpf_prog->aux->stack_depth,
1537 							  ctx);
1538 			else
1539 				emit_bpf_tail_call_indirect(&prog,
1540 							    callee_regs_used,
1541 							    bpf_prog->aux->stack_depth,
1542 							    image + addrs[i - 1],
1543 							    ctx);
1544 			break;
1545 
1546 			/* cond jump */
1547 		case BPF_JMP | BPF_JEQ | BPF_X:
1548 		case BPF_JMP | BPF_JNE | BPF_X:
1549 		case BPF_JMP | BPF_JGT | BPF_X:
1550 		case BPF_JMP | BPF_JLT | BPF_X:
1551 		case BPF_JMP | BPF_JGE | BPF_X:
1552 		case BPF_JMP | BPF_JLE | BPF_X:
1553 		case BPF_JMP | BPF_JSGT | BPF_X:
1554 		case BPF_JMP | BPF_JSLT | BPF_X:
1555 		case BPF_JMP | BPF_JSGE | BPF_X:
1556 		case BPF_JMP | BPF_JSLE | BPF_X:
1557 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1558 		case BPF_JMP32 | BPF_JNE | BPF_X:
1559 		case BPF_JMP32 | BPF_JGT | BPF_X:
1560 		case BPF_JMP32 | BPF_JLT | BPF_X:
1561 		case BPF_JMP32 | BPF_JGE | BPF_X:
1562 		case BPF_JMP32 | BPF_JLE | BPF_X:
1563 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1564 		case BPF_JMP32 | BPF_JSLT | BPF_X:
1565 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1566 		case BPF_JMP32 | BPF_JSLE | BPF_X:
1567 			/* cmp dst_reg, src_reg */
1568 			maybe_emit_mod(&prog, dst_reg, src_reg,
1569 				       BPF_CLASS(insn->code) == BPF_JMP);
1570 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1571 			goto emit_cond_jmp;
1572 
1573 		case BPF_JMP | BPF_JSET | BPF_X:
1574 		case BPF_JMP32 | BPF_JSET | BPF_X:
1575 			/* test dst_reg, src_reg */
1576 			maybe_emit_mod(&prog, dst_reg, src_reg,
1577 				       BPF_CLASS(insn->code) == BPF_JMP);
1578 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1579 			goto emit_cond_jmp;
1580 
1581 		case BPF_JMP | BPF_JSET | BPF_K:
1582 		case BPF_JMP32 | BPF_JSET | BPF_K:
1583 			/* test dst_reg, imm32 */
1584 			maybe_emit_1mod(&prog, dst_reg,
1585 					BPF_CLASS(insn->code) == BPF_JMP);
1586 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1587 			goto emit_cond_jmp;
1588 
1589 		case BPF_JMP | BPF_JEQ | BPF_K:
1590 		case BPF_JMP | BPF_JNE | BPF_K:
1591 		case BPF_JMP | BPF_JGT | BPF_K:
1592 		case BPF_JMP | BPF_JLT | BPF_K:
1593 		case BPF_JMP | BPF_JGE | BPF_K:
1594 		case BPF_JMP | BPF_JLE | BPF_K:
1595 		case BPF_JMP | BPF_JSGT | BPF_K:
1596 		case BPF_JMP | BPF_JSLT | BPF_K:
1597 		case BPF_JMP | BPF_JSGE | BPF_K:
1598 		case BPF_JMP | BPF_JSLE | BPF_K:
1599 		case BPF_JMP32 | BPF_JEQ | BPF_K:
1600 		case BPF_JMP32 | BPF_JNE | BPF_K:
1601 		case BPF_JMP32 | BPF_JGT | BPF_K:
1602 		case BPF_JMP32 | BPF_JLT | BPF_K:
1603 		case BPF_JMP32 | BPF_JGE | BPF_K:
1604 		case BPF_JMP32 | BPF_JLE | BPF_K:
1605 		case BPF_JMP32 | BPF_JSGT | BPF_K:
1606 		case BPF_JMP32 | BPF_JSLT | BPF_K:
1607 		case BPF_JMP32 | BPF_JSGE | BPF_K:
1608 		case BPF_JMP32 | BPF_JSLE | BPF_K:
1609 			/* test dst_reg, dst_reg to save one extra byte */
1610 			if (imm32 == 0) {
1611 				maybe_emit_mod(&prog, dst_reg, dst_reg,
1612 					       BPF_CLASS(insn->code) == BPF_JMP);
1613 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1614 				goto emit_cond_jmp;
1615 			}
1616 
1617 			/* cmp dst_reg, imm8/32 */
1618 			maybe_emit_1mod(&prog, dst_reg,
1619 					BPF_CLASS(insn->code) == BPF_JMP);
1620 
1621 			if (is_imm8(imm32))
1622 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1623 			else
1624 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1625 
1626 emit_cond_jmp:		/* Convert BPF opcode to x86 */
1627 			switch (BPF_OP(insn->code)) {
1628 			case BPF_JEQ:
1629 				jmp_cond = X86_JE;
1630 				break;
1631 			case BPF_JSET:
1632 			case BPF_JNE:
1633 				jmp_cond = X86_JNE;
1634 				break;
1635 			case BPF_JGT:
1636 				/* GT is unsigned '>', JA in x86 */
1637 				jmp_cond = X86_JA;
1638 				break;
1639 			case BPF_JLT:
1640 				/* LT is unsigned '<', JB in x86 */
1641 				jmp_cond = X86_JB;
1642 				break;
1643 			case BPF_JGE:
1644 				/* GE is unsigned '>=', JAE in x86 */
1645 				jmp_cond = X86_JAE;
1646 				break;
1647 			case BPF_JLE:
1648 				/* LE is unsigned '<=', JBE in x86 */
1649 				jmp_cond = X86_JBE;
1650 				break;
1651 			case BPF_JSGT:
1652 				/* Signed '>', GT in x86 */
1653 				jmp_cond = X86_JG;
1654 				break;
1655 			case BPF_JSLT:
1656 				/* Signed '<', LT in x86 */
1657 				jmp_cond = X86_JL;
1658 				break;
1659 			case BPF_JSGE:
1660 				/* Signed '>=', GE in x86 */
1661 				jmp_cond = X86_JGE;
1662 				break;
1663 			case BPF_JSLE:
1664 				/* Signed '<=', LE in x86 */
1665 				jmp_cond = X86_JLE;
1666 				break;
1667 			default: /* to silence GCC warning */
1668 				return -EFAULT;
1669 			}
1670 			jmp_offset = addrs[i + insn->off] - addrs[i];
1671 			if (is_imm8(jmp_offset)) {
1672 				if (jmp_padding) {
1673 					/* To keep the jmp_offset valid, the extra bytes are
1674 					 * padded before the jump insn, so we subtract the
1675 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1676 					 *
1677 					 * If the previous pass already emits an imm8
1678 					 * jmp_cond, then this BPF insn won't shrink, so
1679 					 * "nops" is 0.
1680 					 *
1681 					 * On the other hand, if the previous pass emits an
1682 					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
1683 					 * keep the image from shrinking further.
1684 					 *
1685 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1686 					 *     is 2 bytes, so the size difference is 4 bytes.
1687 					 */
1688 					nops = INSN_SZ_DIFF - 2;
1689 					if (nops != 0 && nops != 4) {
1690 						pr_err("unexpected jmp_cond padding: %d bytes\n",
1691 						       nops);
1692 						return -EFAULT;
1693 					}
1694 					emit_nops(&prog, nops);
1695 				}
1696 				EMIT2(jmp_cond, jmp_offset);
1697 			} else if (is_simm32(jmp_offset)) {
1698 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1699 			} else {
1700 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1701 				return -EFAULT;
1702 			}
1703 
1704 			break;
1705 
1706 		case BPF_JMP | BPF_JA:
1707 			if (insn->off == -1)
1708 				/* -1 jmp instructions will always jump
1709 				 * backwards two bytes. Explicitly handling
1710 				 * this case avoids wasting too many passes
1711 				 * when there are long sequences of replaced
1712 				 * dead code.
1713 				 */
1714 				jmp_offset = -2;
1715 			else
1716 				jmp_offset = addrs[i + insn->off] - addrs[i];
1717 
1718 			if (!jmp_offset) {
1719 				/*
1720 				 * If jmp_padding is enabled, the extra nops will
1721 				 * be inserted. Otherwise, optimize out nop jumps.
1722 				 */
1723 				if (jmp_padding) {
1724 					/* There are 3 possible conditions.
1725 					 * (1) This BPF_JA is already optimized out in
1726 					 *     the previous run, so there is no need
1727 					 *     to pad any extra byte (0 byte).
1728 					 * (2) The previous pass emits an imm8 jmp,
1729 					 *     so we pad 2 bytes to match the previous
1730 					 *     insn size.
1731 					 * (3) Similarly, the previous pass emits an
1732 					 *     imm32 jmp, and 5 bytes is padded.
1733 					 */
1734 					nops = INSN_SZ_DIFF;
1735 					if (nops != 0 && nops != 2 && nops != 5) {
1736 						pr_err("unexpected nop jump padding: %d bytes\n",
1737 						       nops);
1738 						return -EFAULT;
1739 					}
1740 					emit_nops(&prog, nops);
1741 				}
1742 				break;
1743 			}
1744 emit_jmp:
1745 			if (is_imm8(jmp_offset)) {
1746 				if (jmp_padding) {
1747 					/* To avoid breaking jmp_offset, the extra bytes
1748 					 * are padded before the actual jmp insn, so
1749 					 * 2 bytes is subtracted from INSN_SZ_DIFF.
1750 					 *
1751 					 * If the previous pass already emits an imm8
1752 					 * jmp, there is nothing to pad (0 byte).
1753 					 *
1754 					 * If it emits an imm32 jmp (5 bytes) previously
1755 					 * and now an imm8 jmp (2 bytes), then we pad
1756 					 * (5 - 2 = 3) bytes to stop the image from
1757 					 * shrinking further.
1758 					 */
1759 					nops = INSN_SZ_DIFF - 2;
1760 					if (nops != 0 && nops != 3) {
1761 						pr_err("unexpected jump padding: %d bytes\n",
1762 						       nops);
1763 						return -EFAULT;
1764 					}
1765 					emit_nops(&prog, INSN_SZ_DIFF - 2);
1766 				}
1767 				EMIT2(0xEB, jmp_offset);
1768 			} else if (is_simm32(jmp_offset)) {
1769 				EMIT1_off32(0xE9, jmp_offset);
1770 			} else {
1771 				pr_err("jmp gen bug %llx\n", jmp_offset);
1772 				return -EFAULT;
1773 			}
1774 			break;
1775 
1776 		case BPF_JMP | BPF_EXIT:
1777 			if (seen_exit) {
1778 				jmp_offset = ctx->cleanup_addr - addrs[i];
1779 				goto emit_jmp;
1780 			}
1781 			seen_exit = true;
1782 			/* Update cleanup_addr */
1783 			ctx->cleanup_addr = proglen;
1784 			pop_callee_regs(&prog, callee_regs_used);
1785 			EMIT1(0xC9);         /* leave */
1786 			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
1787 			break;
1788 
1789 		default:
1790 			/*
1791 			 * By design the x86-64 JIT should support all BPF instructions.
1792 			 * This error will be seen if a new instruction was added
1793 			 * to the interpreter but not to the JIT, or if there is
1794 			 * junk in bpf_prog.
1795 			 */
1796 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1797 			return -EINVAL;
1798 		}
1799 
1800 		ilen = prog - temp;
1801 		if (ilen > BPF_MAX_INSN_SIZE) {
1802 			pr_err("bpf_jit: fatal insn size error\n");
1803 			return -EFAULT;
1804 		}
1805 
1806 		if (image) {
1807 			/*
1808 			 * When populating the image, assert that:
1809 			 *
1810 			 *  i) We do not write beyond the allocated space, and
1811 			 * ii) addrs[i] did not change from the prior run, in order
1812 			 *     to validate assumptions made for computing branch
1813 			 *     displacements.
1814 			 */
1815 			if (unlikely(proglen + ilen > oldproglen ||
1816 				     proglen + ilen != addrs[i])) {
1817 				pr_err("bpf_jit: fatal error\n");
1818 				return -EFAULT;
1819 			}
1820 			memcpy(rw_image + proglen, temp, ilen);
1821 		}
1822 		proglen += ilen;
1823 		addrs[i] = proglen;
1824 		prog = temp;
1825 	}
1826 
1827 	if (image && excnt != bpf_prog->aux->num_exentries) {
1828 		pr_err("extable is not populated\n");
1829 		return -EFAULT;
1830 	}
1831 	return proglen;
1832 }
1833 
1834 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1835 		      int stack_size)
1836 {
1837 	int i, j, arg_size, nr_regs;
1838 	/* Store function arguments to stack.
1839 	 * For a function that accepts two pointers the sequence will be:
1840 	 * mov QWORD PTR [rbp-0x10],rdi
1841 	 * mov QWORD PTR [rbp-0x8],rsi
1842 	 */
1843 	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
1844 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
1845 			nr_regs = (m->arg_size[i] + 7) / 8;
1846 			arg_size = 8;
1847 		} else {
1848 			nr_regs = 1;
1849 			arg_size = m->arg_size[i];
1850 		}
1851 
1852 		while (nr_regs) {
1853 			emit_stx(prog, bytes_to_bpf_size(arg_size),
1854 				 BPF_REG_FP,
1855 				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
1856 				 -(stack_size - j * 8));
1857 			nr_regs--;
1858 			j++;
1859 		}
1860 	}
1861 }
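/* For illustration (hypothetical prototype, not from this file): for
 *
 *	int f(struct { u64 a, b; } s, void *p);
 *
 * the first argument is a BTF_FMODEL_STRUCT_ARG of size 16, so
 * nr_regs = (16 + 7) / 8 = 2 and it arrives in RDI:RSI, with p in RDX.
 * save_regs() above then spills three 8-byte slots:
 *
 *	mov QWORD PTR [rbp - stack_size],      rdi	// s, low half
 *	mov QWORD PTR [rbp - stack_size + 8],  rsi	// s, high half
 *	mov QWORD PTR [rbp - stack_size + 16], rdx	// p
 */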
1862 
1863 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1864 			 int stack_size)
1865 {
1866 	int i, j, arg_size, nr_regs;
1867 
1868 	/* Restore function arguments from stack.
1869 	 * For a function that accepts two pointers the sequence will be:
1870 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1871 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1872 	 */
1873 	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
1874 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
1875 			nr_regs = (m->arg_size[i] + 7) / 8;
1876 			arg_size = 8;
1877 		} else {
1878 			nr_regs = 1;
1879 			arg_size = m->arg_size[i];
1880 		}
1881 
1882 		while (nr_regs) {
1883 			emit_ldx(prog, bytes_to_bpf_size(arg_size),
1884 				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
1885 				 BPF_REG_FP,
1886 				 -(stack_size - j * 8));
1887 			nr_regs--;
1888 			j++;
1889 		}
1890 	}
1891 }
1892 
1893 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1894 			   struct bpf_tramp_link *l, int stack_size,
1895 			   int run_ctx_off, bool save_ret)
1896 {
1897 	u8 *prog = *pprog;
1898 	u8 *jmp_insn;
1899 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1900 	struct bpf_prog *p = l->link.prog;
1901 	u64 cookie = l->cookie;
1902 
1903 	/* mov rdi, cookie */
1904 	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
1905 
1906 	/* Prepare struct bpf_tramp_run_ctx.
1907 	 *
1908 	 * bpf_tramp_run_ctx is already preserved by
1909 	 * arch_prepare_bpf_trampoline().
1910 	 *
1911 	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
1912 	 */
1913 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
1914 
1915 	/* arg1: mov rdi, progs[i] */
1916 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1917 	/* arg2: lea rsi, [rbp - run_ctx_off] */
1918 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
1919 
1920 	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
1921 		return -EINVAL;
1922 	/* remember prog start time returned by __bpf_prog_enter */
1923 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1924 
1925 	/* if (__bpf_prog_enter*(prog) == 0)
1926 	 *	goto skip_exec_of_prog;
1927 	 */
1928 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
1929 	/* emit 2 nops that will be replaced with JE insn */
1930 	jmp_insn = prog;
1931 	emit_nops(&prog, 2);
1932 
1933 	/* arg1: lea rdi, [rbp - stack_size] */
1934 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1935 	/* arg2: progs[i]->insnsi for interpreter */
1936 	if (!p->jited)
1937 		emit_mov_imm64(&prog, BPF_REG_2,
1938 			       (long) p->insnsi >> 32,
1939 			       (u32) (long) p->insnsi);
1940 	/* call JITed bpf program or interpreter */
1941 	if (emit_call(&prog, p->bpf_func, prog))
1942 		return -EINVAL;
1943 
1944 	/*
1945 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1946 	 * value of the previous call, which is then passed on the stack to
1947 	 * the next BPF program.
1948 	 *
1949 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
1950 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1951 	 */
1952 	if (save_ret)
1953 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1954 
1955 	/* replace 2 nops with JE insn, since jmp target is known */
1956 	jmp_insn[0] = X86_JE;
1957 	jmp_insn[1] = prog - jmp_insn - 2;
1958 
1959 	/* arg1: mov rdi, progs[i] */
1960 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1961 	/* arg2: mov rsi, rbx <- start time in nsec */
1962 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1963 	/* arg3: lea rdx, [rbp - run_ctx_off] */
1964 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
1965 	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
1966 		return -EINVAL;
1967 
1968 	*pprog = prog;
1969 	return 0;
1970 }
1971 
1972 static void emit_align(u8 **pprog, u32 align)
1973 {
1974 	u8 *target, *prog = *pprog;
1975 
1976 	target = PTR_ALIGN(prog, align);
1977 	if (target != prog)
1978 		emit_nops(&prog, target - prog);
1979 
1980 	*pprog = prog;
1981 }
1982 
1983 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1984 {
1985 	u8 *prog = *pprog;
1986 	s64 offset;
1987 
1988 	offset = func - (ip + 2 + 4);
1989 	if (!is_simm32(offset)) {
1990 		pr_err("Target %p is out of range\n", func);
1991 		return -EINVAL;
1992 	}
1993 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1994 	*pprog = prog;
1995 	return 0;
1996 }
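/* For illustration: the near conditional jump emitted above is 6 bytes,
 * 0x0F (jmp_cond + 0x10) rel32, and rel32 is measured from the end of
 * the instruction, hence "func - (ip + 2 + 4)". E.g. with
 * jmp_cond == X86_JE (0x74) and a target 0x100 bytes past the end of
 * the instruction:
 *
 *	0F 84 00 01 00 00	je near, rel32 = 0x100
 */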
1997 
1998 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1999 		      struct bpf_tramp_links *tl, int stack_size,
2000 		      int run_ctx_off, bool save_ret)
2001 {
2002 	int i;
2003 	u8 *prog = *pprog;
2004 
2005 	for (i = 0; i < tl->nr_links; i++) {
2006 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2007 				    run_ctx_off, save_ret))
2008 			return -EINVAL;
2009 	}
2010 	*pprog = prog;
2011 	return 0;
2012 }
2013 
2014 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2015 			      struct bpf_tramp_links *tl, int stack_size,
2016 			      int run_ctx_off, u8 **branches)
2017 {
2018 	u8 *prog = *pprog;
2019 	int i;
2020 
2021 	/* The first fmod_ret program will receive a garbage return value.
2022 	 * Set this to 0 to avoid confusing the program.
2023 	 */
2024 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2025 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2026 	for (i = 0; i < tl->nr_links; i++) {
2027 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
2028 			return -EINVAL;
2029 
2030 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
2031 		 * if (*(u64 *)(rbp - 8) !=  0)
2032 		 *	goto do_fexit;
2033 		 */
2034 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
2035 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2036 
2037 		/* Save the location of the branch and generate 6 nops
2038 		 * (4 bytes for an offset and 2 bytes for the jump). These nops
2039 		 * are replaced with a conditional jump once do_fexit (i.e. the
2040 		 * start of the fexit invocation) is finalized.
2041 		 */
2042 		branches[i] = prog;
2043 		emit_nops(&prog, 4 + 2);
2044 	}
2045 
2046 	*pprog = prog;
2047 	return 0;
2048 }
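/* For illustration, the per-link sequence generated above ends with:
 *
 *	48 83 7d f8 00		cmp QWORD PTR [rbp - 0x8], 0x0
 *	<6 bytes of nops>	placeholder
 *
 * and the placeholder is later rewritten by emit_cond_near_jump() into
 *
 *	0F 85 <rel32>		jne do_fexit
 *
 * once the 16-byte-aligned address of do_fexit is known.
 */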
2049 
2050 /* Example:
2051  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2052  * its 'struct btf_func_model' will have nr_args=2
2053  * The assembly code when eth_type_trans is executing after trampoline:
2054  *
2055  * push rbp
2056  * mov rbp, rsp
2057  * sub rsp, 16                     // space for skb and dev
2058  * push rbx                        // temp regs to pass start time
2059  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
2060  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
2061  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2062  * mov rbx, rax                    // remember start time if bpf stats are enabled
2063  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
2064  * call addr_of_jited_FENTRY_prog
2065  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2066  * mov rsi, rbx                    // prog start time
2067  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2068  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
2069  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
2070  * pop rbx
2071  * leave
2072  * ret
2073  *
2074  * eth_type_trans has a 5 byte nop at the beginning. These 5 bytes will be
2075  * replaced with 'call generated_bpf_trampoline'. When it returns
2076  * eth_type_trans will continue executing with original skb and dev pointers.
2077  *
2078  * The assembly code when eth_type_trans is called from trampoline:
2079  *
2080  * push rbp
2081  * mov rbp, rsp
2082  * sub rsp, 24                     // space for skb, dev, return value
2083  * push rbx                        // temp regs to pass start time
2084  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
2085  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
2086  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2087  * mov rbx, rax                    // remember start time if bpf stats are enabled
2088  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2089  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
2090  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2091  * mov rsi, rbx                    // prog start time
2092  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2093  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
2094  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
2095  * call eth_type_trans+5           // execute body of eth_type_trans
2096  * mov qword ptr [rbp - 8], rax    // save return value
2097  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2098  * mov rbx, rax                    // remember start time if bpf stats are enabled
2099  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2100  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
2101  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2102  * mov rsi, rbx                    // prog start time
2103  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2104  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
2105  * pop rbx
2106  * leave
2107  * add rsp, 8                      // skip eth_type_trans's frame
2108  * ret                             // return to its caller
2109  */
2110 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
2111 				const struct btf_func_model *m, u32 flags,
2112 				struct bpf_tramp_links *tlinks,
2113 				void *func_addr)
2114 {
2115 	int ret, i, nr_args = m->nr_args, extra_nregs = 0;
2116 	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
2117 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2118 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2119 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2120 	void *orig_call = func_addr;
2121 	u8 **branches = NULL;
2122 	u8 *prog;
2123 	bool save_ret;
2124 
2125 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
2126 	if (nr_args > 6)
2127 		return -ENOTSUPP;
2128 
2129 	for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
2130 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2131 			extra_nregs += (m->arg_size[i] + 7) / 8 - 1;
2132 	}
2133 	if (nr_args + extra_nregs > 6)
2134 		return -ENOTSUPP;
2135 	stack_size += extra_nregs * 8;
2136 
2137 	/* Generated trampoline stack layout:
2138 	 *
2139 	 * RBP + 8         [ return address  ]
2140 	 * RBP + 0         [ RBP             ]
2141 	 *
2142 	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
2143 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
2144 	 *
2145 	 *                 [ reg_argN        ]  always
2146 	 *                 [ ...             ]
2147 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
2148 	 *
2149 	 * RBP - args_off  [ arg regs count  ]  always
2150 	 *
2151 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
2152 	 *
2153 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2154 	 */
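	/* For illustration: with two pointer arguments, no struct arguments,
	 * BPF_TRAMP_F_CALL_ORIG set and BPF_TRAMP_F_IP_ARG clear, the
	 * computation below gives
	 *
	 *	stack_size  = 2 * 8 + 8 = 24	args + saved return value
	 *	regs_off    = 24
	 *	args_off    = 32		arg regs count slot
	 *	ip_off      = 32		unused (no IP argument)
	 *	run_ctx_off = 32 + round_up(sizeof(struct bpf_tramp_run_ctx), 8)
	 */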
2155 
2156 	/* room for return value of orig_call or fentry prog */
2157 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2158 	if (save_ret)
2159 		stack_size += 8;
2160 
2161 	regs_off = stack_size;
2162 
2163 	/* args count  */
2164 	stack_size += 8;
2165 	args_off = stack_size;
2166 
2167 	if (flags & BPF_TRAMP_F_IP_ARG)
2168 		stack_size += 8; /* room for IP address argument */
2169 
2170 	ip_off = stack_size;
2171 
2172 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2173 	run_ctx_off = stack_size;
2174 
2175 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2176 		/* skip patched call instruction and point orig_call to actual
2177 		 * body of the kernel function.
2178 		 */
2179 		if (is_endbr(*(u32 *)orig_call))
2180 			orig_call += ENDBR_INSN_SIZE;
2181 		orig_call += X86_PATCH_SIZE;
2182 	}
2183 
2184 	prog = image;
2185 
2186 	EMIT_ENDBR();
2187 	EMIT1(0x55);		 /* push rbp */
2188 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2189 	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
2190 	EMIT1(0x53);		 /* push rbx */
2191 
2192 	/* Store number of argument registers of the traced function:
2193 	 *   mov rax, nr_args + extra_nregs
2194 	 *   mov QWORD PTR [rbp - args_off], rax
2195 	 */
2196 	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
2197 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
2198 
2199 	if (flags & BPF_TRAMP_F_IP_ARG) {
2200 		/* Store IP address of the traced function:
2201 		 * movabsq rax, func_addr
2202 		 * mov QWORD PTR [rbp - ip_off], rax
2203 		 */
2204 		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2205 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2206 	}
2207 
2208 	save_regs(m, &prog, nr_args, regs_off);
2209 
2210 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2211 		/* arg1: mov rdi, im */
2212 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2213 		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
2214 			ret = -EINVAL;
2215 			goto cleanup;
2216 		}
2217 	}
2218 
2219 	if (fentry->nr_links)
2220 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2221 			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
2222 			return -EINVAL;
2223 
2224 	if (fmod_ret->nr_links) {
2225 		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2226 				   GFP_KERNEL);
2227 		if (!branches)
2228 			return -ENOMEM;
2229 
2230 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2231 				       run_ctx_off, branches)) {
2232 			ret = -EINVAL;
2233 			goto cleanup;
2234 		}
2235 	}
2236 
2237 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2238 		restore_regs(m, &prog, nr_args, regs_off);
2239 
2240 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
2241 			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
2242 			EMIT2(0xff, 0xd0); /* call *rax */
2243 		} else {
2244 			/* call original function */
2245 			if (emit_call(&prog, orig_call, prog)) {
2246 				ret = -EINVAL;
2247 				goto cleanup;
2248 			}
2249 		}
2250 		/* remember return value in a stack for bpf prog to access */
2251 		/* remember return value on the stack for bpf prog to access */
2252 		im->ip_after_call = prog;
2253 		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2254 		prog += X86_PATCH_SIZE;
2255 	}
2256 
2257 	if (fmod_ret->nr_links) {
2258 		/* From Intel 64 and IA-32 Architectures Optimization
2259 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2260 		 * Coding Rule 11: All branch targets should be 16-byte
2261 		 * aligned.
2262 		 */
2263 		emit_align(&prog, 16);
2264 		/* Update the branches saved in invoke_bpf_mod_ret with the
2265 		 * aligned address of do_fexit.
2266 		 */
2267 		for (i = 0; i < fmod_ret->nr_links; i++)
2268 			emit_cond_near_jump(&branches[i], prog, branches[i],
2269 					    X86_JNE);
2270 	}
2271 
2272 	if (fexit->nr_links)
2273 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
2274 			ret = -EINVAL;
2275 			goto cleanup;
2276 		}
2277 
2278 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
2279 		restore_regs(m, &prog, nr_args, regs_off);
2280 
2281 	/* This needs to be done regardless. If there were fmod_ret programs,
2282 	 * the return value is only updated on the stack and still needs to be
2283 	 * restored to R0.
2284 	 */
2285 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2286 		im->ip_epilogue = prog;
2287 		/* arg1: mov rdi, im */
2288 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2289 		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2290 			ret = -EINVAL;
2291 			goto cleanup;
2292 		}
2293 	}
2294 	/* restore return value of orig_call or fentry prog back into RAX */
2295 	if (save_ret)
2296 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2297 
2298 	EMIT1(0x5B); /* pop rbx */
2299 	EMIT1(0xC9); /* leave */
2300 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
2301 		/* skip our return address and return to parent */
2302 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2303 	emit_return(&prog, prog);
2304 	/* Make sure the trampoline generation logic doesn't overflow */
2305 	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2306 		ret = -EFAULT;
2307 		goto cleanup;
2308 	}
2309 	ret = prog - (u8 *)image;
2310 
2311 cleanup:
2312 	kfree(branches);
2313 	return ret;
2314 }
2315 
2316 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
2317 {
2318 	u8 *jg_reloc, *prog = *pprog;
2319 	int pivot, err, jg_bytes = 1;
2320 	s64 jg_offset;
2321 
2322 	if (a == b) {
2323 		/* Leaf node of recursion, i.e. not a range of indices
2324 		 * anymore.
2325 		 */
2326 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
2327 		if (!is_simm32(progs[a]))
2328 			return -1;
2329 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2330 			    progs[a]);
2331 		err = emit_cond_near_jump(&prog,	/* je func */
2332 					  (void *)progs[a], image + (prog - buf),
2333 					  X86_JE);
2334 		if (err)
2335 			return err;
2336 
2337 		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
2338 
2339 		*pprog = prog;
2340 		return 0;
2341 	}
2342 
2343 	/* Not a leaf node, so we pivot, and recursively descend into
2344 	 * the lower and upper ranges.
2345 	 */
2346 	pivot = (b - a) / 2;
2347 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
2348 	if (!is_simm32(progs[a + pivot]))
2349 		return -1;
2350 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2351 
2352 	if (pivot > 2) {				/* jg upper_part */
2353 		/* Require near jump. */
2354 		jg_bytes = 4;
2355 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2356 	} else {
2357 		EMIT2(X86_JG, 0);
2358 	}
2359 	jg_reloc = prog;
2360 
2361 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
2362 				  progs, image, buf);
2363 	if (err)
2364 		return err;
2365 
2366 	/* From Intel 64 and IA-32 Architectures Optimization
2367 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2368 	 * Coding Rule 11: All branch targets should be 16-byte
2369 	 * aligned.
2370 	 */
2371 	emit_align(&prog, 16);
2372 	jg_offset = prog - jg_reloc;
2373 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2374 
2375 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
2376 				  b, progs, image, buf);
2377 	if (err)
2378 		return err;
2379 
2380 	*pprog = prog;
2381 	return 0;
2382 }
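/* For illustration: with two sorted program addresses p0 < p1 (standing in
 * for the sign-extended 32-bit immediates), arch_prepare_bpf_dispatcher()
 * below emits roughly:
 *
 *	cmp rdx, p0
 *	jg  1f			// rdx > p0: try the upper half
 *	cmp rdx, p0
 *	je  p0			// direct jump into prog 0
 *	jmp rdx			// fallback indirect jump (retpoline-aware)
 * 1:				// 16-byte aligned
 *	cmp rdx, p1
 *	je  p1
 *	jmp rdx
 */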
2383 
2384 static int cmp_ips(const void *a, const void *b)
2385 {
2386 	const s64 *ipa = a;
2387 	const s64 *ipb = b;
2388 
2389 	if (*ipa > *ipb)
2390 		return 1;
2391 	if (*ipa < *ipb)
2392 		return -1;
2393 	return 0;
2394 }
2395 
2396 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
2397 {
2398 	u8 *prog = buf;
2399 
2400 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2401 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
2402 }
2403 
2404 struct x64_jit_data {
2405 	struct bpf_binary_header *rw_header;
2406 	struct bpf_binary_header *header;
2407 	int *addrs;
2408 	u8 *image;
2409 	int proglen;
2410 	struct jit_context ctx;
2411 };
2412 
2413 #define MAX_PASSES 20
2414 #define PADDING_PASSES (MAX_PASSES - 5)
2415 
2416 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2417 {
2418 	struct bpf_binary_header *rw_header = NULL;
2419 	struct bpf_binary_header *header = NULL;
2420 	struct bpf_prog *tmp, *orig_prog = prog;
2421 	struct x64_jit_data *jit_data;
2422 	int proglen, oldproglen = 0;
2423 	struct jit_context ctx = {};
2424 	bool tmp_blinded = false;
2425 	bool extra_pass = false;
2426 	bool padding = false;
2427 	u8 *rw_image = NULL;
2428 	u8 *image = NULL;
2429 	int *addrs;
2430 	int pass;
2431 	int i;
2432 
2433 	if (!prog->jit_requested)
2434 		return orig_prog;
2435 
2436 	tmp = bpf_jit_blind_constants(prog);
2437 	/*
2438 	 * If blinding was requested and we failed during blinding,
2439 	 * we must fall back to the interpreter.
2440 	 */
2441 	if (IS_ERR(tmp))
2442 		return orig_prog;
2443 	if (tmp != prog) {
2444 		tmp_blinded = true;
2445 		prog = tmp;
2446 	}
2447 
2448 	jit_data = prog->aux->jit_data;
2449 	if (!jit_data) {
2450 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2451 		if (!jit_data) {
2452 			prog = orig_prog;
2453 			goto out;
2454 		}
2455 		prog->aux->jit_data = jit_data;
2456 	}
2457 	addrs = jit_data->addrs;
2458 	if (addrs) {
2459 		ctx = jit_data->ctx;
2460 		oldproglen = jit_data->proglen;
2461 		image = jit_data->image;
2462 		header = jit_data->header;
2463 		rw_header = jit_data->rw_header;
2464 		rw_image = (void *)rw_header + ((void *)image - (void *)header);
2465 		extra_pass = true;
2466 		padding = true;
2467 		goto skip_init_addrs;
2468 	}
2469 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2470 	if (!addrs) {
2471 		prog = orig_prog;
2472 		goto out_addrs;
2473 	}
2474 
2475 	/*
2476 	 * Before the first pass, make a rough estimate of addrs[]:
2477 	 * each BPF instruction is translated to less than 64 bytes.
2478 	 */
2479 	for (proglen = 0, i = 0; i <= prog->len; i++) {
2480 		proglen += 64;
2481 		addrs[i] = proglen;
2482 	}
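	/* For illustration: a 3-insn program is seeded here with
	 * addrs[0..3] = 64, 128, 192, 256; subsequent do_jit() passes
	 * overwrite these with the real end offset of each emitted insn
	 * until the image stops shrinking.
	 */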
2483 	ctx.cleanup_addr = proglen;
2484 skip_init_addrs:
2485 
2486 	/*
2487 	 * JITed image shrinks with every pass and the loop iterates
2488 	 * until the image stops shrinking. Very large BPF programs
2489 	 * may converge on the last pass. In such case do one more
2490 	 * may converge on the last pass. In such a case, do one more
2491 	 */
2492 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
2493 		if (!padding && pass >= PADDING_PASSES)
2494 			padding = true;
2495 		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
2496 		if (proglen <= 0) {
2497 out_image:
2498 			image = NULL;
2499 			if (header) {
2500 				bpf_arch_text_copy(&header->size, &rw_header->size,
2501 						   sizeof(rw_header->size));
2502 				bpf_jit_binary_pack_free(header, rw_header);
2503 			}
2504 			/* Fall back to interpreter mode */
2505 			prog = orig_prog;
2506 			if (extra_pass) {
2507 				prog->bpf_func = NULL;
2508 				prog->jited = 0;
2509 				prog->jited_len = 0;
2510 			}
2511 			goto out_addrs;
2512 		}
2513 		if (image) {
2514 			if (proglen != oldproglen) {
2515 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2516 				       proglen, oldproglen);
2517 				goto out_image;
2518 			}
2519 			break;
2520 		}
2521 		if (proglen == oldproglen) {
2522 			/*
2523 			 * The number of entries in extable is the number of BPF_LDX
2524 			 * insns that access kernel memory via "pointer to BTF type".
2525 			 * The verifier changed their opcode from LDX|MEM|size
2526 			 * to LDX|PROBE_MEM|size to make JITing easier.
2527 			 */
2528 			u32 align = __alignof__(struct exception_table_entry);
2529 			u32 extable_size = prog->aux->num_exentries *
2530 				sizeof(struct exception_table_entry);
2531 
2532 			/* allocate module memory for x86 insns and extable */
2533 			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
2534 							   &image, align, &rw_header, &rw_image,
2535 							   jit_fill_hole);
2536 			if (!header) {
2537 				prog = orig_prog;
2538 				goto out_addrs;
2539 			}
2540 			prog->aux->extable = (void *) image + roundup(proglen, align);
2541 		}
2542 		oldproglen = proglen;
2543 		cond_resched();
2544 	}
2545 
2546 	if (bpf_jit_enable > 1)
2547 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
2548 
2549 	if (image) {
2550 		if (!prog->is_func || extra_pass) {
2551 			/*
2552 			 * bpf_jit_binary_pack_finalize fails in two scenarios:
2553 			 *   1) header is not pointing to proper module memory;
2554 			 *   2) the arch doesn't support bpf_arch_text_copy().
2555 			 *
2556 			 * Both cases are serious bugs and justify WARN_ON.
2557 			 */
2558 			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
2559 				/* header has been freed */
2560 				header = NULL;
2561 				goto out_image;
2562 			}
2563 
2564 			bpf_tail_call_direct_fixup(prog);
2565 		} else {
2566 			jit_data->addrs = addrs;
2567 			jit_data->ctx = ctx;
2568 			jit_data->proglen = proglen;
2569 			jit_data->image = image;
2570 			jit_data->header = header;
2571 			jit_data->rw_header = rw_header;
2572 		}
2573 		prog->bpf_func = (void *)image;
2574 		prog->jited = 1;
2575 		prog->jited_len = proglen;
2576 	} else {
2577 		prog = orig_prog;
2578 	}
2579 
2580 	if (!image || !prog->is_func || extra_pass) {
2581 		if (image)
2582 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
2583 out_addrs:
2584 		kvfree(addrs);
2585 		kfree(jit_data);
2586 		prog->aux->jit_data = NULL;
2587 	}
2588 out:
2589 	if (tmp_blinded)
2590 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2591 					   tmp : orig_prog);
2592 	return prog;
2593 }
2594 
2595 bool bpf_jit_supports_kfunc_call(void)
2596 {
2597 	return true;
2598 }
2599 
2600 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
2601 {
2602 	if (text_poke_copy(dst, src, len) == NULL)
2603 		return ERR_PTR(-EINVAL);
2604 	return dst;
2605 }
2606 
2607 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
2608 bool bpf_jit_supports_subprog_tailcalls(void)
2609 {
2610 	return true;
2611 }
2612 
2613 void bpf_jit_free(struct bpf_prog *prog)
2614 {
2615 	if (prog->jited) {
2616 		struct x64_jit_data *jit_data = prog->aux->jit_data;
2617 		struct bpf_binary_header *hdr;
2618 
2619 		/*
2620 		 * If we fail the final pass of JIT (from jit_subprogs),
2621 		 * the program may not be finalized yet. Call finalize here
2622 		 * before freeing it.
2623 		 */
2624 		if (jit_data) {
2625 			bpf_jit_binary_pack_finalize(prog, jit_data->header,
2626 						     jit_data->rw_header);
2627 			kvfree(jit_data->addrs);
2628 			kfree(jit_data);
2629 		}
2630 		hdr = bpf_jit_binary_pack_hdr(prog);
2631 		bpf_jit_binary_pack_free(hdr, NULL);
2632 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
2633 	}
2634 
2635 	bpf_prog_unlock_free(prog);
2636 }
2637