xref: /openbmc/linux/arch/x86/net/bpf_jit_comp.c (revision b4e18b29)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * bpf_jit_comp.c: BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/set_memory.h>
16 #include <asm/nospec-branch.h>
17 #include <asm/text-patching.h>
18 #include <asm/asm-prototypes.h>
19 
20 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
21 {
22 	if (len == 1)
23 		*ptr = bytes;
24 	else if (len == 2)
25 		*(u16 *)ptr = bytes;
26 	else {
27 		*(u32 *)ptr = bytes;
28 		barrier();
29 	}
30 	return ptr + len;
31 }
32 
33 #define EMIT(bytes, len) \
34 	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
35 
36 #define EMIT1(b1)		EMIT(b1, 1)
37 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
38 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
39 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
40 
41 #define EMIT1_off32(b1, off) \
42 	do { EMIT1(b1); EMIT(off, 4); } while (0)
43 #define EMIT2_off32(b1, b2, off) \
44 	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
45 #define EMIT3_off32(b1, b2, b3, off) \
46 	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
47 #define EMIT4_off32(b1, b2, b3, b4, off) \
48 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
49 
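/*
 * A minimal usage sketch (hypothetical helper, not referenced anywhere):
 * the EMIT*() macros expect a local 'u8 *prog' cursor and an 'int cnt'
 * byte counter in the enclosing function and pack their byte arguments
 * little-endian into the image. Emitting 'mov rbp, rsp' (48 89 E5) looks
 * like this:
 */
static __maybe_unused void example_emit_mov_rbp_rsp(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	*pprog = prog;
}
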
50 static bool is_imm8(int value)
51 {
52 	return value <= 127 && value >= -128;
53 }
54 
55 static bool is_simm32(s64 value)
56 {
57 	return value == (s64)(s32)value;
58 }
59 
60 static bool is_uimm32(u64 value)
61 {
62 	return value == (u64)(u32)value;
63 }
64 
65 /* mov dst, src */
66 #define EMIT_mov(DST, SRC)								 \
67 	do {										 \
68 		if (DST != SRC)								 \
69 			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
70 	} while (0)
71 
72 static int bpf_size_to_x86_bytes(int bpf_size)
73 {
74 	if (bpf_size == BPF_W)
75 		return 4;
76 	else if (bpf_size == BPF_H)
77 		return 2;
78 	else if (bpf_size == BPF_B)
79 		return 1;
80 	else if (bpf_size == BPF_DW)
81 		return 4; /* imm32 */
82 	else
83 		return 0;
84 }
85 
86 /*
87  * List of x86 cond jumps opcodes (. + s8)
88  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
89  */
90 #define X86_JB  0x72
91 #define X86_JAE 0x73
92 #define X86_JE  0x74
93 #define X86_JNE 0x75
94 #define X86_JBE 0x76
95 #define X86_JA  0x77
96 #define X86_JL  0x7C
97 #define X86_JGE 0x7D
98 #define X86_JLE 0x7E
99 #define X86_JG  0x7F
100 
101 /* Pick a register outside of BPF range for JIT internal work */
102 #define AUX_REG (MAX_BPF_JIT_REG + 1)
103 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
104 
105 /*
106  * The following table maps BPF registers to x86-64 registers.
107  *
108  * x86-64 register R12 is unused, since if used as base address
109  * register in load/store instructions, it always needs an
110  * extra byte of encoding and is callee saved.
111  *
112  * x86-64 register R9 is not used by BPF programs, but can be used by BPF
113  * trampoline. x86-64 register R10 is used for blinding (if enabled).
114  */
115 static const int reg2hex[] = {
116 	[BPF_REG_0] = 0,  /* RAX */
117 	[BPF_REG_1] = 7,  /* RDI */
118 	[BPF_REG_2] = 6,  /* RSI */
119 	[BPF_REG_3] = 2,  /* RDX */
120 	[BPF_REG_4] = 1,  /* RCX */
121 	[BPF_REG_5] = 0,  /* R8  */
122 	[BPF_REG_6] = 3,  /* RBX callee saved */
123 	[BPF_REG_7] = 5,  /* R13 callee saved */
124 	[BPF_REG_8] = 6,  /* R14 callee saved */
125 	[BPF_REG_9] = 7,  /* R15 callee saved */
126 	[BPF_REG_FP] = 5, /* RBP readonly */
127 	[BPF_REG_AX] = 2, /* R10 temp register */
128 	[AUX_REG] = 3,    /* R11 temp register */
129 	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
130 };
131 
132 static const int reg2pt_regs[] = {
133 	[BPF_REG_0] = offsetof(struct pt_regs, ax),
134 	[BPF_REG_1] = offsetof(struct pt_regs, di),
135 	[BPF_REG_2] = offsetof(struct pt_regs, si),
136 	[BPF_REG_3] = offsetof(struct pt_regs, dx),
137 	[BPF_REG_4] = offsetof(struct pt_regs, cx),
138 	[BPF_REG_5] = offsetof(struct pt_regs, r8),
139 	[BPF_REG_6] = offsetof(struct pt_regs, bx),
140 	[BPF_REG_7] = offsetof(struct pt_regs, r13),
141 	[BPF_REG_8] = offsetof(struct pt_regs, r14),
142 	[BPF_REG_9] = offsetof(struct pt_regs, r15),
143 };
144 
145 /*
146  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
147  * which need extra byte of encoding.
148  * rax,rcx,...,rbp have simpler encoding
149  */
150 static bool is_ereg(u32 reg)
151 {
152 	return (1 << reg) & (BIT(BPF_REG_5) |
153 			     BIT(AUX_REG) |
154 			     BIT(BPF_REG_7) |
155 			     BIT(BPF_REG_8) |
156 			     BIT(BPF_REG_9) |
157 			     BIT(X86_REG_R9) |
158 			     BIT(BPF_REG_AX));
159 }
160 
161 /*
162  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
163  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
164  * of encoding. al,cl,dl,bl have simpler encoding.
165  */
166 static bool is_ereg_8l(u32 reg)
167 {
168 	return is_ereg(reg) ||
169 	    (1 << reg) & (BIT(BPF_REG_1) |
170 			  BIT(BPF_REG_2) |
171 			  BIT(BPF_REG_FP));
172 }
173 
174 static bool is_axreg(u32 reg)
175 {
176 	return reg == BPF_REG_0;
177 }
178 
179 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
180 static u8 add_1mod(u8 byte, u32 reg)
181 {
182 	if (is_ereg(reg))
183 		byte |= 1;
184 	return byte;
185 }
186 
187 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
188 {
189 	if (is_ereg(r1))
190 		byte |= 1;
191 	if (is_ereg(r2))
192 		byte |= 4;
193 	return byte;
194 }
195 
196 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
197 static u8 add_1reg(u8 byte, u32 dst_reg)
198 {
199 	return byte + reg2hex[dst_reg];
200 }
201 
202 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
203 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
204 {
205 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
206 }
207 
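/*
 * Worked example: EMIT_mov(BPF_REG_7, BPF_REG_6) copies RBX into R13.
 * add_2mod(0x48, BPF_REG_7, BPF_REG_6) sets REX.B for the extended
 * destination, giving 0x49, and add_2reg(0xC0, BPF_REG_7, BPF_REG_6)
 * yields 0xC0 + 5 + (3 << 3) = 0xDD, so the emitted bytes 49 89 DD
 * decode as 'mov r13, rbx'.
 */
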
208 /* Some 1-byte opcodes for binary ALU operations */
209 static u8 simple_alu_opcodes[] = {
210 	[BPF_ADD] = 0x01,
211 	[BPF_SUB] = 0x29,
212 	[BPF_AND] = 0x21,
213 	[BPF_OR] = 0x09,
214 	[BPF_XOR] = 0x31,
215 	[BPF_LSH] = 0xE0,
216 	[BPF_RSH] = 0xE8,
217 	[BPF_ARSH] = 0xF8,
218 };
219 
220 static void jit_fill_hole(void *area, unsigned int size)
221 {
222 	/* Fill whole space with INT3 instructions */
223 	memset(area, 0xcc, size);
224 }
225 
226 struct jit_context {
227 	int cleanup_addr; /* Epilogue code offset */
228 };
229 
230 /* Maximum number of bytes emitted while JITing one eBPF insn */
231 #define BPF_MAX_INSN_SIZE	128
232 #define BPF_INSN_SAFETY		64
233 
234 /* Number of bytes emit_patch() needs to generate instructions */
235 #define X86_PATCH_SIZE		5
236 /* Number of bytes that will be skipped on tailcall */
237 #define X86_TAIL_CALL_OFFSET	11
238 
239 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
240 {
241 	u8 *prog = *pprog;
242 	int cnt = 0;
243 
244 	if (callee_regs_used[0])
245 		EMIT1(0x53);         /* push rbx */
246 	if (callee_regs_used[1])
247 		EMIT2(0x41, 0x55);   /* push r13 */
248 	if (callee_regs_used[2])
249 		EMIT2(0x41, 0x56);   /* push r14 */
250 	if (callee_regs_used[3])
251 		EMIT2(0x41, 0x57);   /* push r15 */
252 	*pprog = prog;
253 }
254 
255 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
256 {
257 	u8 *prog = *pprog;
258 	int cnt = 0;
259 
260 	if (callee_regs_used[3])
261 		EMIT2(0x41, 0x5F);   /* pop r15 */
262 	if (callee_regs_used[2])
263 		EMIT2(0x41, 0x5E);   /* pop r14 */
264 	if (callee_regs_used[1])
265 		EMIT2(0x41, 0x5D);   /* pop r13 */
266 	if (callee_regs_used[0])
267 		EMIT1(0x5B);         /* pop rbx */
268 	*pprog = prog;
269 }
270 
271 /*
272  * Emit x86-64 prologue code for BPF program.
273  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
274  * while jumping to another program
275  */
276 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
277 			  bool tail_call_reachable, bool is_subprog)
278 {
279 	u8 *prog = *pprog;
280 	int cnt = X86_PATCH_SIZE;
281 
282 	/* BPF trampoline can be made to work without these nops,
283 	 * but let's waste 5 bytes for now and optimize later
284 	 */
285 	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
286 	prog += cnt;
287 	if (!ebpf_from_cbpf) {
288 		if (tail_call_reachable && !is_subprog)
289 			EMIT2(0x31, 0xC0); /* xor eax, eax */
290 		else
291 			EMIT2(0x66, 0x90); /* nop2 */
292 	}
293 	EMIT1(0x55);             /* push rbp */
294 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
295 	/* sub rsp, rounded_stack_depth */
296 	if (stack_depth)
297 		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
298 	if (tail_call_reachable)
299 		EMIT1(0x50);         /* push rax */
300 	*pprog = prog;
301 }
302 
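/*
 * Byte accounting for the prologue above: the 5-byte atomic NOP, the
 * 2-byte 'xor eax, eax' (or nop2), the 1-byte 'push rbp' and the 3-byte
 * 'mov rbp, rsp' add up to the 11 bytes that X86_TAIL_CALL_OFFSET skips,
 * so a tail call lands past the target's frame setup and reuses the
 * caller's rbp frame.
 */
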
303 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
304 {
305 	u8 *prog = *pprog;
306 	int cnt = 0;
307 	s64 offset;
308 
309 	offset = func - (ip + X86_PATCH_SIZE);
310 	if (!is_simm32(offset)) {
311 		pr_err("Target call %p is out of range\n", func);
312 		return -ERANGE;
313 	}
314 	EMIT1_off32(opcode, offset);
315 	*pprog = prog;
316 	return 0;
317 }
318 
319 static int emit_call(u8 **pprog, void *func, void *ip)
320 {
321 	return emit_patch(pprog, func, ip, 0xE8);
322 }
323 
324 static int emit_jump(u8 **pprog, void *func, void *ip)
325 {
326 	return emit_patch(pprog, func, ip, 0xE9);
327 }
328 
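/*
 * Worked example: both helpers produce a 5-byte rel32 instruction, so the
 * displacement is taken from the end of that instruction. With
 * ip == 0xffffffffc0001000 and func == 0xffffffffc0002000 the offset is
 * 0x2000 - (0x1000 + 5) = 0xffb and emit_call() writes E8 FB 0F 00 00.
 */
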
329 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
330 				void *old_addr, void *new_addr,
331 				const bool text_live)
332 {
333 	const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
334 	u8 old_insn[X86_PATCH_SIZE];
335 	u8 new_insn[X86_PATCH_SIZE];
336 	u8 *prog;
337 	int ret;
338 
339 	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
340 	if (old_addr) {
341 		prog = old_insn;
342 		ret = t == BPF_MOD_CALL ?
343 		      emit_call(&prog, old_addr, ip) :
344 		      emit_jump(&prog, old_addr, ip);
345 		if (ret)
346 			return ret;
347 	}
348 
349 	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
350 	if (new_addr) {
351 		prog = new_insn;
352 		ret = t == BPF_MOD_CALL ?
353 		      emit_call(&prog, new_addr, ip) :
354 		      emit_jump(&prog, new_addr, ip);
355 		if (ret)
356 			return ret;
357 	}
358 
359 	ret = -EBUSY;
360 	mutex_lock(&text_mutex);
361 	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
362 		goto out;
363 	ret = 1;
364 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
365 		if (text_live)
366 			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
367 		else
368 			memcpy(ip, new_insn, X86_PATCH_SIZE);
369 		ret = 0;
370 	}
371 out:
372 	mutex_unlock(&text_mutex);
373 	return ret;
374 }
375 
376 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
377 		       void *old_addr, void *new_addr)
378 {
379 	if (!is_kernel_text((long)ip) &&
380 	    !is_bpf_text_address((long)ip))
381 		/* BPF poking in modules is not supported */
382 		return -EINVAL;
383 
384 	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
385 }
386 
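/*
 * Typical use, simplified: a BPF trampoline is attached by turning the
 * 5-byte NOP left at a patchable call site into a call and detached by
 * the reverse poke, e.g. (ip and tramp standing for the patch site and
 * trampoline address):
 *
 *	bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, tramp);  // nop5 -> call
 *	bpf_arch_text_poke(ip, BPF_MOD_CALL, tramp, NULL);  // call -> nop5
 *
 * A stale old_addr makes the memcmp() in __bpf_arch_text_poke() fail and
 * the poke returns -EBUSY.
 */
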
387 static int get_pop_bytes(bool *callee_regs_used)
388 {
389 	int bytes = 0;
390 
391 	if (callee_regs_used[3])
392 		bytes += 2;
393 	if (callee_regs_used[2])
394 		bytes += 2;
395 	if (callee_regs_used[1])
396 		bytes += 2;
397 	if (callee_regs_used[0])
398 		bytes += 1;
399 
400 	return bytes;
401 }
402 
403 /*
404  * Generate the following code:
405  *
406  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
407  *   if (index >= array->map.max_entries)
408  *     goto out;
409  *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
410  *     goto out;
411  *   prog = array->ptrs[index];
412  *   if (prog == NULL)
413  *     goto out;
414  *   goto *(prog->bpf_func + prologue_size);
415  * out:
416  */
417 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
418 					u32 stack_depth)
419 {
420 	int tcc_off = -4 - round_up(stack_depth, 8);
421 	u8 *prog = *pprog;
422 	int pop_bytes = 0;
423 	int off1 = 42;
424 	int off2 = 31;
425 	int off3 = 9;
426 	int cnt = 0;
427 
428 	/* count the additional bytes used for popping callee regs from stack
429 	 * that need to be taken into account for each of the offsets that
430 	 * are used for bailing out of the tail call
431 	 */
432 	pop_bytes = get_pop_bytes(callee_regs_used);
433 	off1 += pop_bytes;
434 	off2 += pop_bytes;
435 	off3 += pop_bytes;
436 
437 	if (stack_depth) {
438 		off1 += 7;
439 		off2 += 7;
440 		off3 += 7;
441 	}
442 
443 	/*
444 	 * rdi - pointer to ctx
445 	 * rsi - pointer to bpf_array
446 	 * rdx - index in bpf_array
447 	 */
448 
449 	/*
450 	 * if (index >= array->map.max_entries)
451 	 *	goto out;
452 	 */
453 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
454 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
455 	      offsetof(struct bpf_array, map.max_entries));
456 #define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
457 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
458 
459 	/*
460 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
461 	 *	goto out;
462 	 */
463 	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
464 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
465 #define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
466 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
467 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
468 	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
469 
470 	/* prog = array->ptrs[index]; */
471 	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
472 		    offsetof(struct bpf_array, ptrs));
473 
474 	/*
475 	 * if (prog == NULL)
476 	 *	goto out;
477 	 */
478 	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
479 #define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
480 	EMIT2(X86_JE, OFFSET3);                   /* je out */
481 
482 	*pprog = prog;
483 	pop_callee_regs(pprog, callee_regs_used);
484 	prog = *pprog;
485 
486 	EMIT1(0x58);                              /* pop rax */
487 	if (stack_depth)
488 		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
489 			    round_up(stack_depth, 8));
490 
491 	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
492 	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
493 	      offsetof(struct bpf_prog, bpf_func));
494 	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
495 	      X86_TAIL_CALL_OFFSET);
496 	/*
497 	 * Now we're ready to jump into next BPF program
498 	 * rdi == ctx (1st arg)
499 	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
500 	 */
501 	RETPOLINE_RCX_BPF_JIT();
502 
503 	/* out: */
504 	*pprog = prog;
505 }
506 
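/*
 * How the hard-coded offsets above are derived: the base values count the
 * bytes between each conditional jump and the out label. off3 = 9 covers
 * 'pop rax' (1) plus the two rcx instructions (4 + 4); off2 = 31 adds
 * 'add eax, 1' (3), the tail_call_cnt store (6), the array->ptrs load (8),
 * 'test rcx, rcx' (3) and 'je' (2); off1 = 42 further adds the
 * tail_call_cnt load (6), 'cmp eax, imm8' (3) and 'ja' (2). pop_bytes and
 * the optional 7-byte 'add rsp, imm32' are added at run time, and the
 * retpoline thunk is accounted for via RETPOLINE_RCX_BPF_JIT_SIZE.
 */
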
507 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
508 				      u8 **pprog, int addr, u8 *image,
509 				      bool *callee_regs_used, u32 stack_depth)
510 {
511 	int tcc_off = -4 - round_up(stack_depth, 8);
512 	u8 *prog = *pprog;
513 	int pop_bytes = 0;
514 	int off1 = 20;
515 	int poke_off;
516 	int cnt = 0;
517 
518 	/* count the additional bytes used for popping callee regs from stack
519 	 * that need to be taken into account for the jump offset that is used
520 	 * for bailing out of the tail call when the limit is reached
521 	 */
522 	pop_bytes = get_pop_bytes(callee_regs_used);
523 	off1 += pop_bytes;
524 
525 	/*
526 	 * total bytes for:
527 	 * - nop5/ jmpq $off
528 	 * - pop callee regs
529 	 * - sub rsp, $val if depth > 0
530 	 * - pop rax
531 	 */
532 	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
533 	if (stack_depth) {
534 		poke_off += 7;
535 		off1 += 7;
536 	}
537 
538 	/*
539 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
540 	 *	goto out;
541 	 */
542 	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
543 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
544 	EMIT2(X86_JA, off1);                          /* ja out */
545 	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
546 	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */
547 
548 	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
549 	poke->adj_off = X86_TAIL_CALL_OFFSET;
550 	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
551 	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
552 
553 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
554 		  poke->tailcall_bypass);
555 
556 	*pprog = prog;
557 	pop_callee_regs(pprog, callee_regs_used);
558 	prog = *pprog;
559 	EMIT1(0x58);                                  /* pop rax */
560 	if (stack_depth)
561 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
562 
563 	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
564 	prog += X86_PATCH_SIZE;
565 	/* out: */
566 
567 	*pprog = prog;
568 }
569 
570 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
571 {
572 	struct bpf_jit_poke_descriptor *poke;
573 	struct bpf_array *array;
574 	struct bpf_prog *target;
575 	int i, ret;
576 
577 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
578 		poke = &prog->aux->poke_tab[i];
579 		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
580 
581 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
582 			continue;
583 
584 		array = container_of(poke->tail_call.map, struct bpf_array, map);
585 		mutex_lock(&array->aux->poke_mutex);
586 		target = array->ptrs[poke->tail_call.key];
587 		if (target) {
588 			/* Plain memcpy is used when image is not live yet
589 			 * and still not locked as read-only. Once poke
590 			 * location is active (poke->tailcall_target_stable),
591 			 * any parallel bpf_arch_text_poke() might occur
592 			 * any parallel bpf_arch_text_poke() might still
593 			 * occur on the read-write image until we finally
594 			 * lock it as read-only. Both modifications on
595 			 * interference.
596 			 */
597 			ret = __bpf_arch_text_poke(poke->tailcall_target,
598 						   BPF_MOD_JUMP, NULL,
599 						   (u8 *)target->bpf_func +
600 						   poke->adj_off, false);
601 			BUG_ON(ret < 0);
602 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
603 						   BPF_MOD_JUMP,
604 						   (u8 *)poke->tailcall_target +
605 						   X86_PATCH_SIZE, NULL, false);
606 			BUG_ON(ret < 0);
607 		}
608 		WRITE_ONCE(poke->tailcall_target_stable, true);
609 		mutex_unlock(&array->aux->poke_mutex);
610 	}
611 }
612 
613 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
614 			   u32 dst_reg, const u32 imm32)
615 {
616 	u8 *prog = *pprog;
617 	u8 b1, b2, b3;
618 	int cnt = 0;
619 
620 	/*
621 	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
622 	 * (which zero-extends imm32) to save 2 bytes.
623 	 */
624 	if (sign_propagate && (s32)imm32 < 0) {
625 		/* 'mov %rax, imm32' sign extends imm32 */
626 		b1 = add_1mod(0x48, dst_reg);
627 		b2 = 0xC7;
628 		b3 = 0xC0;
629 		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
630 		goto done;
631 	}
632 
633 	/*
634 	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
635 	 * to save 3 bytes.
636 	 */
637 	if (imm32 == 0) {
638 		if (is_ereg(dst_reg))
639 			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
640 		b2 = 0x31; /* xor */
641 		b3 = 0xC0;
642 		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
643 		goto done;
644 	}
645 
646 	/* mov %eax, imm32 */
647 	if (is_ereg(dst_reg))
648 		EMIT1(add_1mod(0x40, dst_reg));
649 	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
650 done:
651 	*pprog = prog;
652 }
653 
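/*
 * Worked example for dst_reg == BPF_REG_0 (RAX):
 *   imm32 == -1, sign_propagate: 48 C7 C0 FF FF FF FF  mov rax, -1   (7 bytes)
 *   imm32 == 0:                  31 C0                 xor eax, eax  (2 bytes)
 *   imm32 == 5:                  B8 05 00 00 00        mov eax, 5    (5 bytes)
 * which is where the "save 2 bytes" and "save 3 bytes" figures above
 * come from.
 */
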
654 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
655 			   const u32 imm32_hi, const u32 imm32_lo)
656 {
657 	u8 *prog = *pprog;
658 	int cnt = 0;
659 
660 	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
661 		/*
662 		 * For emitting a plain u32, where the sign bit must not be
663 		 * propagated, LLVM tends to load imm64 over mov32
664 		 * directly, so save a couple of bytes by just doing
665 		 * 'mov %eax, imm32' instead.
666 		 */
667 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
668 	} else {
669 		/* movabsq %rax, imm64 */
670 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
671 		EMIT(imm32_lo, 4);
672 		EMIT(imm32_hi, 4);
673 	}
674 
675 	*pprog = prog;
676 }
677 
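/*
 * Worked example: a constant that does not fit in 32 bits, e.g.
 * 0x1122334455667788 into BPF_REG_0, takes the 10-byte
 * 'movabs rax, imm64' path: 48 B8 88 77 66 55 44 33 22 11.
 */
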
678 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
679 {
680 	u8 *prog = *pprog;
681 	int cnt = 0;
682 
683 	if (is64) {
684 		/* mov dst, src */
685 		EMIT_mov(dst_reg, src_reg);
686 	} else {
687 		/* mov32 dst, src */
688 		if (is_ereg(dst_reg) || is_ereg(src_reg))
689 			EMIT1(add_2mod(0x40, dst_reg, src_reg));
690 		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
691 	}
692 
693 	*pprog = prog;
694 }
695 
696 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
697 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
698 {
699 	u8 *prog = *pprog;
700 	int cnt = 0;
701 
702 	if (is_imm8(off)) {
703 		/* 1-byte signed displacement.
704 		 *
705 		 * If off == 0 we could skip this and save one extra byte, but
706 		 * the special case of x86 R13, which always needs an offset, is
707 		 * not worth the hassle
708 		 */
709 		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
710 	} else {
711 		/* 4-byte signed displacement */
712 		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
713 	}
714 	*pprog = prog;
715 }
716 
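/*
 * Worked example: addressing *(rax + 8) with RBX as the value register
 * emits the ModR/M + disp8 pair 58 08 (mod=01, reg=rbx, rm=rax), so with
 * a preceding 48 89 this becomes 'mov qword ptr [rax + 8], rbx'. An
 * offset of 0x100 no longer fits in imm8 and switches to mod=10 with a
 * 4-byte displacement: 98 00 01 00 00.
 */
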
717 /*
718  * Emit a REX byte if it will be necessary to address these registers
719  */
720 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
721 {
722 	u8 *prog = *pprog;
723 	int cnt = 0;
724 
725 	if (is64)
726 		EMIT1(add_2mod(0x48, dst_reg, src_reg));
727 	else if (is_ereg(dst_reg) || is_ereg(src_reg))
728 		EMIT1(add_2mod(0x40, dst_reg, src_reg));
729 	*pprog = prog;
730 }
731 
732 /* LDX: dst_reg = *(u8*)(src_reg + off) */
733 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
734 {
735 	u8 *prog = *pprog;
736 	int cnt = 0;
737 
738 	switch (size) {
739 	case BPF_B:
740 		/* Emit 'movzx rax, byte ptr [rax + off]' */
741 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
742 		break;
743 	case BPF_H:
744 		/* Emit 'movzx rax, word ptr [rax + off]' */
745 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
746 		break;
747 	case BPF_W:
748 		/* Emit 'mov eax, dword ptr [rax+0x14]' */
749 		if (is_ereg(dst_reg) || is_ereg(src_reg))
750 			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
751 		else
752 			EMIT1(0x8B);
753 		break;
754 	case BPF_DW:
755 		/* Emit 'mov rax, qword ptr [rax+0x14]' */
756 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
757 		break;
758 	}
759 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
760 	*pprog = prog;
761 }
762 
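/*
 * Worked example: emit_ldx(&prog, BPF_W, BPF_REG_0, BPF_REG_1, 0x14)
 * produces 8B 47 14, i.e. 'mov eax, dword ptr [rdi + 0x14]' -- neither
 * register needs a REX prefix, so only the opcode plus the ModR/M/disp8
 * suffix is emitted.
 */
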
763 /* STX: *(u8*)(dst_reg + off) = src_reg */
764 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
765 {
766 	u8 *prog = *pprog;
767 	int cnt = 0;
768 
769 	switch (size) {
770 	case BPF_B:
771 		/* Emit 'mov byte ptr [rax + off], al' */
772 		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
773 			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
774 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
775 		else
776 			EMIT1(0x88);
777 		break;
778 	case BPF_H:
779 		if (is_ereg(dst_reg) || is_ereg(src_reg))
780 			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
781 		else
782 			EMIT2(0x66, 0x89);
783 		break;
784 	case BPF_W:
785 		if (is_ereg(dst_reg) || is_ereg(src_reg))
786 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
787 		else
788 			EMIT1(0x89);
789 		break;
790 	case BPF_DW:
791 		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
792 		break;
793 	}
794 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
795 	*pprog = prog;
796 }
797 
798 static int emit_atomic(u8 **pprog, u8 atomic_op,
799 		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
800 {
801 	u8 *prog = *pprog;
802 	int cnt = 0;
803 
804 	EMIT1(0xF0); /* lock prefix */
805 
806 	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
807 
808 	/* emit opcode */
809 	switch (atomic_op) {
810 	case BPF_ADD:
811 	case BPF_SUB:
812 	case BPF_AND:
813 	case BPF_OR:
814 	case BPF_XOR:
815 		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
816 		EMIT1(simple_alu_opcodes[atomic_op]);
817 		break;
818 	case BPF_ADD | BPF_FETCH:
819 		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
820 		EMIT2(0x0F, 0xC1);
821 		break;
822 	case BPF_XCHG:
823 		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
824 		EMIT1(0x87);
825 		break;
826 	case BPF_CMPXCHG:
827 		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
828 		EMIT2(0x0F, 0xB1);
829 		break;
830 	default:
831 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
832 		return -EFAULT;
833 	}
834 
835 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
836 
837 	*pprog = prog;
838 	return 0;
839 }
840 
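/*
 * Worked example: a BPF_STX | BPF_ATOMIC | BPF_DW insn with imm == BPF_ADD,
 * dst_reg == BPF_REG_0, src_reg == BPF_REG_1 and off == 0 becomes
 * F0 48 01 78 00, i.e. 'lock add qword ptr [rax + 0], rdi'. With
 * imm == (BPF_ADD | BPF_FETCH) the opcode switches to 0F C1 (XADD), so the
 * previous memory value is additionally returned in src_reg.
 */
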
841 static bool ex_handler_bpf(const struct exception_table_entry *x,
842 			   struct pt_regs *regs, int trapnr,
843 			   unsigned long error_code, unsigned long fault_addr)
844 {
845 	u32 reg = x->fixup >> 8;
846 
847 	/* jump over faulting load and clear dest register */
848 	*(unsigned long *)((void *)regs + reg) = 0;
849 	regs->ip += x->fixup & 0xff;
850 	return true;
851 }
852 
853 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
854 			     bool *regs_used, bool *tail_call_seen)
855 {
856 	int i;
857 
858 	for (i = 1; i <= insn_cnt; i++, insn++) {
859 		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
860 			*tail_call_seen = true;
861 		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
862 			regs_used[0] = true;
863 		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
864 			regs_used[1] = true;
865 		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
866 			regs_used[2] = true;
867 		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
868 			regs_used[3] = true;
869 	}
870 }
871 
872 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
873 		  int oldproglen, struct jit_context *ctx)
874 {
875 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
876 	struct bpf_insn *insn = bpf_prog->insnsi;
877 	bool callee_regs_used[4] = {};
878 	int insn_cnt = bpf_prog->len;
879 	bool tail_call_seen = false;
880 	bool seen_exit = false;
881 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
882 	int i, cnt = 0, excnt = 0;
883 	int proglen = 0;
884 	u8 *prog = temp;
885 	int err;
886 
887 	detect_reg_usage(insn, insn_cnt, callee_regs_used,
888 			 &tail_call_seen);
889 
890 	/* tail call's presence in current prog implies it is reachable */
891 	tail_call_reachable |= tail_call_seen;
892 
893 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
894 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
895 		      bpf_prog->aux->func_idx != 0);
896 	push_callee_regs(&prog, callee_regs_used);
897 	addrs[0] = prog - temp;
898 
899 	for (i = 1; i <= insn_cnt; i++, insn++) {
900 		const s32 imm32 = insn->imm;
901 		u32 dst_reg = insn->dst_reg;
902 		u32 src_reg = insn->src_reg;
903 		u8 b2 = 0, b3 = 0;
904 		s64 jmp_offset;
905 		u8 jmp_cond;
906 		int ilen;
907 		u8 *func;
908 
909 		switch (insn->code) {
910 			/* ALU */
911 		case BPF_ALU | BPF_ADD | BPF_X:
912 		case BPF_ALU | BPF_SUB | BPF_X:
913 		case BPF_ALU | BPF_AND | BPF_X:
914 		case BPF_ALU | BPF_OR | BPF_X:
915 		case BPF_ALU | BPF_XOR | BPF_X:
916 		case BPF_ALU64 | BPF_ADD | BPF_X:
917 		case BPF_ALU64 | BPF_SUB | BPF_X:
918 		case BPF_ALU64 | BPF_AND | BPF_X:
919 		case BPF_ALU64 | BPF_OR | BPF_X:
920 		case BPF_ALU64 | BPF_XOR | BPF_X:
921 			maybe_emit_mod(&prog, dst_reg, src_reg,
922 				       BPF_CLASS(insn->code) == BPF_ALU64);
923 			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
924 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
925 			break;
926 
927 		case BPF_ALU64 | BPF_MOV | BPF_X:
928 		case BPF_ALU | BPF_MOV | BPF_X:
929 			emit_mov_reg(&prog,
930 				     BPF_CLASS(insn->code) == BPF_ALU64,
931 				     dst_reg, src_reg);
932 			break;
933 
934 			/* neg dst */
935 		case BPF_ALU | BPF_NEG:
936 		case BPF_ALU64 | BPF_NEG:
937 			if (BPF_CLASS(insn->code) == BPF_ALU64)
938 				EMIT1(add_1mod(0x48, dst_reg));
939 			else if (is_ereg(dst_reg))
940 				EMIT1(add_1mod(0x40, dst_reg));
941 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
942 			break;
943 
944 		case BPF_ALU | BPF_ADD | BPF_K:
945 		case BPF_ALU | BPF_SUB | BPF_K:
946 		case BPF_ALU | BPF_AND | BPF_K:
947 		case BPF_ALU | BPF_OR | BPF_K:
948 		case BPF_ALU | BPF_XOR | BPF_K:
949 		case BPF_ALU64 | BPF_ADD | BPF_K:
950 		case BPF_ALU64 | BPF_SUB | BPF_K:
951 		case BPF_ALU64 | BPF_AND | BPF_K:
952 		case BPF_ALU64 | BPF_OR | BPF_K:
953 		case BPF_ALU64 | BPF_XOR | BPF_K:
954 			if (BPF_CLASS(insn->code) == BPF_ALU64)
955 				EMIT1(add_1mod(0x48, dst_reg));
956 			else if (is_ereg(dst_reg))
957 				EMIT1(add_1mod(0x40, dst_reg));
958 
959 			/*
960 			 * b3 holds 'normal' opcode, b2 short form only valid
961 			 * in case dst is eax/rax.
962 			 */
963 			switch (BPF_OP(insn->code)) {
964 			case BPF_ADD:
965 				b3 = 0xC0;
966 				b2 = 0x05;
967 				break;
968 			case BPF_SUB:
969 				b3 = 0xE8;
970 				b2 = 0x2D;
971 				break;
972 			case BPF_AND:
973 				b3 = 0xE0;
974 				b2 = 0x25;
975 				break;
976 			case BPF_OR:
977 				b3 = 0xC8;
978 				b2 = 0x0D;
979 				break;
980 			case BPF_XOR:
981 				b3 = 0xF0;
982 				b2 = 0x35;
983 				break;
984 			}
985 
986 			if (is_imm8(imm32))
987 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
988 			else if (is_axreg(dst_reg))
989 				EMIT1_off32(b2, imm32);
990 			else
991 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
992 			break;
993 
994 		case BPF_ALU64 | BPF_MOV | BPF_K:
995 		case BPF_ALU | BPF_MOV | BPF_K:
996 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
997 				       dst_reg, imm32);
998 			break;
999 
1000 		case BPF_LD | BPF_IMM | BPF_DW:
1001 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1002 			insn++;
1003 			i++;
1004 			break;
1005 
1006 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1007 		case BPF_ALU | BPF_MOD | BPF_X:
1008 		case BPF_ALU | BPF_DIV | BPF_X:
1009 		case BPF_ALU | BPF_MOD | BPF_K:
1010 		case BPF_ALU | BPF_DIV | BPF_K:
1011 		case BPF_ALU64 | BPF_MOD | BPF_X:
1012 		case BPF_ALU64 | BPF_DIV | BPF_X:
1013 		case BPF_ALU64 | BPF_MOD | BPF_K:
1014 		case BPF_ALU64 | BPF_DIV | BPF_K:
1015 			EMIT1(0x50); /* push rax */
1016 			EMIT1(0x52); /* push rdx */
1017 
1018 			if (BPF_SRC(insn->code) == BPF_X)
1019 				/* mov r11, src_reg */
1020 				EMIT_mov(AUX_REG, src_reg);
1021 			else
1022 				/* mov r11, imm32 */
1023 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1024 
1025 			/* mov rax, dst_reg */
1026 			EMIT_mov(BPF_REG_0, dst_reg);
1027 
1028 			/*
1029 			 * xor edx, edx
1030 			 * equivalent to 'xor rdx, rdx', but one byte less
1031 			 */
1032 			EMIT2(0x31, 0xd2);
1033 
1034 			if (BPF_CLASS(insn->code) == BPF_ALU64)
1035 				/* div r11 */
1036 				EMIT3(0x49, 0xF7, 0xF3);
1037 			else
1038 				/* div r11d */
1039 				EMIT3(0x41, 0xF7, 0xF3);
1040 
1041 			if (BPF_OP(insn->code) == BPF_MOD)
1042 				/* mov r11, rdx */
1043 				EMIT3(0x49, 0x89, 0xD3);
1044 			else
1045 				/* mov r11, rax */
1046 				EMIT3(0x49, 0x89, 0xC3);
1047 
1048 			EMIT1(0x5A); /* pop rdx */
1049 			EMIT1(0x58); /* pop rax */
1050 
1051 			/* mov dst_reg, r11 */
1052 			EMIT_mov(dst_reg, AUX_REG);
1053 			break;
1054 
1055 		case BPF_ALU | BPF_MUL | BPF_K:
1056 		case BPF_ALU | BPF_MUL | BPF_X:
1057 		case BPF_ALU64 | BPF_MUL | BPF_K:
1058 		case BPF_ALU64 | BPF_MUL | BPF_X:
1059 		{
1060 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1061 
1062 			if (dst_reg != BPF_REG_0)
1063 				EMIT1(0x50); /* push rax */
1064 			if (dst_reg != BPF_REG_3)
1065 				EMIT1(0x52); /* push rdx */
1066 
1067 			/* mov r11, dst_reg */
1068 			EMIT_mov(AUX_REG, dst_reg);
1069 
1070 			if (BPF_SRC(insn->code) == BPF_X)
1071 				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
1072 			else
1073 				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1074 
1075 			if (is64)
1076 				EMIT1(add_1mod(0x48, AUX_REG));
1077 			else if (is_ereg(AUX_REG))
1078 				EMIT1(add_1mod(0x40, AUX_REG));
1079 			/* mul(q) r11 */
1080 			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
1081 
1082 			if (dst_reg != BPF_REG_3)
1083 				EMIT1(0x5A); /* pop rdx */
1084 			if (dst_reg != BPF_REG_0) {
1085 				/* mov dst_reg, rax */
1086 				EMIT_mov(dst_reg, BPF_REG_0);
1087 				EMIT1(0x58); /* pop rax */
1088 			}
1089 			break;
1090 		}
1091 			/* Shifts */
1092 		case BPF_ALU | BPF_LSH | BPF_K:
1093 		case BPF_ALU | BPF_RSH | BPF_K:
1094 		case BPF_ALU | BPF_ARSH | BPF_K:
1095 		case BPF_ALU64 | BPF_LSH | BPF_K:
1096 		case BPF_ALU64 | BPF_RSH | BPF_K:
1097 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1098 			if (BPF_CLASS(insn->code) == BPF_ALU64)
1099 				EMIT1(add_1mod(0x48, dst_reg));
1100 			else if (is_ereg(dst_reg))
1101 				EMIT1(add_1mod(0x40, dst_reg));
1102 
1103 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1104 			if (imm32 == 1)
1105 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1106 			else
1107 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1108 			break;
1109 
1110 		case BPF_ALU | BPF_LSH | BPF_X:
1111 		case BPF_ALU | BPF_RSH | BPF_X:
1112 		case BPF_ALU | BPF_ARSH | BPF_X:
1113 		case BPF_ALU64 | BPF_LSH | BPF_X:
1114 		case BPF_ALU64 | BPF_RSH | BPF_X:
1115 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1116 
1117 			/* Check for bad case when dst_reg == rcx */
1118 			if (dst_reg == BPF_REG_4) {
1119 				/* mov r11, dst_reg */
1120 				EMIT_mov(AUX_REG, dst_reg);
1121 				dst_reg = AUX_REG;
1122 			}
1123 
1124 			if (src_reg != BPF_REG_4) { /* common case */
1125 				EMIT1(0x51); /* push rcx */
1126 
1127 				/* mov rcx, src_reg */
1128 				EMIT_mov(BPF_REG_4, src_reg);
1129 			}
1130 
1131 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1132 			if (BPF_CLASS(insn->code) == BPF_ALU64)
1133 				EMIT1(add_1mod(0x48, dst_reg));
1134 			else if (is_ereg(dst_reg))
1135 				EMIT1(add_1mod(0x40, dst_reg));
1136 
1137 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1138 			EMIT2(0xD3, add_1reg(b3, dst_reg));
1139 
1140 			if (src_reg != BPF_REG_4)
1141 				EMIT1(0x59); /* pop rcx */
1142 
1143 			if (insn->dst_reg == BPF_REG_4)
1144 				/* mov dst_reg, r11 */
1145 				EMIT_mov(insn->dst_reg, AUX_REG);
1146 			break;
1147 
1148 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1149 			switch (imm32) {
1150 			case 16:
1151 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
1152 				EMIT1(0x66);
1153 				if (is_ereg(dst_reg))
1154 					EMIT1(0x41);
1155 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1156 
1157 				/* Emit 'movzwl eax, ax' */
1158 				if (is_ereg(dst_reg))
1159 					EMIT3(0x45, 0x0F, 0xB7);
1160 				else
1161 					EMIT2(0x0F, 0xB7);
1162 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1163 				break;
1164 			case 32:
1165 				/* Emit 'bswap eax' to swap lower 4 bytes */
1166 				if (is_ereg(dst_reg))
1167 					EMIT2(0x41, 0x0F);
1168 				else
1169 					EMIT1(0x0F);
1170 				EMIT1(add_1reg(0xC8, dst_reg));
1171 				break;
1172 			case 64:
1173 				/* Emit 'bswap rax' to swap 8 bytes */
1174 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1175 				      add_1reg(0xC8, dst_reg));
1176 				break;
1177 			}
1178 			break;
1179 
1180 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1181 			switch (imm32) {
1182 			case 16:
1183 				/*
1184 				 * Emit 'movzwl eax, ax' to zero extend 16-bit
1185 				 * into 64 bit
1186 				 */
1187 				if (is_ereg(dst_reg))
1188 					EMIT3(0x45, 0x0F, 0xB7);
1189 				else
1190 					EMIT2(0x0F, 0xB7);
1191 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1192 				break;
1193 			case 32:
1194 				/* Emit 'mov eax, eax' to clear upper 32-bits */
1195 				if (is_ereg(dst_reg))
1196 					EMIT1(0x45);
1197 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1198 				break;
1199 			case 64:
1200 				/* nop */
1201 				break;
1202 			}
1203 			break;
1204 
1205 			/* ST: *(u8*)(dst_reg + off) = imm */
1206 		case BPF_ST | BPF_MEM | BPF_B:
1207 			if (is_ereg(dst_reg))
1208 				EMIT2(0x41, 0xC6);
1209 			else
1210 				EMIT1(0xC6);
1211 			goto st;
1212 		case BPF_ST | BPF_MEM | BPF_H:
1213 			if (is_ereg(dst_reg))
1214 				EMIT3(0x66, 0x41, 0xC7);
1215 			else
1216 				EMIT2(0x66, 0xC7);
1217 			goto st;
1218 		case BPF_ST | BPF_MEM | BPF_W:
1219 			if (is_ereg(dst_reg))
1220 				EMIT2(0x41, 0xC7);
1221 			else
1222 				EMIT1(0xC7);
1223 			goto st;
1224 		case BPF_ST | BPF_MEM | BPF_DW:
1225 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1226 
1227 st:			if (is_imm8(insn->off))
1228 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
1229 			else
1230 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1231 
1232 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1233 			break;
1234 
1235 			/* STX: *(u8*)(dst_reg + off) = src_reg */
1236 		case BPF_STX | BPF_MEM | BPF_B:
1237 		case BPF_STX | BPF_MEM | BPF_H:
1238 		case BPF_STX | BPF_MEM | BPF_W:
1239 		case BPF_STX | BPF_MEM | BPF_DW:
1240 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1241 			break;
1242 
1243 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
1244 		case BPF_LDX | BPF_MEM | BPF_B:
1245 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1246 		case BPF_LDX | BPF_MEM | BPF_H:
1247 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1248 		case BPF_LDX | BPF_MEM | BPF_W:
1249 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1250 		case BPF_LDX | BPF_MEM | BPF_DW:
1251 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1252 			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1253 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1254 				struct exception_table_entry *ex;
1255 				u8 *_insn = image + proglen;
1256 				s64 delta;
1257 
1258 				if (!bpf_prog->aux->extable)
1259 					break;
1260 
1261 				if (excnt >= bpf_prog->aux->num_exentries) {
1262 					pr_err("ex gen bug\n");
1263 					return -EFAULT;
1264 				}
1265 				ex = &bpf_prog->aux->extable[excnt++];
1266 
1267 				delta = _insn - (u8 *)&ex->insn;
1268 				if (!is_simm32(delta)) {
1269 					pr_err("extable->insn doesn't fit into 32-bit\n");
1270 					return -EFAULT;
1271 				}
1272 				ex->insn = delta;
1273 
1274 				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1275 				if (!is_simm32(delta)) {
1276 					pr_err("extable->handler doesn't fit into 32-bit\n");
1277 					return -EFAULT;
1278 				}
1279 				ex->handler = delta;
1280 
1281 				if (dst_reg > BPF_REG_9) {
1282 					pr_err("verifier error\n");
1283 					return -EFAULT;
1284 				}
1285 				/*
1286 				 * Compute size of x86 insn and its target dest x86 register.
1287 				 * ex_handler_bpf() will use lower 8 bits to adjust
1288 				 * pt_regs->ip to jump over this x86 instruction
1289 				 * and upper bits to figure out which pt_regs to zero out.
1290 				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1291 				 * of 4 bytes will be ignored and rbx will be zero inited.
1292 				 */
1293 				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1294 			}
1295 			break;
1296 
1297 		case BPF_STX | BPF_ATOMIC | BPF_W:
1298 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1299 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
1300 			    insn->imm == (BPF_OR | BPF_FETCH) ||
1301 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
1302 				u8 *branch_target;
1303 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1304 
1305 				/*
1306 				 * Can't be implemented with a single x86 insn.
1307 				 * Need to do a CMPXCHG loop.
1308 				 */
1309 
1310 				/* Will need RAX as a CMPXCHG operand so save R0 */
1311 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1312 				branch_target = prog;
1313 				/* Load old value */
1314 				emit_ldx(&prog, BPF_SIZE(insn->code),
1315 					 BPF_REG_0, dst_reg, insn->off);
1316 				/*
1317 				 * Perform the (commutative) operation locally,
1318 				 * put the result in the AUX_REG.
1319 				 */
1320 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1321 				maybe_emit_mod(&prog, AUX_REG, src_reg, is64);
1322 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1323 				      add_2reg(0xC0, AUX_REG, src_reg));
1324 				/* Attempt to swap in new value */
1325 				err = emit_atomic(&prog, BPF_CMPXCHG,
1326 						  dst_reg, AUX_REG, insn->off,
1327 						  BPF_SIZE(insn->code));
1328 				if (WARN_ON(err))
1329 					return err;
1330 				/*
1331 				 * ZF tells us whether we won the race. If it's
1332 				 * cleared we need to try again.
1333 				 */
1334 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
1335 				/* Return the pre-modification value */
1336 				emit_mov_reg(&prog, is64, src_reg, BPF_REG_0);
1337 				/* Restore R0 after clobbering RAX */
1338 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1339 				break;
1340 
1341 			}
1342 
1343 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1344 						  insn->off, BPF_SIZE(insn->code));
1345 			if (err)
1346 				return err;
1347 			break;
1348 
1349 			/* call */
1350 		case BPF_JMP | BPF_CALL:
1351 			func = (u8 *) __bpf_call_base + imm32;
1352 			if (tail_call_reachable) {
1353 				EMIT3_off32(0x48, 0x8B, 0x85,
1354 					    -(bpf_prog->aux->stack_depth + 8));
1355 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1356 					return -EINVAL;
1357 			} else {
1358 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1359 					return -EINVAL;
1360 			}
1361 			break;
1362 
1363 		case BPF_JMP | BPF_TAIL_CALL:
1364 			if (imm32)
1365 				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1366 							  &prog, addrs[i], image,
1367 							  callee_regs_used,
1368 							  bpf_prog->aux->stack_depth);
1369 			else
1370 				emit_bpf_tail_call_indirect(&prog,
1371 							    callee_regs_used,
1372 							    bpf_prog->aux->stack_depth);
1373 			break;
1374 
1375 			/* cond jump */
1376 		case BPF_JMP | BPF_JEQ | BPF_X:
1377 		case BPF_JMP | BPF_JNE | BPF_X:
1378 		case BPF_JMP | BPF_JGT | BPF_X:
1379 		case BPF_JMP | BPF_JLT | BPF_X:
1380 		case BPF_JMP | BPF_JGE | BPF_X:
1381 		case BPF_JMP | BPF_JLE | BPF_X:
1382 		case BPF_JMP | BPF_JSGT | BPF_X:
1383 		case BPF_JMP | BPF_JSLT | BPF_X:
1384 		case BPF_JMP | BPF_JSGE | BPF_X:
1385 		case BPF_JMP | BPF_JSLE | BPF_X:
1386 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1387 		case BPF_JMP32 | BPF_JNE | BPF_X:
1388 		case BPF_JMP32 | BPF_JGT | BPF_X:
1389 		case BPF_JMP32 | BPF_JLT | BPF_X:
1390 		case BPF_JMP32 | BPF_JGE | BPF_X:
1391 		case BPF_JMP32 | BPF_JLE | BPF_X:
1392 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1393 		case BPF_JMP32 | BPF_JSLT | BPF_X:
1394 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1395 		case BPF_JMP32 | BPF_JSLE | BPF_X:
1396 			/* cmp dst_reg, src_reg */
1397 			maybe_emit_mod(&prog, dst_reg, src_reg,
1398 				       BPF_CLASS(insn->code) == BPF_JMP);
1399 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1400 			goto emit_cond_jmp;
1401 
1402 		case BPF_JMP | BPF_JSET | BPF_X:
1403 		case BPF_JMP32 | BPF_JSET | BPF_X:
1404 			/* test dst_reg, src_reg */
1405 			maybe_emit_mod(&prog, dst_reg, src_reg,
1406 				       BPF_CLASS(insn->code) == BPF_JMP);
1407 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1408 			goto emit_cond_jmp;
1409 
1410 		case BPF_JMP | BPF_JSET | BPF_K:
1411 		case BPF_JMP32 | BPF_JSET | BPF_K:
1412 			/* test dst_reg, imm32 */
1413 			if (BPF_CLASS(insn->code) == BPF_JMP)
1414 				EMIT1(add_1mod(0x48, dst_reg));
1415 			else if (is_ereg(dst_reg))
1416 				EMIT1(add_1mod(0x40, dst_reg));
1417 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1418 			goto emit_cond_jmp;
1419 
1420 		case BPF_JMP | BPF_JEQ | BPF_K:
1421 		case BPF_JMP | BPF_JNE | BPF_K:
1422 		case BPF_JMP | BPF_JGT | BPF_K:
1423 		case BPF_JMP | BPF_JLT | BPF_K:
1424 		case BPF_JMP | BPF_JGE | BPF_K:
1425 		case BPF_JMP | BPF_JLE | BPF_K:
1426 		case BPF_JMP | BPF_JSGT | BPF_K:
1427 		case BPF_JMP | BPF_JSLT | BPF_K:
1428 		case BPF_JMP | BPF_JSGE | BPF_K:
1429 		case BPF_JMP | BPF_JSLE | BPF_K:
1430 		case BPF_JMP32 | BPF_JEQ | BPF_K:
1431 		case BPF_JMP32 | BPF_JNE | BPF_K:
1432 		case BPF_JMP32 | BPF_JGT | BPF_K:
1433 		case BPF_JMP32 | BPF_JLT | BPF_K:
1434 		case BPF_JMP32 | BPF_JGE | BPF_K:
1435 		case BPF_JMP32 | BPF_JLE | BPF_K:
1436 		case BPF_JMP32 | BPF_JSGT | BPF_K:
1437 		case BPF_JMP32 | BPF_JSLT | BPF_K:
1438 		case BPF_JMP32 | BPF_JSGE | BPF_K:
1439 		case BPF_JMP32 | BPF_JSLE | BPF_K:
1440 			/* test dst_reg, dst_reg to save one extra byte */
1441 			if (imm32 == 0) {
1442 				maybe_emit_mod(&prog, dst_reg, dst_reg,
1443 					       BPF_CLASS(insn->code) == BPF_JMP);
1444 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1445 				goto emit_cond_jmp;
1446 			}
1447 
1448 			/* cmp dst_reg, imm8/32 */
1449 			if (BPF_CLASS(insn->code) == BPF_JMP)
1450 				EMIT1(add_1mod(0x48, dst_reg));
1451 			else if (is_ereg(dst_reg))
1452 				EMIT1(add_1mod(0x40, dst_reg));
1453 
1454 			if (is_imm8(imm32))
1455 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1456 			else
1457 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1458 
1459 emit_cond_jmp:		/* Convert BPF opcode to x86 */
1460 			switch (BPF_OP(insn->code)) {
1461 			case BPF_JEQ:
1462 				jmp_cond = X86_JE;
1463 				break;
1464 			case BPF_JSET:
1465 			case BPF_JNE:
1466 				jmp_cond = X86_JNE;
1467 				break;
1468 			case BPF_JGT:
1469 				/* GT is unsigned '>', JA in x86 */
1470 				jmp_cond = X86_JA;
1471 				break;
1472 			case BPF_JLT:
1473 				/* LT is unsigned '<', JB in x86 */
1474 				jmp_cond = X86_JB;
1475 				break;
1476 			case BPF_JGE:
1477 				/* GE is unsigned '>=', JAE in x86 */
1478 				jmp_cond = X86_JAE;
1479 				break;
1480 			case BPF_JLE:
1481 				/* LE is unsigned '<=', JBE in x86 */
1482 				jmp_cond = X86_JBE;
1483 				break;
1484 			case BPF_JSGT:
1485 				/* Signed '>', GT in x86 */
1486 				jmp_cond = X86_JG;
1487 				break;
1488 			case BPF_JSLT:
1489 				/* Signed '<', LT in x86 */
1490 				jmp_cond = X86_JL;
1491 				break;
1492 			case BPF_JSGE:
1493 				/* Signed '>=', GE in x86 */
1494 				jmp_cond = X86_JGE;
1495 				break;
1496 			case BPF_JSLE:
1497 				/* Signed '<=', LE in x86 */
1498 				jmp_cond = X86_JLE;
1499 				break;
1500 			default: /* to silence GCC warning */
1501 				return -EFAULT;
1502 			}
1503 			jmp_offset = addrs[i + insn->off] - addrs[i];
1504 			if (is_imm8(jmp_offset)) {
1505 				EMIT2(jmp_cond, jmp_offset);
1506 			} else if (is_simm32(jmp_offset)) {
1507 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1508 			} else {
1509 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1510 				return -EFAULT;
1511 			}
1512 
1513 			break;
1514 
1515 		case BPF_JMP | BPF_JA:
1516 			if (insn->off == -1)
1517 				/* -1 jmp instructions will always jump
1518 				 * backwards two bytes. Explicitly handling
1519 				 * this case avoids wasting too many passes
1520 				 * when there are long sequences of replaced
1521 				 * dead code.
1522 				 */
1523 				jmp_offset = -2;
1524 			else
1525 				jmp_offset = addrs[i + insn->off] - addrs[i];
1526 
1527 			if (!jmp_offset)
1528 				/* Optimize out nop jumps */
1529 				break;
1530 emit_jmp:
1531 			if (is_imm8(jmp_offset)) {
1532 				EMIT2(0xEB, jmp_offset);
1533 			} else if (is_simm32(jmp_offset)) {
1534 				EMIT1_off32(0xE9, jmp_offset);
1535 			} else {
1536 				pr_err("jmp gen bug %llx\n", jmp_offset);
1537 				return -EFAULT;
1538 			}
1539 			break;
1540 
1541 		case BPF_JMP | BPF_EXIT:
1542 			if (seen_exit) {
1543 				jmp_offset = ctx->cleanup_addr - addrs[i];
1544 				goto emit_jmp;
1545 			}
1546 			seen_exit = true;
1547 			/* Update cleanup_addr */
1548 			ctx->cleanup_addr = proglen;
1549 			pop_callee_regs(&prog, callee_regs_used);
1550 			EMIT1(0xC9);         /* leave */
1551 			EMIT1(0xC3);         /* ret */
1552 			break;
1553 
1554 		default:
1555 			/*
1556 			 * By design x86-64 JIT should support all BPF instructions.
1557 			 * This error will be seen if a new instruction was added
1558 			 * to the interpreter, but not to the JIT, or if there is
1559 			 * junk in bpf_prog.
1560 			 */
1561 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1562 			return -EINVAL;
1563 		}
1564 
1565 		ilen = prog - temp;
1566 		if (ilen > BPF_MAX_INSN_SIZE) {
1567 			pr_err("bpf_jit: fatal insn size error\n");
1568 			return -EFAULT;
1569 		}
1570 
1571 		if (image) {
1572 			if (unlikely(proglen + ilen > oldproglen)) {
1573 				pr_err("bpf_jit: fatal error\n");
1574 				return -EFAULT;
1575 			}
1576 			memcpy(image + proglen, temp, ilen);
1577 		}
1578 		proglen += ilen;
1579 		addrs[i] = proglen;
1580 		prog = temp;
1581 	}
1582 
1583 	if (image && excnt != bpf_prog->aux->num_exentries) {
1584 		pr_err("extable is not populated\n");
1585 		return -EFAULT;
1586 	}
1587 	return proglen;
1588 }
1589 
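/*
 * A simplified sketch of how the JIT entry point is expected to drive
 * do_jit() (not the verbatim caller): each pass fills addrs[] with per-insn
 * end offsets and returns the total length; since jump offsets computed
 * from the previous pass can only shrink, the caller iterates until the
 * length stops changing, then allocates the image and runs one more pass
 * with 'image' non-NULL to actually write the bytes:
 *
 *	for (pass = 0; pass < 20; pass++) {
 *		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
 *		if (image && proglen == oldproglen)
 *			break;			// converged
 *		oldproglen = proglen;
 *		// allocate 'image' once the size has settled
 *	}
 */
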
1590 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1591 		      int stack_size)
1592 {
1593 	int i;
1594 	/* Store function arguments to stack.
1595 	 * For a function that accepts two pointers the sequence will be:
1596 	 * mov QWORD PTR [rbp-0x10],rdi
1597 	 * mov QWORD PTR [rbp-0x8],rsi
1598 	 */
1599 	for (i = 0; i < min(nr_args, 6); i++)
1600 		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1601 			 BPF_REG_FP,
1602 			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1603 			 -(stack_size - i * 8));
1604 }
1605 
1606 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1607 			 int stack_size)
1608 {
1609 	int i;
1610 
1611 	/* Restore function arguments from stack.
1612 	 * For a function that accepts two pointers the sequence will be:
1613 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1614 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1615 	 */
1616 	for (i = 0; i < min(nr_args, 6); i++)
1617 		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1618 			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1619 			 BPF_REG_FP,
1620 			 -(stack_size - i * 8));
1621 }
1622 
1623 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1624 			   struct bpf_prog *p, int stack_size, bool mod_ret)
1625 {
1626 	u8 *prog = *pprog;
1627 	int cnt = 0;
1628 
1629 	if (p->aux->sleepable) {
1630 		if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
1631 			return -EINVAL;
1632 	} else {
1633 		if (emit_call(&prog, __bpf_prog_enter, prog))
1634 			return -EINVAL;
1635 		/* remember prog start time returned by __bpf_prog_enter */
1636 		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1637 	}
1638 
1639 	/* arg1: lea rdi, [rbp - stack_size] */
1640 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1641 	/* arg2: progs[i]->insnsi for interpreter */
1642 	if (!p->jited)
1643 		emit_mov_imm64(&prog, BPF_REG_2,
1644 			       (long) p->insnsi >> 32,
1645 			       (u32) (long) p->insnsi);
1646 	/* call JITed bpf program or interpreter */
1647 	if (emit_call(&prog, p->bpf_func, prog))
1648 		return -EINVAL;
1649 
1650 	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1651 	 * value of the previous call, which is then passed on the stack to
1652 	 * the next BPF program.
1653 	 */
1654 	if (mod_ret)
1655 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1656 
1657 	if (p->aux->sleepable) {
1658 		if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
1659 			return -EINVAL;
1660 	} else {
1661 		/* arg1: mov rdi, progs[i] */
1662 		emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
1663 			       (u32) (long) p);
1664 		/* arg2: mov rsi, rbx <- start time in nsec */
1665 		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1666 		if (emit_call(&prog, __bpf_prog_exit, prog))
1667 			return -EINVAL;
1668 	}
1669 
1670 	*pprog = prog;
1671 	return 0;
1672 }
1673 
1674 static void emit_nops(u8 **pprog, unsigned int len)
1675 {
1676 	unsigned int i, noplen;
1677 	u8 *prog = *pprog;
1678 	int cnt = 0;
1679 
1680 	while (len > 0) {
1681 		noplen = len;
1682 
1683 		if (noplen > ASM_NOP_MAX)
1684 			noplen = ASM_NOP_MAX;
1685 
1686 		for (i = 0; i < noplen; i++)
1687 			EMIT1(ideal_nops[noplen][i]);
1688 		len -= noplen;
1689 	}
1690 
1691 	*pprog = prog;
1692 }
1693 
1694 static void emit_align(u8 **pprog, u32 align)
1695 {
1696 	u8 *target, *prog = *pprog;
1697 
1698 	target = PTR_ALIGN(prog, align);
1699 	if (target != prog)
1700 		emit_nops(&prog, target - prog);
1701 
1702 	*pprog = prog;
1703 }
1704 
1705 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1706 {
1707 	u8 *prog = *pprog;
1708 	int cnt = 0;
1709 	s64 offset;
1710 
1711 	offset = func - (ip + 2 + 4);
1712 	if (!is_simm32(offset)) {
1713 		pr_err("Target %p is out of range\n", func);
1714 		return -EINVAL;
1715 	}
1716 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1717 	*pprog = prog;
1718 	return 0;
1719 }
1720 
1721 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1722 		      struct bpf_tramp_progs *tp, int stack_size)
1723 {
1724 	int i;
1725 	u8 *prog = *pprog;
1726 
1727 	for (i = 0; i < tp->nr_progs; i++) {
1728 		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
1729 			return -EINVAL;
1730 	}
1731 	*pprog = prog;
1732 	return 0;
1733 }
1734 
1735 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1736 			      struct bpf_tramp_progs *tp, int stack_size,
1737 			      u8 **branches)
1738 {
1739 	u8 *prog = *pprog;
1740 	int i, cnt = 0;
1741 
1742 	/* The first fmod_ret program will receive a garbage return value.
1743 	 * Set this to 0 to avoid confusing the program.
1744 	 */
1745 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1746 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1747 	for (i = 0; i < tp->nr_progs; i++) {
1748 		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1749 			return -EINVAL;
1750 
1751 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
1752 		 * if (*(u64 *)(rbp - 8) !=  0)
1753 		 *	goto do_fexit;
1754 		 */
1755 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
1756 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1757 
1758 		/* Save the location of the branch, and generate 6 nops
1759 		 * (4 bytes for an offset and 2 bytes for the jump). These nops
1760 		 * are replaced with a conditional jump once do_fexit (i.e. the
1761 		 * start of the fexit invocation) is finalized.
1762 		 */
1763 		branches[i] = prog;
1764 		emit_nops(&prog, 4 + 2);
1765 	}
1766 
1767 	*pprog = prog;
1768 	return 0;
1769 }
1770 
1771 /* Example:
1772  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1773  * its 'struct btf_func_model' will have nr_args=2
1774  * The assembly code when eth_type_trans is executing after trampoline:
1775  *
1776  * push rbp
1777  * mov rbp, rsp
1778  * sub rsp, 16                     // space for skb and dev
1779  * push rbx                        // temp regs to pass start time
1780  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1781  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1782  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1783  * mov rbx, rax                    // remember start time if bpf stats are enabled
1784  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1785  * call addr_of_jited_FENTRY_prog
1786  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1787  * mov rsi, rbx                    // prog start time
1788  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1789  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1790  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1791  * pop rbx
1792  * leave
1793  * ret
1794  *
1795  * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
1796  * replaced with 'call generated_bpf_trampoline'. When it returns
1797  * eth_type_trans will continue executing with original skb and dev pointers.
1798  *
1799  * The assembly code when eth_type_trans is called from trampoline:
1800  *
1801  * push rbp
1802  * mov rbp, rsp
1803  * sub rsp, 24                     // space for skb, dev, return value
1804  * push rbx                        // temp regs to pass start time
1805  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1806  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1807  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1808  * mov rbx, rax                    // remember start time if bpf stats are enabled
1809  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1810  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1811  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1812  * mov rsi, rbx                    // prog start time
1813  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1814  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1815  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1816  * call eth_type_trans+5           // execute body of eth_type_trans
1817  * mov qword ptr [rbp - 8], rax    // save return value
1818  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1819  * mov rbx, rax                    // remember start time if bpf stats are enabled
1820  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1821  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1822  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1823  * mov rsi, rbx                    // prog start time
1824  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1825  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1826  * pop rbx
1827  * leave
1828  * add rsp, 8                      // skip eth_type_trans's frame
1829  * ret                             // return to its caller
1830  */
1831 int arch_prepare_bpf_trampoline(void *image, void *image_end,
1832 				const struct btf_func_model *m, u32 flags,
1833 				struct bpf_tramp_progs *tprogs,
1834 				void *orig_call)
1835 {
1836 	int ret, i, cnt = 0, nr_args = m->nr_args;
1837 	int stack_size = nr_args * 8;
1838 	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1839 	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1840 	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1841 	u8 **branches = NULL;
1842 	u8 *prog;
1843 
1844 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1845 	if (nr_args > 6)
1846 		return -ENOTSUPP;
1847 
1848 	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1849 	    (flags & BPF_TRAMP_F_SKIP_FRAME))
1850 		return -EINVAL;
1851 
1852 	if (flags & BPF_TRAMP_F_CALL_ORIG)
1853 		stack_size += 8; /* room for return value of orig_call */
1854 
1855 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
1856 		/* skip patched call instruction and point orig_call to actual
1857 		 * body of the kernel function.
1858 		 */
1859 		orig_call += X86_PATCH_SIZE;
1860 
1861 	prog = image;
1862 
1863 	EMIT1(0x55);		 /* push rbp */
1864 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1865 	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
1866 	EMIT1(0x53);		 /* push rbx */
1867 
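	/*
	 * Illustrative stack slot layout used by save_regs()/restore_regs()
	 * and the [rbp - 8] return value slot, assuming nr_args == 2 and
	 * BPF_TRAMP_F_CALL_ORIG (matching the eth_type_trans example above):
	 *
	 *   [rbp -  8]  return value of orig_call
	 *   [rbp - 16]  second argument (rsi)
	 *   [rbp - 24]  first argument (rdi)
	 */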
1868 	save_regs(m, &prog, nr_args, stack_size);
1869 
1870 	if (fentry->nr_progs)
1871 		if (invoke_bpf(m, &prog, fentry, stack_size))
1872 			return -EINVAL;
1873 
1874 	if (fmod_ret->nr_progs) {
1875 		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
1876 				   GFP_KERNEL);
1877 		if (!branches)
1878 			return -ENOMEM;
1879 
1880 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
1881 				       branches)) {
1882 			ret = -EINVAL;
1883 			goto cleanup;
1884 		}
1885 	}
1886 
1887 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
1888 		if (fentry->nr_progs || fmod_ret->nr_progs)
1889 			restore_regs(m, &prog, nr_args, stack_size);
1890 
1891 		/* call original function */
1892 		if (emit_call(&prog, orig_call, prog)) {
1893 			ret = -EINVAL;
1894 			goto cleanup;
1895 		}
1896 		/* remember return value on the stack for bpf prog to access */
1897 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1898 	}
1899 
1900 	if (fmod_ret->nr_progs) {
1901 		/* From Intel 64 and IA-32 Architectures Optimization
1902 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
1903 		 * Coding Rule 11: All branch targets should be 16-byte
1904 		 * aligned.
1905 		 */
1906 		emit_align(&prog, 16);
1907 		/* Update the branches saved in invoke_bpf_mod_ret with the
1908 		 * aligned address of do_fexit.
1909 		 */
1910 		for (i = 0; i < fmod_ret->nr_progs; i++)
1911 			emit_cond_near_jump(&branches[i], prog, branches[i],
1912 					    X86_JNE);
1913 	}
1914 
1915 	if (fexit->nr_progs)
1916 		if (invoke_bpf(m, &prog, fexit, stack_size)) {
1917 			ret = -EINVAL;
1918 			goto cleanup;
1919 		}
1920 
1921 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
1922 		restore_regs(m, &prog, nr_args, stack_size);
1923 
1924 	/* This needs to be done regardless. If there were fmod_ret programs,
1925 	 * the return value is only updated on the stack and still needs to be
1926 	 * restored to R0.
1927 	 */
1928 	if (flags & BPF_TRAMP_F_CALL_ORIG)
1929 		/* restore original return value back into RAX */
1930 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
1931 
1932 	EMIT1(0x5B); /* pop rbx */
1933 	EMIT1(0xC9); /* leave */
1934 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
1935 		/* skip our return address and return to parent */
1936 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
1937 	EMIT1(0xC3); /* ret */
1938 	/* Make sure the trampoline generation logic doesn't overflow */
1939 	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
1940 		ret = -EFAULT;
1941 		goto cleanup;
1942 	}
1943 	ret = prog - (u8 *)image;
1944 
1945 cleanup:
1946 	kfree(branches);
1947 	return ret;
1948 }
1949 
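/*
 * Fallback tail used by the dispatcher leaves below: if the target address
 * in rdx did not match any of the listed programs, jump to it indirectly
 * (through a retpoline thunk when CONFIG_RETPOLINE is set).
 */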
1950 static int emit_fallback_jump(u8 **pprog)
1951 {
1952 	u8 *prog = *pprog;
1953 	int err = 0;
1954 
1955 #ifdef CONFIG_RETPOLINE
1956 	/* Note that this assumes that the compiler uses external
1957 	 * thunks for indirect calls. Both clang and GCC use the same
1958 	 * naming convention for external thunks.
1959 	 */
1960 	err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
1961 #else
1962 	int cnt = 0;
1963 
1964 	EMIT2(0xFF, 0xE2);	/* jmp rdx */
1965 #endif
1966 	*pprog = prog;
1967 	return err;
1968 }
1969 
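/*
 * Emit a binary search over the (sorted) progs[a..b] addresses, with the
 * target address expected in rdx (BPF_REG_3). A rough sketch of the code
 * generated for two programs {P0, P1} (illustrative only, not emitted
 * verbatim):
 *
 *	cmp rdx, P0
 *	jg  upper
 *	cmp rdx, P0
 *	je  P0
 *	jmp fallback
 *	(pad to a 16-byte boundary)
 * upper:
 *	cmp rdx, P1
 *	je  P1
 *	jmp fallback
 */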
1970 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
1971 {
1972 	u8 *jg_reloc, *prog = *pprog;
1973 	int pivot, err, jg_bytes = 1, cnt = 0;
1974 	s64 jg_offset;
1975 
1976 	if (a == b) {
1977 		/* Leaf node of recursion, i.e. not a range of indices
1978 		 * anymore.
1979 		 */
1980 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
1981 		if (!is_simm32(progs[a]))
1982 			return -1;
1983 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
1984 			    progs[a]);
1985 		err = emit_cond_near_jump(&prog,	/* je func */
1986 					  (void *)progs[a], prog,
1987 					  X86_JE);
1988 		if (err)
1989 			return err;
1990 
1991 		err = emit_fallback_jump(&prog);	/* jmp thunk/indirect */
1992 		if (err)
1993 			return err;
1994 
1995 		*pprog = prog;
1996 		return 0;
1997 	}
1998 
1999 	/* Not a leaf node, so we pivot, and recursively descend into
2000 	 * the lower and upper ranges.
2001 	 */
2002 	pivot = (b - a) / 2;
2003 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
2004 	if (!is_simm32(progs[a + pivot]))
2005 		return -1;
2006 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2007 
2008 	if (pivot > 2) {				/* jg upper_part */
2009 		/* Require near jump. */
2010 		jg_bytes = 4;
2011 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2012 	} else {
2013 		EMIT2(X86_JG, 0);
2014 	}
2015 	jg_reloc = prog;
2016 
2017 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
2018 				  progs);
2019 	if (err)
2020 		return err;
2021 
2022 	/* From Intel 64 and IA-32 Architectures Optimization
2023 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2024 	 * Coding Rule 11: All branch targets should be 16-byte
2025 	 * aligned.
2026 	 */
2027 	emit_align(&prog, 16);
2028 	jg_offset = prog - jg_reloc;
2029 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2030 
2031 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
2032 				  b, progs);
2033 	if (err)
2034 		return err;
2035 
2036 	*pprog = prog;
2037 	return 0;
2038 }
2039 
2040 static int cmp_ips(const void *a, const void *b)
2041 {
2042 	const s64 *ipa = a;
2043 	const s64 *ipb = b;
2044 
2045 	if (*ipa > *ipb)
2046 		return 1;
2047 	if (*ipa < *ipb)
2048 		return -1;
2049 	return 0;
2050 }
2051 
2052 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2053 {
2054 	u8 *prog = image;
2055 
2056 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2057 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2058 }
2059 
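/*
 * JIT state stashed in prog->aux->jit_data between the initial compile and
 * the extra pass (see extra_pass below), so that the image, header and
 * addrs[] from the first run can be reused once call addresses are
 * finalized.
 */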
2060 struct x64_jit_data {
2061 	struct bpf_binary_header *header;
2062 	int *addrs;
2063 	u8 *image;
2064 	int proglen;
2065 	struct jit_context ctx;
2066 };
2067 
2068 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2069 {
2070 	struct bpf_binary_header *header = NULL;
2071 	struct bpf_prog *tmp, *orig_prog = prog;
2072 	struct x64_jit_data *jit_data;
2073 	int proglen, oldproglen = 0;
2074 	struct jit_context ctx = {};
2075 	bool tmp_blinded = false;
2076 	bool extra_pass = false;
2077 	u8 *image = NULL;
2078 	int *addrs;
2079 	int pass;
2080 	int i;
2081 
2082 	if (!prog->jit_requested)
2083 		return orig_prog;
2084 
2085 	tmp = bpf_jit_blind_constants(prog);
2086 	/*
2087 	 * If blinding was requested and we failed during blinding,
2088 	 * we must fall back to the interpreter.
2089 	 */
2090 	if (IS_ERR(tmp))
2091 		return orig_prog;
2092 	if (tmp != prog) {
2093 		tmp_blinded = true;
2094 		prog = tmp;
2095 	}
2096 
2097 	jit_data = prog->aux->jit_data;
2098 	if (!jit_data) {
2099 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2100 		if (!jit_data) {
2101 			prog = orig_prog;
2102 			goto out;
2103 		}
2104 		prog->aux->jit_data = jit_data;
2105 	}
2106 	addrs = jit_data->addrs;
2107 	if (addrs) {
2108 		ctx = jit_data->ctx;
2109 		oldproglen = jit_data->proglen;
2110 		image = jit_data->image;
2111 		header = jit_data->header;
2112 		extra_pass = true;
2113 		goto skip_init_addrs;
2114 	}
2115 	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2116 	if (!addrs) {
2117 		prog = orig_prog;
2118 		goto out_addrs;
2119 	}
2120 
2121 	/*
2122 	 * Before the first pass, make a rough estimation of addrs[]:
2123 	 * each BPF instruction is translated to less than 64 bytes.
2124 	 */
2125 	for (proglen = 0, i = 0; i <= prog->len; i++) {
2126 		proglen += 64;
2127 		addrs[i] = proglen;
2128 	}
2129 	ctx.cleanup_addr = proglen;
2130 skip_init_addrs:
2131 
2132 	/*
2133 	 * JITed image shrinks with every pass and the loop iterates
2134 	 * until the image stops shrinking. Very large BPF programs
2135 	 * may converge on the last pass. In such a case, do one more
2136 	 * pass to emit the final image.
2137 	 */
2138 	for (pass = 0; pass < 20 || image; pass++) {
2139 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
2140 		if (proglen <= 0) {
2141 out_image:
2142 			image = NULL;
2143 			if (header)
2144 				bpf_jit_binary_free(header);
2145 			prog = orig_prog;
2146 			goto out_addrs;
2147 		}
2148 		if (image) {
2149 			if (proglen != oldproglen) {
2150 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2151 				       proglen, oldproglen);
2152 				goto out_image;
2153 			}
2154 			break;
2155 		}
2156 		if (proglen == oldproglen) {
2157 			/*
2158 			 * The number of entries in extable is the number of BPF_LDX
2159 			 * insns that access kernel memory via "pointer to BTF type".
2160 			 * The verifier changed their opcode from LDX|MEM|size
2161 			 * to LDX|PROBE_MEM|size to make JITing easier.
2162 			 */
2163 			u32 align = __alignof__(struct exception_table_entry);
2164 			u32 extable_size = prog->aux->num_exentries *
2165 				sizeof(struct exception_table_entry);
2166 
2167 			/* allocate module memory for x86 insns and extable */
2168 			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
2169 						      &image, align, jit_fill_hole);
2170 			if (!header) {
2171 				prog = orig_prog;
2172 				goto out_addrs;
2173 			}
2174 			prog->aux->extable = (void *) image + roundup(proglen, align);
2175 		}
2176 		oldproglen = proglen;
2177 		cond_resched();
2178 	}
2179 
2180 	if (bpf_jit_enable > 1)
2181 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
2182 
2183 	if (image) {
2184 		if (!prog->is_func || extra_pass) {
2185 			bpf_tail_call_direct_fixup(prog);
2186 			bpf_jit_binary_lock_ro(header);
2187 		} else {
2188 			jit_data->addrs = addrs;
2189 			jit_data->ctx = ctx;
2190 			jit_data->proglen = proglen;
2191 			jit_data->image = image;
2192 			jit_data->header = header;
2193 		}
2194 		prog->bpf_func = (void *)image;
2195 		prog->jited = 1;
2196 		prog->jited_len = proglen;
2197 	} else {
2198 		prog = orig_prog;
2199 	}
2200 
2201 	if (!image || !prog->is_func || extra_pass) {
2202 		if (image)
2203 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
2204 out_addrs:
2205 		kfree(addrs);
2206 		kfree(jit_data);
2207 		prog->aux->jit_data = NULL;
2208 	}
2209 out:
2210 	if (tmp_blinded)
2211 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2212 					   tmp : orig_prog);
2213 	return prog;
2214 }
2215