/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do {EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

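/* Worked example (an illustration added for clarity, not in the original
 * source): the EMIT* macros pack up to four opcode bytes into a u32, which
 * emit_code() stores little-endian and then advances 'prog' by 'len'.  For
 * instance, the prologue below does EMIT3(0x48, 0x89, 0xE5): the packed
 * value is 0x00E58948, the store leaves the bytes 48 89 e5 = 'mov rbp,rsp'
 * in the image, and prog moves forward by 3.  EMIT3_off32() appends a
 * further 4-byte immediate/displacement after those opcode bytes.
 */
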
static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

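/* Example (illustration only): these two helpers select between the short
 * and long x86 encodings.  An ALU op with K = 100 fits in imm8 and uses the
 * '83 /r ib' form (e.g. 48 83 c3 64 = 'add rbx,0x64'), while K = 1000 needs
 * the '81 /r id' form with a 32-bit immediate.  is_simm32() plays the same
 * role for call and jump displacements.
 */
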
/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

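/* Worked example (not part of the original file): EMIT_mov(BPF_REG_1, BPF_REG_2)
 * expands to EMIT3(add_2mod(0x48, R1, R2), 0x89, add_2reg(0xC0, R1, R2)).
 * Neither rdi nor rsi is an extended register, so the REX prefix stays 0x48
 * and the ModRM byte is 0xC0 + 7 + (6 << 3) = 0xF7, giving the bytes
 * 48 89 f7 = 'mov rdi,rsi'.  When DST == SRC nothing is emitted.
 */
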
static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F

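/* Example (illustration): a conditional jump with an 8-bit displacement is
 * emitted as EMIT2(jmp_cond, off), e.g. 74 05 = 'je .+5'.  When the target
 * is out of s8 range, emit_cond_jmp below switches to the two-byte 0F 8x
 * form via EMIT2_off32(0x0F, jmp_cond + 0x10, off), e.g. 0f 84 <rel32>.
 */
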
static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

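/* Usage sketch (added for clarity): for BPF_LD|BPF_ABS loads the constant
 * offset K is known at JIT time, so CHOOSE_LOAD_FUNC(K, sk_load_word) picks
 * sk_load_word_positive_offset for K >= 0, sk_load_word_negative_offset for
 * SKF_LL_OFF <= K < 0, and otherwise falls back to the generic sk_load_word
 * entry point in bpf_jit.S, which examines the offset at run time.
 */
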
/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};

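/* Example (illustration): the table stores only the low 3 bits of the x64
 * register number.  BPF_REG_6 -> 3 selects rbx directly, while BPF_REG_8 ->
 * 6 really means r14: the missing high bit is supplied as a REX prefix bit
 * by is_ereg()/add_1mod()/add_2mod() below.
 */
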
/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15,
 * which need an extra byte of encoding (a REX prefix).
 * rax, rcx, ..., rbp have a simpler encoding.
 */
static inline bool is_ereg(u32 reg)
{
	if (reg == BPF_REG_5 || reg == AUX_REG ||
	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
		return true;
	else
		return false;
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static inline u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static inline u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

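/* Worked example (not part of the original source): for a 64-bit register
 * move of r14 into rbx (BPF_ALU64|BPF_MOV|BPF_X, dst = BPF_REG_6,
 * src = BPF_REG_8), add_2mod(0x48, R6, R8) sets the REX bit for the extended
 * source and yields 0x4C, while add_2reg(0xC0, R6, R8) gives
 * 0xC0 + 3 + (6 << 3) = 0xF3, so the JIT emits 4c 89 f3 = 'mov rbx,r14'.
 */
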
static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	unsigned int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

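/* Note on addrs[] (added for clarity): addrs[i] holds the offset of the end
 * of eBPF instruction i in the image produced by the previous pass.  Branch
 * targets are therefore computed as addrs[i + insn->off] - addrs[i], and the
 * array is refined on every pass of bpf_int_jit_compile() until the layout
 * stops changing.
 */
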
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i;
	int proglen = 0;
	u8 *prog = temp;
	int stacksize = MAX_BPF_STACK +
		32 /* space for rbx, r13, r14, r15 */ +
		8 /* space for skb_copy_bits() buffer */;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, stacksize */
	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as a temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). The R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of the helper functions inside bpf_jit.S.
	 * The overhead of the extra spill is negligible for any filter other
	 * than synthetic ones, so it is not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */

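	/* Stack frame sketch (derived from the offsets above, added for
	 * clarity):
	 *   [rbp - stacksize +  0]  saved rbx
	 *   [rbp - stacksize +  8]  saved r13
	 *   [rbp - stacksize + 16]  saved r14
	 *   [rbp - stacksize + 24]  saved r15
	 * The remaining MAX_BPF_STACK + 8 bytes up to rbp hold the BPF stack
	 * (addressed through BPF_REG_FP == rbp) plus an 8-byte scratch buffer
	 * for the skb_copy_bits() slow path in bpf_jit.S.
	 */
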
	if (seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (i != insn_cnt - 1) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
			 * to the interpreter but not to the JIT,
			 * or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

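	/* The JIT runs up to 10 converging passes (note added for clarity):
	 * each pass re-emits the program using the addrs[] produced by the
	 * previous one, so jump encodings can only shrink from the initial
	 * 64-bytes-per-insn over-estimate.  Once two consecutive passes yield
	 * the same proglen the image is allocated, and one final pass writes
	 * the instructions into it.
	 */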
	for (pass = 0; pass < 10; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = true;
	}
out:
	kfree(addrs);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}