/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <linux/bpf.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do { if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 conditional jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f prefix byte) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
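
/* How the macro above picks a helper (a sketch; the split is an assumption
 * based on the helpers in bpf_jit.S): a constant K >= 0 selects the
 * _positive_offset fast path; K in [SKF_LL_OFF, -1] is a link-layer
 * relative load and selects the _negative_offset variant; any other K
 * falls back to the generic helper, which classifies the offset at
 * run time.
 */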

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* The following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since when used as a base address register
 * in load/store instructions it always needs an extra byte of encoding.
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15,
 * which need an extra byte of encoding (a REX prefix bit).
 * rax,rcx,...,rbp have a simpler encoding.
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9));
}
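
/* How the two tables above fit the hardware encoding (a sketch): an x86-64
 * register number is 4 bits. reg2hex[] supplies the low 3 bits that go into
 * the ModRM byte, and is_ereg() decides whether the 4th bit must be set via
 * a REX prefix. E.g. r13 is register 13 = 0b1101: reg2hex[BPF_REG_7] == 5
 * (0b101), and is_ereg(BPF_REG_7) is true.
 */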

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
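
/* Worked example (for illustration): EMIT_mov(BPF_REG_1, BPF_REG_3) expands
 * to EMIT3(add_2mod(0x48, R1, R3), 0x89, add_2reg(0xC0, R1, R3)). Neither
 * register is an ereg, reg2hex[BPF_REG_1] == 7 (rdi) and
 * reg2hex[BPF_REG_3] == 2 (rdx), so the bytes are 48 89 D7,
 * i.e. 'mov rdi, rdx'.
 */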

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64
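
/* Note (added for clarity): do_jit() emits each insn into a temporary
 * buffer of BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY bytes and checks the
 * emitted length only after the fact; BPF_INSN_SAFETY is the slack that
 * keeps those unchecked writes in bounds.
 */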

#define STACKSIZE \
	(MAX_BPF_STACK + \
	 32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)
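
/* Resulting frame layout, low to high address (a sketch implied by the
 * offsets used in emit_prologue() below):
 *   rbp - STACKSIZE          : saved rbx
 *   rbp - STACKSIZE + 8      : saved r13
 *   rbp - STACKSIZE + 16     : saved r14
 *   rbp - STACKSIZE + 24     : saved r15
 *   rbp - STACKSIZE + 32     : 8-byte scratch slot, zeroed in the prologue
 *                              and read as the tail-call counter at
 *                              [rbp - STACKSIZE + 36] in emit_bpf_tail_call()
 *   rbp - MAX_BPF_STACK..rbp : BPF program stack, addressed via
 *                              BPF_REG_FP (rbp)
 */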

#define PROLOGUE_SIZE 48

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, STACKSIZE */
	EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as a temporary, so all tcpdump filters need to spill/fill R7(r13)
	 * and R8(r14). The R9(r15) spill could be made conditional, but there
	 * is only one 'bpf_error' return path out of the helper functions
	 * inside bpf_jit.S, and the overhead of the extra spill is negligible
	 * for any filter other than synthetic ones, so it is not worth the
	 * added complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);

	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
	 * we need to reset the counter to 0. It's done in two instructions:
	 * resetting the rax register to 0 (xor on eax gets zero-extended)
	 * and moving it to the counter location.
	 */

	/* xor eax, eax */
	EMIT2(0x31, 0xc0);
	/* mov qword ptr [rbp-X], rax */
	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}
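
/* Byte accounting for PROLOGUE_SIZE, worked out for illustration:
 * push rbp (1) + mov rbp,rsp (3) + sub rsp,imm32 (7) + four
 * mov [rbp-disp32],reg saves (7 each = 28) + xor eax,eax (2) +
 * mov [rbp-disp32],rax (7) = 48, which the BUILD_BUG_ON() above verifies.
 */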

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
	      offsetof(struct bpf_array, map.max_entries));
	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
#define OFFSET1 47 /* number of bytes to jump */
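/* OFFSET1/2/3 are hand-counted byte distances from each conditional jump
 * to the 'out' label; the BUILD_BUG_ON()s at the end of this function
 * verify them. As a worked example, OFFSET3 covers mov rax,[rax+off8] (4) +
 * add rax,imm8 (4) + jmp rax (2) = 10 bytes.
 */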
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 36
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));
	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
#define OFFSET3 10
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into the next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	EMIT2(0xFF, 0xE0);                        /* jmp rax */

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}


static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov r9d, dword ptr [rdi + offsetof(len)] */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub r9d, dword ptr [rdi + offsetof(data_len)] */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov r10, qword ptr [rdi + offsetof(data)] */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog);

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
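
			/* Worked example (for illustration):
			 * BPF_ALU64|BPF_ADD|BPF_K with dst_reg == BPF_REG_0
			 * and imm32 == 1 takes the is_imm8() path above and
			 * emits 48 83 C0 01, i.e. 'add rax, 1'.
			 */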

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}
			/* fallthrough: a non-negative imm32 takes the
			 * zero-extending 32-bit path below
			 */

		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;
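
			/* Note (added for clarity): BPF_LD|BPF_IMM|BPF_DW is
			 * a double-length BPF insn; the 64-bit immediate is
			 * split across insn[0].imm (low 32 bits) and
			 * insn[1].imm (high 32 bits), hence the extra
			 * insn++/i++ above.
			 */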

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

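			/* x86 div divides rdx:rax by the operand; the
			 * quotient lands in rax and the remainder in rdx,
			 * which is why rdx is zeroed above and why BPF_MOD
			 * takes its result from rdx while BPF_DIV takes it
			 * from rax below.
			 */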
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}
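
			/* Variable-count shifts on x86 take the count only
			 * in %cl, hence the shuffle here: a dst of rcx is
			 * parked in r11 above, and src is moved into rcx
			 * below.
			 */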

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero-extend 16 bits
				 * into 64 bits
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear the upper 32 bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;
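
			/* Worked example (for illustration):
			 * BPF_ST|BPF_MEM|BPF_W with dst_reg == BPF_REG_1
			 * (rdi), off == 8, imm == 5 emits
			 * C7 47 08 05 00 00 00,
			 * i.e. 'mov dword ptr [rdi + 8], 5'.
			 */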

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add an extra byte for the x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we could save one extra byte, but
			 * the special case of x86 r13, which always needs an
			 * offset, makes it not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_skb_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;
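
			/* Note on the r9/r10 bookkeeping above (added for
			 * clarity): the sk_load_* helpers cache skb->data in
			 * r10 and the headlen in r9d. A helper that rewrites
			 * skb data invalidates that cache, so %rdi (the skb)
			 * is saved and the cache reloaded after the call:
			 * pop (1) + mov/sub/mov (7 each) account for the 22
			 * bytes added to jmp_offset. Otherwise r9/r10 are
			 * merely preserved around the call: two pops = 4
			 * bytes.
			 */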

		case BPF_JMP | BPF_CALL | BPF_X:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
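
			/* E.g. (illustrative): X86_JE is 0x74 for the short
			 * form 'je .+imm8'; the near form emitted above is
			 * 0F 84 imm32 -- the opcode plus 0x10 behind a 0x0F
			 * prefix, as noted next to the X86_J* defines.
			 */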

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* The skb pointer is in R6 (%rbx); it will be copied
			 * into %rdi if a skb_copy_bits() call is necessary.
			 * The sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT is meant to support all BPF
			 * instructions. This error is only seen if a new
			 * instruction was added to the interpreter but not to
			 * the JIT, or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
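
	/* addrs[i] holds the offset of the end of BPF insn i in the JITed
	 * image. Since the image only shrinks between passes (e.g. a jump
	 * tightening from its rel32 to its rel8 form), the 64-byte estimate
	 * above is an upper bound and the loop below converges.
	 */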

	/* The JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs may
	 * converge only on the last pass; in such a case, do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = 1;
	}
out:
	kfree(addrs);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}
1162