/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <linux/bpf.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do {EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
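
/* A worked example (illustrative): EMIT3(0x48, 0x89, 0xE5) packs its
 * arguments as 0x48 + (0x89 << 8) + (0xE5 << 16); emit_code() stores a
 * full little-endian u32 but advances the pointer by only len bytes, so
 * the image receives 48 89 e5 ('mov rbp,rsp', as used in the prologue
 * below) and the stray fourth byte is overwritten by the next EMIT.
 */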

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
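
/* Example (illustrative): EMIT_mov(BPF_REG_0, AUX_REG) emits 4c 89 d8,
 * i.e. 'mov rax,r11': add_2mod() turns the 0x48 REX.W prefix into 0x4C
 * by setting REX.R for the extended register r11, 0x89 is the
 * 'mov r/m64,r64' opcode and add_2reg() builds the ModRM byte from the
 * reg2hex[] table below.
 */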

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 conditional jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F
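
/* Example (illustrative): EMIT2(X86_JE, off) encodes 'je .+off' with an
 * 8-bit displacement; the far form emitted by the JIT is
 * EMIT2_off32(0x0F, X86_JE + 0x10, off), i.e. 0f 84 followed by a
 * 32-bit displacement.
 */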

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
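
/* The choice is made at JIT time: a constant negative offset in
 * [SKF_LL_OFF, 0) selects the *_negative_offset helper, an offset below
 * SKF_LL_OFF falls back to the generic helper (which dispatches at run
 * time) and a non-negative offset selects the *_positive_offset fast
 * path.
 */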

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15,
 * which need an extra byte of encoding.
 * rax, rcx, ..., rbp have a simpler encoding.
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9));
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
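
/* Example (illustrative): add_2reg(0xC0, BPF_REG_1, BPF_REG_2) returns
 * 0xF7 (0xC0 + rdi + (rsi << 3)), so together with opcode 0x89 and a
 * 0x48 REX.W prefix this encodes 'mov rdi,rsi'.
 */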

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define STACKSIZE \
	(MAX_BPF_STACK + \
	 32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)
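
/* With MAX_BPF_STACK == 512 this gives STACKSIZE == 552. A sketch of
 * the frame built by emit_prologue(), derived from the offsets emitted
 * below:
 *   [rbp - 552]  saved rbx
 *   [rbp - 544]  saved r13
 *   [rbp - 536]  saved r14
 *   [rbp - 528]  saved r15
 *   [rbp - 520]  8-byte slot cleared in the prologue; its upper dword
 *                ([rbp - 516]) holds tail_call_cnt
 *   [rbp - 512]  bottom of the MAX_BPF_STACK-byte BPF program stack,
 *                which reaches up to rbp (BPF_REG_FP)
 */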

#define PROLOGUE_SIZE 51

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, STACKSIZE */
	EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as a temporary, so all tcpdump filters need to spill/fill R7(r13)
	 * and R8(r14). The R9(r15) spill could be made conditional, but there
	 * is only one 'bpf_error' return path out of the helper functions in
	 * bpf_jit.S. The overhead of the extra spill is negligible for any
	 * filter other than synthetic ones, so it is not worth the added
	 * complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */

	/* clear tail_call_cnt: mov qword ptr [rbp-X], rax */
	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->prog[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
	      offsetof(struct bpf_array, map.max_entries));
	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
#define OFFSET1 44 /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 33
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */

	/* prog = array->prog[index]; */
	EMIT4(0x48, 0x8D, 0x44, 0xD6);            /* lea rax, [rsi + rdx * 8 + 0x50] */
	EMIT1(offsetof(struct bpf_array, prog));
	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
#define OFFSET3 10
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into the next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	EMIT2(0xFF, 0xE0);                        /* jmp rax */

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog);

	if (seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
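			/* Example (illustrative): BPF_ALU64 | BPF_ADD | BPF_X
			 * with dst_reg == BPF_REG_1 and src_reg == BPF_REG_2
			 * emits 48 01 f7, i.e. 'add rdi,rsi'
			 */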
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

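			/* fall through: a non-negative imm32 is handled by
			 * the 32-bit mov below, which zero-extends it into
			 * 64 bit
			 */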
		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}
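
			/* A BPF_LD_IMM64 constant spans two 8-byte insns:
			 * insn[0].imm carries the low 32 bits and insn[1].imm
			 * the high 32 bits, so a single movabsq consumes both
			 * and insn/i are advanced by one extra step below
			 */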

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

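			/* x86 div uses rdx:rax implicitly and clobbers both,
			 * which is why rax and rdx are saved above and
			 * restored below; the divisor is staged in r11
			 * (AUX_REG)
			 */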
			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

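			/* x86 variable shifts take the count only in %cl, so
			 * src_reg is moved into rcx below (preserving rcx
			 * around the shift) and a dst_reg of rcx itself is
			 * detoured through r11
			 */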
			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero-extend the
				 * lower 16 bits into 64 bits
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
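			/* Example (illustrative): BPF_ST | BPF_MEM | BPF_W
			 * with dst_reg == BPF_REG_1, off == 8 and imm == 5
			 * emits c7 47 08 05 00 00 00, i.e.
			 * 'mov dword ptr [rdi+8], 5'
			 */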
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we could save one byte, but the
			 * special case of x86 r13, which always needs an
			 * offset, is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
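			/* the 0xF0 LOCK prefix below is what makes the add
			 * atomic, as BPF_XADD requires
			 */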
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;

		case BPF_JMP | BPF_CALL | BPF_X:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
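			/* Example (illustrative): a short BPF_JEQ branch
			 * emits 74 xx ('je .+imm8'); a long one emits 0f 84
			 * plus a 32-bit displacement
			 */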
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* the skb pointer is in R6 (%rbx); it will be copied
			 * into %rdi if a skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF
			 * instructions. This error will be seen if a new
			 * instruction was added to the interpreter but not to
			 * the JIT, or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to fewer than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	/* The JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs may
	 * converge only on the last pass. In such a case, do one more
	 * pass to emit the final image
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = true;
	}
out:
	kfree(addrs);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}
1128