/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>

/*
 * Conventions:
 *  EAX : BPF A accumulator
 *  EBX : BPF X accumulator
 *  RDI : pointer to skb   (first argument given to JIT function)
 *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
 *  ECX,EDX,ESI : scratch registers
 *  r9d : skb->len - skb->data_len (headlen)
 *  r8  : skb->data
 * -8(RBP) : saved RBX value
 * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
 */
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4); } while (0)
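/* A worked example of the byte packing above, assuming the usual
 * little-endian x86 layout: EMIT3(0x83, 0xc0, 10) builds the value
 * 0x000ac083; emit_code() stores it through a u32 pointer, so the bytes
 * land in instruction order as 83 c0 0a, i.e. "add $10,%eax".  For
 * len == 3 a full u32 is written, but the stray fourth byte is simply
 * overwritten by the next EMIT (prog only advances by len).
 */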

#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */

static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_near(int offset)
{
	return offset <= 127 && offset >= -128;
}

#define EMIT_JMP(offset)						\
do {									\
	if (offset) {							\
		if (is_near(offset))					\
			EMIT2(0xeb, offset); /* jmp .+off8 */		\
		else							\
			EMIT1_off32(0xe9, offset); /* jmp .+off32 */	\
	}								\
} while (0)

/* list of x86 conditional jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77

#define EMIT_COND_JMP(op, offset)				\
do {								\
	if (is_near(offset))					\
		EMIT2(op, offset); /* jxx .+off8 */		\
	else {							\
		EMIT2(0x0f, op + 0x10);				\
		EMIT(offset, 4); /* jxx .+off32 */		\
	}							\
} while (0)
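/* For example, X86_JE (0x74) encodes as "74 off8" when the target is
 * within an s8, and as "0f 84 off32" otherwise: 0x74 + 0x10 = 0x84 is
 * the second opcode byte of the two-byte far-jump form.
 */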

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch


#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG    2 /* ebx is used */
#define SEEN_MEM     4 /* use mem[] for temporary storage */
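/* e.g. a filter using an X-relative load (LD+IND) and a ST instruction
 * accumulates seen = SEEN_DATAREF | SEEN_XREG | SEEN_MEM, so the
 * prologue below saves %rbx, preloads %r9d/%r8 and reserves the mem[]
 * slots on the stack.
 */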

static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
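/* Dispatch on the sign of the constant offset K: non-negative offsets
 * use the _positive_offset helper; negative ones down to SKF_LL_OFF
 * (the SKF_NET_OFF/SKF_LL_OFF relative accesses) use the
 * _negative_offset helper; anything below SKF_LL_OFF falls back to the
 * generic helper, which checks the offset at run time.
 */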

void bpf_jit_compile(struct sk_filter *fp)
{
	u8 temp[64];
	u8 *prog;
	unsigned int proglen, oldproglen = 0;
	int ilen, i;
	int t_offset, f_offset;
	u8 t_op, f_op, seen = 0, pass;
	u8 *image = NULL;
	u8 *func;
	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
	unsigned int cleanup_addr; /* epilogue code offset */
	unsigned int *addrs;
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to fewer than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
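	/* e.g. a 3-instruction filter starts out with addrs[] =
	 * {64, 128, 192} and proglen = 192; the passes below shrink
	 * these to the real instruction offsets.
	 */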
	cleanup_addr = proglen; /* epilogue address */

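	/* Convergence loop: each pass re-emits the whole program using
	 * the addrs[] of the previous pass, so jumps whose targets now
	 * fit in an s8 shrink from their off32 to their off8 form and
	 * proglen should only decrease (growth while writing into the
	 * image is treated as a fatal error below).  Once two passes
	 * produce the same length, the image is allocated and one final
	 * pass writes the code into it.
	 */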
	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		if (seen_or_pass0) {
			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
			EMIT4(0x48, 0x83, 0xec, 96);	/* subq  $96,%rsp	*/
			/* note : must save %rbx in case bpf_error is hit */
			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
			if (seen_or_pass0 & SEEN_XREG)
				CLEAR_X(); /* make sure we don't leak kernel memory */

			/*
			 * If this filter needs to access skb data,
			 * load %r9d and %r8 with:
			 *  r9 = skb->len - skb->data_len
			 *  r8 = skb->data
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				if (offsetof(struct sk_buff, len) <= 127)
					/* mov    off8(%rdi),%r9d */
					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
				else {
					/* mov    off32(%rdi),%r9d */
					EMIT3(0x44, 0x8b, 0x8f);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				if (is_imm8(offsetof(struct sk_buff, data_len)))
					/* sub    off8(%rdi),%r9d */
					EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
				else {
					EMIT3(0x44, 0x2b, 0x8f);
					EMIT(offsetof(struct sk_buff, data_len), 4);
				}

				if (is_imm8(offsetof(struct sk_buff, data)))
					/* mov off8(%rdi),%r8 */
					EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
				else {
					/* mov off32(%rdi),%r8 */
					EMIT3(0x4c, 0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, data), 4);
				}
			}
		}

		switch (filter[0].code) {
		case BPF_S_RET_K:
		case BPF_S_LD_W_LEN:
		case BPF_S_ANC_PROTOCOL:
		case BPF_S_ANC_IFINDEX:
		case BPF_S_ANC_MARK:
		case BPF_S_ANC_RXHASH:
		case BPF_S_ANC_CPU:
		case BPF_S_ANC_QUEUE:
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			/* first instruction sets A register (or is RET 'constant') */
			break;
		default:
			/* make sure we don't leak kernel information to user */
			CLEAR_A(); /* A = 0 */
		}

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;

			switch (filter[i].code) {
			case BPF_S_ALU_ADD_X: /* A += X; */
				seen |= SEEN_XREG;
				EMIT2(0x01, 0xd8);		/* add %ebx,%eax */
				break;
			case BPF_S_ALU_ADD_K: /* A += K; */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xc0, K);	/* add imm8,%eax */
				else
					EMIT1_off32(0x05, K);	/* add imm32,%eax */
				break;
			case BPF_S_ALU_SUB_X: /* A -= X; */
				seen |= SEEN_XREG;
				EMIT2(0x29, 0xd8);		/* sub    %ebx,%eax */
				break;
			case BPF_S_ALU_SUB_K: /* A -= K */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
				else
					EMIT1_off32(0x2d, K); /* sub imm32,%eax */
				break;
			case BPF_S_ALU_MUL_X: /* A *= X; */
				seen |= SEEN_XREG;
				EMIT3(0x0f, 0xaf, 0xc3);	/* imul %ebx,%eax */
				break;
			case BPF_S_ALU_MUL_K: /* A *= K */
				if (is_imm8(K))
					EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
				else {
					EMIT2(0x69, 0xc0);		/* imul imm32,%eax */
					EMIT(K, 4);
				}
				break;
			case BPF_S_ALU_DIV_X: /* A /= X; */
				seen |= SEEN_XREG;
				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
				if (pc_ret0 > 0) {
					/* addrs[pc_ret0 - 1] is the start address of the target
					 * (addrs[i] - 4) is the address following this jmp
					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
					 */
					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
								(addrs[i] - 4));
				} else {
					EMIT_COND_JMP(X86_JNE, 2 + 5);
					CLEAR_A();
					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
				}
				EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
				break;
			case BPF_S_ALU_MOD_X: /* A %= X; */
				seen |= SEEN_XREG;
				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
				if (pc_ret0 > 0) {
					/* addrs[pc_ret0 - 1] is the start address of the target
					 * (addrs[i] - 6) is the address following this jmp
					 * ("xor %edx,%edx; div %ebx; mov %edx,%eax" being 6 bytes long)
					 */
					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
								(addrs[i] - 6));
				} else {
					EMIT_COND_JMP(X86_JNE, 2 + 5);
					CLEAR_A();
					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
				}
				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
				EMIT2(0xf7, 0xf3);	/* div %ebx */
				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
				break;
			case BPF_S_ALU_MOD_K: /* A %= K; */
				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
				EMIT1(0xb9); EMIT(K, 4);	/* mov imm32,%ecx */
				EMIT2(0xf7, 0xf1);	/* div %ecx */
				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
				break;
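			/* For DIV_K, K is not the original divisor: the
			 * filter checker precomputed
			 * K = reciprocal_value(divisor), so the imul/shr
			 * pair below computes
			 * A = (u32)(((u64)A * K) >> 32) without a div insn.
			 */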
			case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
				EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
				EMIT(K, 4);
				EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
				break;
			case BPF_S_ALU_AND_X:
				seen |= SEEN_XREG;
				EMIT2(0x21, 0xd8);		/* and %ebx,%eax */
				break;
			case BPF_S_ALU_AND_K:
				if (K >= 0xFFFFFF00) {
					EMIT2(0x24, K & 0xFF); /* and imm8,%al */
				} else if (K >= 0xFFFF0000) {
					EMIT2(0x66, 0x25);	/* and imm16,%ax */
					EMIT(K, 2);
				} else {
					EMIT1_off32(0x25, K);	/* and imm32,%eax */
				}
				break;
			case BPF_S_ALU_OR_X:
				seen |= SEEN_XREG;
				EMIT2(0x09, 0xd8);		/* or %ebx,%eax */
				break;
			case BPF_S_ALU_OR_K:
				if (is_imm8(K))
					EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
				else
					EMIT1_off32(0x0d, K);	/* or imm32,%eax */
				break;
			case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
			case BPF_S_ALU_XOR_X:
				seen |= SEEN_XREG;
				EMIT2(0x31, 0xd8);		/* xor %ebx,%eax */
				break;
			case BPF_S_ALU_XOR_K: /* A ^= K; */
				if (K == 0)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xf0, K);	/* xor imm8,%eax */
				else
					EMIT1_off32(0x35, K);	/* xor imm32,%eax */
				break;
			case BPF_S_ALU_LSH_X: /* A <<= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe0);	/* mov %ebx,%ecx; shl %cl,%eax */
				break;
			case BPF_S_ALU_LSH_K:
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe0); /* shl %eax */
				else
					EMIT3(0xc1, 0xe0, K); /* shl imm8,%eax */
				break;
			case BPF_S_ALU_RSH_X: /* A >>= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe8);	/* mov %ebx,%ecx; shr %cl,%eax */
				break;
			case BPF_S_ALU_RSH_K: /* A >>= K; */
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe8); /* shr %eax */
				else
					EMIT3(0xc1, 0xe8, K); /* shr imm8,%eax */
				break;
			case BPF_S_ALU_NEG:
				EMIT2(0xf7, 0xd8);		/* neg %eax */
				break;
			case BPF_S_RET_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					CLEAR_A();
				} else {
					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
				}
				/* fall through */
			case BPF_S_RET_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						EMIT_JMP(cleanup_addr - addrs[i]);
						break;
					}
					if (seen_or_pass0 & SEEN_XREG)
						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
					EMIT1(0xc9);		/* leaveq */
				}
				EMIT1(0xc3);		/* ret */
				break;
			case BPF_S_MISC_TAX: /* X = A */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xc3);	/* mov    %eax,%ebx */
				break;
			case BPF_S_MISC_TXA: /* A = X */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xd8);	/* mov    %ebx,%eax */
				break;
			case BPF_S_LD_IMM: /* A = K */
				if (!K)
					CLEAR_A();
				else
					EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
				break;
			case BPF_S_LDX_IMM: /* X = K */
				seen |= SEEN_XREG;
				if (!K)
					CLEAR_X();
				else
					EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
				break;
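			/* mem[K] words live in the stack frame: the disp8
			 * byte is 0xf0 - 4*K, i.e. -(16 + 4*K) as an s8, so
			 * mem[0] sits at -16(%rbp) and mem[15] at -76(%rbp),
			 * inside the 96 bytes reserved by the prologue.
			 */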
			case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
				seen |= SEEN_MEM;
				EMIT3(0x8b, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x8b, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
				seen |= SEEN_MEM;
				EMIT3(0x89, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x89, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_LD_W_LEN: /* A = skb->len; */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov    off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_LDX_W_LEN: /* X = skb->len; */
				seen |= SEEN_XREG;
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov off8(%rdi),%ebx */
					EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x9f);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
				if (is_imm8(offsetof(struct sk_buff, protocol))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, protocol), 4);
				}
				EMIT2(0x86, 0xc4); /* ntohs() : xchg   %al,%ah */
				break;
			case BPF_S_ANC_IFINDEX:
				if (is_imm8(offsetof(struct sk_buff, dev))) {
					/* movq off8(%rdi),%rax */
					EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
				} else {
					EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
					EMIT(offsetof(struct sk_buff, dev), 4);
				}
				EMIT3(0x48, 0x85, 0xc0);	/* test %rax,%rax */
				EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
				EMIT2(0x8b, 0x80);	/* mov off32(%rax),%eax */
				EMIT(offsetof(struct net_device, ifindex), 4);
				break;
			case BPF_S_ANC_MARK:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
				if (is_imm8(offsetof(struct sk_buff, mark))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
				} else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, mark), 4);
				}
				break;
			case BPF_S_ANC_RXHASH:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
				if (is_imm8(offsetof(struct sk_buff, rxhash))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
				} else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, rxhash), 4);
				}
				break;
			case BPF_S_ANC_QUEUE:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
				if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, queue_mapping), 4);
				}
				break;
			case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
				EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
				EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
#else
				CLEAR_A();
#endif
				break;
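			/* The sk_load_* helpers (bpf_jit.S) use a private
			 * calling convention: skb stays in %rdi, the offset
			 * is passed in %esi, and the result comes back in
			 * %eax.  On a failed load they bail out to bpf_error,
			 * which unwinds the frame and returns 0; this is why
			 * the prologue must save %rbx whenever SEEN_DATAREF
			 * is set.
			 */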
			case BPF_S_LD_W_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
common_load:			seen |= SEEN_DATAREF;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K); /* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call */
				break;
			case BPF_S_LD_H_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_half);
				goto common_load;
			case BPF_S_LD_B_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
				goto common_load;
			case BPF_S_LDX_B_MSH:
				func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
				seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
				break;
			case BPF_S_LD_W_IND:
				func = sk_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = func - (image + addrs[i]);
				if (K) {
					if (is_imm8(K)) {
						EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx),%esi */
					} else {
						EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
						EMIT(K, 4);
					}
				} else {
					EMIT2(0x89, 0xde); /* mov %ebx,%esi */
				}
				EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
				break;
			case BPF_S_LD_H_IND:
				func = sk_load_half;
				goto common_load_ind;
			case BPF_S_LD_B_IND:
				func = sk_load_byte;
				goto common_load_ind;
			case BPF_S_JMP_JA:
				t_offset = addrs[i + K] - addrs[i];
				EMIT_JMP(t_offset);
				break;
			COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_K, X86_JNE, X86_JE);
			COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_X, X86_JNE, X86_JE);

cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
				t_offset = addrs[i + filter[i].jt] - addrs[i];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					EMIT_JMP(t_offset);
					break;
				}

				switch (filter[i].code) {
				case BPF_S_JMP_JGT_X:
				case BPF_S_JMP_JGE_X:
				case BPF_S_JMP_JEQ_X:
					seen |= SEEN_XREG;
					EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
					break;
				case BPF_S_JMP_JSET_X:
					seen |= SEEN_XREG;
					EMIT2(0x85, 0xd8); /* test %ebx,%eax */
					break;
				case BPF_S_JMP_JEQ_K:
					if (K == 0) {
						EMIT2(0x85, 0xc0); /* test   %eax,%eax */
						break;
					}
					/* fall through */
				case BPF_S_JMP_JGT_K:
				case BPF_S_JMP_JGE_K:
					if (K <= 127)
						EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
					else
						EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
					break;
				case BPF_S_JMP_JSET_K:
					if (K <= 0xFF)
						EMIT2(0xa8, K); /* test imm8,%al */
					else if (!(K & 0xFFFF00FF))
						EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
					else if (K <= 0xFFFF) {
						EMIT2(0x66, 0xa9); /* test imm16,%ax */
						EMIT(K, 2);
					} else {
						EMIT1_off32(0xa9, K); /* test imm32,%eax */
					}
					break;
				}
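				/* A non-zero jt needs a conditional jump to
				 * the true target; if a false jump follows as
				 * well, t_offset must also skip over it:
				 * 2 bytes when jf is emitted as a near jmp,
				 * else 5 bytes for jmp rel32.
				 */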
				if (filter[i].jt != 0) {
					if (filter[i].jf && f_offset)
						t_offset += is_near(f_offset) ? 2 : 5;
					EMIT_COND_JMP(t_op, t_offset);
					if (filter[i].jf)
						EMIT_JMP(f_offset);
					break;
				}
				EMIT_COND_JMP(f_op, f_offset);
				break;
			default:
				/* hmm, too complex a filter, give up on the JIT compiler */
				goto out;
			}
			ilen = prog - temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_free(NULL, image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* The last BPF instruction is always a RET:
		 * use it to locate the cleanup (epilogue) instructions.
		 */
		cleanup_addr = proglen - 1; /* ret */
		if (seen_or_pass0)
			cleanup_addr -= 1; /* leaveq */
		if (seen_or_pass0 & SEEN_XREG)
			cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(max_t(unsigned int,
						   proglen,
						   sizeof(struct work_struct)));
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}
	if (bpf_jit_enable > 1)
		pr_err("flen=%d proglen=%u pass=%d image=%p\n",
		       flen, proglen, pass, image);

	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
				       16, 1, image, proglen, false);

		bpf_flush_icache(image, image + proglen);

		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* Runs from softirq; we must use a work_struct to call
 * module_free() from process context.
 */
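/* The JIT image itself is reused as the work_struct: bpf_jit_compile()
 * allocated at least sizeof(struct work_struct) bytes for it, so
 * overwriting the (now dead) code with the work item is safe and needs
 * no extra allocation here.
 */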
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}
704