/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */
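/*
 * Note: r4-r8 are callee-saved under the AAPCS, so the BPF state held
 * in A, X and the cached skb fields survives the C helper calls made
 * on the load slow paths (the prologue saves whichever of them are
 * actually used).
 */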

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

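/*
 * Usage bitmap kept in jit_ctx::seen: bits 0..BPF_MEMWORDS-1 mark the
 * scratch memory words the filter touches; the bits above them record
 * whether X, a helper call, the skb pointer or the packet data are
 * ever needed, so the prologue and epilogue only set up what is used.
 */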
#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)

struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
		      unsigned int size)
{
	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

	if (!ptr)
		return -EFAULT;
	memcpy(ret, ptr, size);
	return 0;
}

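/*
 * The load helpers below return a u64 with the error code in the upper
 * 32 bits and the loaded value (converted to host byte order where
 * needed) in the lower 32 bits.  A u64 is returned in r0/r1, so the
 * JITed slow path can test r1 (the error word on little-endian EABI)
 * and take the result from r0.
 */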
static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
	u16 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 2);
	else
		err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
	u32 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 4);
	else
		err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}

/*
 * Wrapper that handles both OABI and EABI and ensures Thumb2 interworking
 * (where assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}

static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		return true;
	default:
		return false;
	}
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

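/*
 * imm8m() tries to encode a 32-bit constant as an ARM "modified
 * immediate": an 8-bit value rotated right by an even amount.  On
 * success it returns a 12-bit operand with the rotation/2 in bits
 * [11:8] and the byte in bits [7:0]; on failure it returns -1.
 * For example, 0x00ff0000 == ror32(0xff, 16) encodes as 0x8ff.
 */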
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}

#if __LINUX_ARM_ARCH__ < 7

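/*
 * Pre-ARMv7 cores have no MOVW/MOVT, so constants that are not valid
 * immediates live in a literal pool placed right after the epilogue
 * and are fetched with a PC-relative LDR.  The LDR offset field is
 * only 12 bits, so a pool entry more than 4095 bytes away sets
 * FLAG_IMM_OVERFLOW and the filter is left to the interpreter.
 */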
static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * The literal pool is too far away; signal it via the
		 * flags.  Unfortunately we can only detect this on the
		 * second pass.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ < 7 */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */


/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
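
/*
 * Example: a branch emitted at JITed word index idx targets code that
 * starts offsets[tgt] bytes past the prologue.  In ARM mode the CPU
 * reads PC as the branch address plus 8, hence the
 * "- (ctx->idx * 4 + 8)"; the B encoding takes a word offset, hence
 * the final ">> 2".
 */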

#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)

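/*
 * Error return: if the filter contains a "ret #0" instruction, branch
 * to its JITed code (ret0_fp_idx); otherwise load 0 into r0 and branch
 * to the epilogue.  Both shapes are two instructions long, so the code
 * size stays the same even if ret0_fp_idx differs between the passes.
 */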
static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

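/*
 * Unsigned divide: use the ARMv7 UDIV instruction when the CPU
 * advertises it, otherwise call jit_udiv() with the dividend in r0
 * and the divisor in r1 (the AAPCS argument registers); the quotient
 * comes back in r0.
 */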
static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		emit(ARM_UDIV(rd, rm, rn), ctx);
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
	 * (r_A) and rn is ARM_R0 (r_scratch), so load rn first into
	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
	 * before using it as a source for ARM_R1.
	 *
	 * For BPF_ALU | BPF_DIV | BPF_X, rm is ARM_R4 (r_A) and rn is
	 * ARM_R5 (r_X), so there are no particular register overlap
	 * issues.
	 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

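/*
 * A read of X that can happen before any write to it means the
 * prologue must zero r_X (FLAG_NEED_X_RESET), so the filter never
 * sees whatever the caller left in that register.
 */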
static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}

static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_GE;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}
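			/*
			 * condt is now the fast-path condition: the
			 * access lies within the linear head, i.e.
			 * headlen - (1 << load_order) >= off for the
			 * multi-byte loads and headlen > off for single
			 * bytes (the test below additionally rejects
			 * negative offsets).
			 */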

			/*
			 * test for negative offset, only if we are
			 * currently scheduled to take the fast
			 * path. this will update the flags so that
			 * the slowpath instructions are ignored if the
			 * offset is negative.
			 *
			 * for load_order == 0 the HI condition will
			 * make loads at offset 0 take the slow path too.
			 */
			_emit(condt, ARM_CMP_I(r_off, 0), ctx);

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udiv(r_A, r_A, r_scratch, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udiv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K; */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt  = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt  = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt  = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt   = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt   = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt   = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt  = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt  = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
			else {
				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_ABS:
			/*
			 * load a 32bit word from struct seccomp_data.
			 * seccomp_check_filter() will already have checked
			 * that k is 32bit aligned and lies within the
			 * struct seccomp_data.
			 */
			ctx->seen |= SEEN_SKB;
			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
			break;
		default:
			return -1;
		}

		if (ctx->flags & FLAG_IMM_OVERFLOW)
			/*
			 * this instruction generated an overflow when
			 * trying to access the literal pool, so
			 * delegate this filter to the kernel interpreter.
			 */
			return -1;
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}


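/*
 * Two-pass JIT: the first run of build_body()/build_prologue() has
 * ctx.target == NULL and only measures - it fills ctx.offsets[],
 * ctx.seen and (pre-ARMv7) the literal-pool count.  The second run
 * emits the identical instruction sequence into the buffer returned
 * by bpf_jit_binary_alloc().
 */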
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
		if (ctx.imm_count)
			kfree(ctx.imms);
#endif
		bpf_jit_binary_free(header);
		goto out;
	}
	build_epilogue(&ctx);

	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = true;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}
1033