/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */
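
/*
 * Note: r4-r8 are callee-saved under the AAPCS, so A, X and the cached
 * skb pointers survive the calls the generated code makes into C
 * helpers (jit_udiv and the jit_get_skb_* slowpaths).
 */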

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
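
/*
 * Layout of ctx->seen (BPF_MEMWORDS is 16, so the low half tracks the
 * individual scratch words):
 *
 *	bits 0-15	scratch word M[k] used (SEEN_MEM_WORD(k))
 *	bit  16		SEEN_X    - the X register is read
 *	bit  17		SEEN_CALL - the generated code calls C helpers
 *	bit  18		SEEN_SKB  - the skb pointer is needed
 *	bit  19		SEEN_DATA - skb->data/skb_headlen() are needed
 */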

#define FLAG_NEED_X_RESET	(1 << 0)

struct jit_ctx {
	const struct sk_filter *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
	u8 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
	u16 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
	u32 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
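
/*
 * The helpers above fold both results of skb_copy_bits() into one u64:
 * the error code in the high word and the (byte-swapped) value in the
 * low word. Per the EABI a u64 is returned in r0/r1, so the emitted
 * slowpaths below can test r1 for the error and take the value from r0.
 */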

/*
 * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst | (cond << 28);

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == BPF_S_RET_A))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}

static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_S_LD_W_LEN:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_ANC_QUEUE:
		return true;
	default:
		return false;
	}
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}
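
/*
 * After the prologue, SP points at scratch word 0, so BPF scratch word
 * M[k] lives at SP + 4 * k, which is exactly what SCRATCH_OFF(k)
 * computes (SCRATCH_SP_OFFSET being 0).
 */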

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
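
/*
 * Worked example: imm8m(0x00ff0000). ror32(0xff, 16) == 0x00ff0000, so
 * the value matches with rot == 8 and the function returns
 * rol32(0x00ff0000, 16) | (8 << 8) == 0x8ff, the "encoded immediate
 * 0x00ff0000" used with ARM_BIC_I in emit_swap16() below. A constant
 * like 0x00ff00ff spans more than 8 bits and yields -1, forcing the
 * callers through emit_mov_i_no8m().
 */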

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	return imm;
}
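
/*
 * On ARMv5/v6 the image is laid out as
 *
 *	[ prologue ][ body ][ epilogue ][ literal pool ]
 *
 * so the i-th constant sits at byte offset offsets[len] +
 * prologue_bytes + epilogue_bytes + 4 * i, and the value imm_offset()
 * returns is that address minus the current PC (instruction address
 * plus 8 in ARM mode), ready for a PC-relative LDR.
 */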

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */


/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
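
/*
 * Example: a branch emitted at ctx->idx == 10 towards a BPF instruction
 * whose body offset is 64 gets the byte displacement
 * 64 + prologue_bytes - (10 * 4 + 8); the branch instruction encodes a
 * word count, hence the final >> 2.
 */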

#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)
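
/*
 * OP_IMM3 example: "A += 0x00ff0000" becomes a single ADD with the
 * rotated immediate 0x8ff, while "A += 0x12345" does not fit any
 * rotation, so the constant is first moved into r_scratch and the
 * register form of the operator is used.
 */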

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		emit(ARM_UDIV(rd, rm, rn), ctx);
		return;
	}
#endif
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}
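
/*
 * Note: the argument shuffle above would clobber rn if it lived in r0,
 * since r0 is written first. The only call site in this file is
 * emit_udiv(r_A, r_A, r_X), i.e. (r4, r4, r5), so both moves are safe.
 */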

static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}

static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct sk_filter *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (inst->code) {
		case BPF_S_LD_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_S_LD_W_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_S_LD_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_LD_W_ABS:
			load_order = 2;
			goto load;
		case BPF_S_LD_H_ABS:
			load_order = 1;
			goto load;
		case BPF_S_LD_B_ABS:
			load_order = 0;
load:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_HS;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}
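
			/*
			 * The comparison above enforces
			 * off + (1 << load_order) <= skb_headlen: for 2/4
			 * byte loads it is phrased as headlen - size >= off
			 * (unsigned HS), for byte loads as headlen > off
			 * (unsigned HI). Only then does the inline fastpath
			 * run; otherwise execution falls through to the
			 * skb_copy_bits() slowpath below.
			 */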

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_S_LD_W_IND:
			load_order = 2;
			goto load_ind;
		case BPF_S_LD_H_IND:
			load_order = 1;
			goto load_ind;
		case BPF_S_LD_B_IND:
			load_order = 0;
load_ind:
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_S_LDX_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_S_LDX_W_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_S_LDX_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_LDX_B_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_S_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_ALU_ADD_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_ADD_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_SUB_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_SUB_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_MUL_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_S_ALU_MUL_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_DIV_K:
			/* current k == reciprocal_value(userspace k) */
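			/*
			 * The quotient is the high word of the 32x32->64
			 * multiply, A = (u32)(((u64)A * K') >> 32): UMULL
			 * puts that high word straight into r_A, while the
			 * low word lands in r_scratch and is discarded.
			 */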
			emit_mov_i(r_scratch, k, ctx);
			/* A = top 32 bits of the product */
			emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
			break;
		case BPF_S_ALU_DIV_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udiv(r_A, r_A, r_X, ctx);
			break;
		case BPF_S_ALU_OR_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_OR_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_XOR_K:
			/* A ^= K */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_AND_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_AND_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_LSH_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_S_ALU_LSH_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_RSH_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_S_ALU_RSH_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_S_JMP_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_S_JMP_JEQ_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_S_JMP_JGT_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_S_JMP_JGE_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
		case BPF_S_JMP_JEQ_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_S_JMP_JGT_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_S_JMP_JGE_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_S_JMP_JSET_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_S_JMP_JSET_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_S_RET_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_S_RET_K:
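			/*
			 * Remember the first "ret 0" so that emit_err_ret()
			 * can branch to it as a shared error exit instead of
			 * emitting its own "mov r0, #0; b <epilogue>" pair.
			 */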
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_S_MISC_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_S_MISC_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_S_ANC_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_S_ANC_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_S_ANC_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_S_ANC_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_S_ANC_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			off = offsetof(struct sk_buff, rxhash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_S_ANC_VLAN_TAG:
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (inst->code == BPF_S_ANC_VLAN_TAG)
				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
			else
				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
			break;
		case BPF_S_ANC_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		default:
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}

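/*
 * The JIT makes two passes: a "fake" pass with ctx.target == NULL that
 * only sizes the image (filling ctx.offsets and ctx.seen, and counting
 * the literals on ARMv5/v6), then a real pass that emits instructions
 * into the freshly allocated buffer. Both passes must produce the same
 * number of instructions per BPF insn or the recorded offsets would be
 * wrong; see the NOP padding in emit_err_ret().
 */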
void bpf_jit_compile(struct sk_filter *fp)
{
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (unlikely(ctx.target == NULL))
		goto out;

	ctx.idx = 0;
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter)
		module_free(NULL, fp->bpf_func);
	kfree(fp);
}
935