xref: /openbmc/linux/arch/arm/net/bpf_jit_32.c (revision 2d972b6a)
/*
 * Just-In-Time compiler for eBPF filters on 32bit ARM
 *
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * eBPF prog stack layout:
 *
 *                         high
 * original ARM_SP =>     +-----+
 *                        |     | callee saved registers
 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
 *                        | ... | eBPF JIT scratch space
 * eBPF fp register =>    +-----+
 *   (BPF_FP)             | ... | eBPF prog stack
 *                        +-----+
 *                        |RSVD | JIT scratchpad
 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
 *                        |     |
 *                        | ... | Function call stack
 *                        |     |
 *                        +-----+
 *                          low
 *
 * The callee saved registers depend on whether frame pointers are enabled.
 * With frame pointers (to be compliant with the ABI):
 *
 *                                high
 * original ARM_SP =>     +------------------+ \
 *                        |        pc        | |
 * current ARM_FP =>      +------------------+ } callee saved registers
 *                        |r4-r8,r10,fp,ip,lr| |
 *                        +------------------+ /
 *                                low
 *
 * Without frame pointers:
 *
 *                                high
 * original ARM_SP =>     +------------------+
 *                        | r4-r8,r10,fp,lr  | callee saved registers
 * current ARM_FP =>      +------------------+
 *                                low
 *
 * When popping registers off the stack at the end of a BPF function, we
 * reference them via the current ARM_FP register.
 */
#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
			 1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)

#define STACK_OFFSET(k)	(k)
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */

#define FLAG_IMM_OVERFLOW	(1 << 0)

/*
 * Map eBPF registers to ARM 32bit registers or stack scratch space.
 *
 * 1. The first argument is passed using the arm 32bit registers and the
 * rest of the arguments are passed on stack scratch space.
 * 2. The first callee-saved argument is mapped to arm 32 bit registers
 * and the rest of the arguments are mapped to scratch space on the stack.
 * 3. We need two 64 bit temp registers to do complex operations on eBPF
 * registers.
 *
 * As the eBPF registers are all 64 bit registers and arm has only 32 bit
 * registers, we have to map each eBPF register to two arm 32 bit registers
 * or to scratch memory space, and build each eBPF 64 bit register from
 * those.
 */
static const u8 bpf2a32[][2] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = {ARM_R1, ARM_R0},
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = {ARM_R3, ARM_R2},
	/* Stored on stack scratch space */
	[BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)},
	[BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)},
	[BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)},
	[BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)},
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = {ARM_R5, ARM_R4},
	/* Stored on stack scratch space */
	[BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)},
	[BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)},
	[BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)},
	/* Read only Frame Pointer to access Stack */
	[BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)},
	/* Temporary Register for internal BPF JIT, can be used
	 * for constant blindings and others.
	 */
	[TMP_REG_1] = {ARM_R7, ARM_R6},
	[TMP_REG_2] = {ARM_R10, ARM_R8},
	/* Tail call count. Stored on stack scratch space. */
	[TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)},
	/* temporary register for blinding constants.
	 * Stored on stack scratch space.
	 */
	[BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)},
};

#define dst_lo	dst[1]
#define dst_hi	dst[0]
#define src_lo	src[1]
#define src_hi	src[0]

/*
 * JIT Context:
 *
 * prog			:	bpf_prog
 * idx			:	index of the current last JITed instruction.
 * prologue_bytes	:	bytes used in prologue.
 * epilogue_offset	:	offset at which the epilogue starts.
 * offsets		:	array of eBPF instruction offsets in
 *				JITed code.
 * target		:	final JITed code.
 * epilogue_bytes	:	number of bytes used in epilogue.
 * imm_count		:	number of immediates used for global
 *				variables.
 * imms			:	array of global variable addresses.
 */

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int prologue_bytes;
	unsigned int epilogue_offset;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u32 stack_size;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv32(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod32(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}
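
/*
 * Illustrative note (not emitted code): every ARM instruction carries its
 * condition in bits 31:28, which is why _emit() only has to OR in
 * (cond << 28). For example, given the cond-less MOV r0, r0 encoding
 * 0x01a00000, _emit(ARM_COND_AL, ...) produces 0xe1a00000 (AL = 0xe),
 * while _emit(ARM_COND_EQ, ...) leaves it as 0x01a00000 (EQ = 0x0).
 */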

/*
 * Checks whether an immediate can be encoded as an ARM "modified
 * immediate", i.e. an 8-bit value rotated right by an even amount.
 * Returns the 12-bit encoding (rotation in bits 11:8, value in bits 7:0)
 * on success, or -1 if the value cannot be encoded.
 */
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);
	return -1;
}
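
/*
 * Worked example (illustrative only): 0x3fc00 is 0xff rotated right by
 * 22 bits, so rot = 11 matches and imm8m(0x3fc00) returns 0xbff (rotation
 * 11 in bits 11:8, value 0xff in bits 7:0). 0x1fe00 would need an odd
 * rotation of 9 bits, which the encoding cannot express, so imm8m()
 * returns -1 and callers fall back to emit_mov_i_no8m(). A compile-time
 * excluded sketch of these expectations:
 */
#if 0
static void imm8m_example(void)
{
	BUG_ON(imm8m(0x000000ff) != 0x0ff);	/* rot = 0 */
	BUG_ON(imm8m(0x0003fc00) != 0xbff);	/* rot = 11 (22 bits) */
	BUG_ON(imm8m(0x0001fe00) != -1);	/* odd rotation: no encoding */
}
#endif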

/*
 * Initializes the JIT space with undefined instructions.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
/* EABI requires the stack to be aligned to 64-bit boundaries */
#define STACK_ALIGNMENT	8
#else
/* Stack must be aligned to 32-bit boundaries */
#define STACK_ALIGNMENT	4
#endif

/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
 * BPF_REG_FP and the tail call count.
 */
#define SCRATCH_SIZE 80

/* total stack size used in JITed code */
#define _STACK_SIZE \
	(ctx->prog->aux->stack_depth + \
	 SCRATCH_SIZE + \
	 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)

/* Get the offset of eBPF REGISTERs stored on scratch space. */
#define STACK_VAR(off) (STACK_SIZE - (off) - 4)

/* Offset of skb_copy_bits buffer */
#define SKB_BUFFER STACK_VAR(SCRATCH_SIZE)
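
/*
 * Worked example of the layout (illustrative, assuming EABI alignment and
 * a program with stack_depth == 64): _STACK_SIZE = 64 + 80 + 4 = 148 and
 * STACK_SIZE = ALIGN(148, 8) = 152. Scratch slot k then lives at
 * [sp, #STACK_VAR(k)] = [sp, #(152 - k - 4)], so e.g. BPF_REG_2's high
 * word (k = 0) sits at sp + 148 and its low word (k = 4) at sp + 144.
 */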

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned int i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->prog->len - 1] * 4;
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */

static inline int bpf2a32_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx) {
	int to, from;

	if (ctx->target == NULL)
		return 0;
	to = ctx->offsets[bpf_to];
	from = ctx->offsets[bpf_from];

	return to - from - 1;
}
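
/*
 * Why "to - from - 1" (derivation, not new behavior): ctx->offsets[i]
 * records the JIT word index right *after* instruction i (see
 * build_body()), and an ARM branch encodes (target - PC)/4 with PC
 * reading two words ahead of the branch. The branch is the last word of
 * instruction 'bpf_from', i.e. at index from - 1, so the encoded offset
 * is to - (from - 1) - 2 = to - from - 1.
 */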

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
	emit_bx_r(tgt_reg, ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to, from;
	/* No need for 1st dummy run */
	if (ctx->target == NULL)
		return 0;
	to = ctx->epilogue_offset;
	from = ctx->idx;

	return to - from - 2;
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
	const u8 *tmp = bpf2a32[TMP_REG_1];

#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions:
	 * ARM_R1 and ARM_R0 contain the first argument of the bpf
	 * function, so we need to save them on the caller side to
	 * keep them from being clobbered within the callee.
	 * After the return from the callee, we restore ARM_R0 and
	 * ARM_R1.
	 */
	if (rn != ARM_R1) {
		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	}
	if (rm != ARM_R0) {
		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	}

	/* Call appropriate function */
	emit_mov_i(ARM_IP, op == BPF_DIV ?
		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Save return value */
	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);

	/* Restore ARM_R0 and ARM_R1 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
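
/*
 * Lowering example (illustrative only): on an ARMv7 core with hardware
 * divide (HWCAP_IDIVA), a 32-bit "r0 = r0 % r1" becomes
 *
 *	udiv	ip, r0, r1		@ ip = r0 / r1
 *	mls	r0, r1, ip, r0		@ r0 = r0 - r1 * ip  (the remainder)
 *
 * while on older cores the same operation is an out-of-line call to
 * jit_mod32() via blx, with r0/r1 shuffled around the call as above.
 */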

/* Checks whether the BPF register is on scratch stack space or not. */
static inline bool is_on_stack(u8 bpf_reg)
{
	static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5,
				BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT,
				BPF_REG_2, BPF_REG_FP};
	int i, reg_len = sizeof(stack_regs);

	for (i = 0 ; i < reg_len ; i++) {
		if (bpf_reg == stack_regs[i])
			return true;
	}
	return false;
}

static inline void emit_a32_mov_i(const u8 dst, const u32 val,
				  bool dstk, struct jit_ctx *ctx)
{
	const u8 *tmp = bpf2a32[TMP_REG_1];

	if (dstk) {
		emit_mov_i(tmp[1], val, ctx);
		emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx);
	} else {
		emit_mov_i(dst, val, ctx);
	}
}

/* Sign-extended move */
static inline void emit_a32_mov_i64(const bool is64, const u8 dst[],
				  const u32 val, bool dstk,
				  struct jit_ctx *ctx) {
	u32 hi = 0;

	if (is64 && (val & (1 << 31)))
		hi = (u32)~0;
	emit_a32_mov_i(dst_lo, val, dstk, ctx);
	emit_a32_mov_i(dst_hi, hi, dstk, ctx);
}
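
/*
 * Example (illustrative only): a 64-bit ALU op with imm = -5 must see the
 * immediate as 0xfffffffffffffffb. Since val has bit 31 set,
 * emit_a32_mov_i64(true, dst, -5, ...) writes lo = 0xfffffffb and
 * hi = 0xffffffff; for 32-bit ops (is64 == false) hi stays 0, matching
 * eBPF's rule that 32-bit ops zero the upper half of the register.
 */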

static inline void emit_a32_add_r(const u8 dst, const u8 src,
			      const bool is64, const bool hi,
			      struct jit_ctx *ctx) {
	/* 64 bit :
	 *	adds dst_lo, dst_lo, src_lo
	 *	adc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	add dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_ADDS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_ADC_R(dst, dst, src), ctx);
	else
		emit(ARM_ADD_R(dst, dst, src), ctx);
}

static inline void emit_a32_sub_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx) {
	/* 64 bit :
	 *	subs dst_lo, dst_lo, src_lo
	 *	sbc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	sub dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_SUBS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_SBC_R(dst, dst, src), ctx);
	else
		emit(ARM_SUB_R(dst, dst, src), ctx);
}

static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
			      const bool hi, const u8 op, struct jit_ctx *ctx){
	switch (BPF_OP(op)) {
	/* dst = dst + src */
	case BPF_ADD:
		emit_a32_add_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit_a32_sub_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ARM_ORR_R(dst, dst, src), ctx);
		break;
	/* dst = dst & src */
	case BPF_AND:
		emit(ARM_AND_R(dst, dst, src), ctx);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ARM_EOR_R(dst, dst, src), ctx);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		emit(ARM_MUL(dst, dst, src), ctx);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ARM_LSL_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ARM_LSR_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ARSH:
		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
		break;
	}
}

/* ALU operation (32 bit)
 * dst = dst (op) src
 */
static inline void emit_a32_alu_r(const u8 dst, const u8 src,
				  bool dstk, bool sstk,
				  struct jit_ctx *ctx, const bool is64,
				  const bool hi, const u8 op) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rn = sstk ? tmp[1] : src;

	if (sstk)
		emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx);

	/* ALU operation */
	if (dstk) {
		emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
		emit_alu_r(tmp[0], rn, is64, hi, op, ctx);
		emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
	} else {
		emit_alu_r(dst, rn, is64, hi, op, ctx);
	}
}

/* ALU operation (64 bit) */
static inline void emit_a32_alu_r64(const bool is64, const u8 dst[],
				  const u8 src[], bool dstk,
				  bool sstk, struct jit_ctx *ctx,
				  const u8 op) {
	emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op);
	if (is64)
		emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op);
	else
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
}

/* dst = src (4 bytes) */
static inline void emit_a32_mov_r(const u8 dst, const u8 src,
				  bool dstk, bool sstk,
				  struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rt = sstk ? tmp[0] : src;

	if (sstk)
		emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx);
	if (dstk)
		emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx);
	else
		emit(ARM_MOV_R(dst, rt), ctx);
}

/* dst = src */
static inline void emit_a32_mov_r64(const bool is64, const u8 dst[],
				  const u8 src[], bool dstk,
				  bool sstk, struct jit_ctx *ctx) {
	emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx);
	if (is64) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx);
	} else {
		/* Zero out high 4 bytes */
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
	}
}

/* Shift and negate operations with an immediate (32 bit) */
static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
				struct jit_ctx *ctx, const u8 op) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rd = dstk ? tmp[0] : dst;

	if (dstk)
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);

	/* Do shift operation */
	switch (op) {
	case BPF_LSH:
		emit(ARM_LSL_I(rd, rd, val), ctx);
		break;
	case BPF_RSH:
		emit(ARM_LSR_I(rd, rd, val), ctx);
		break;
	case BPF_NEG:
		emit(ARM_RSB_I(rd, rd, val), ctx);
		break;
	}

	if (dstk)
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
}

/* dst = -dst (64 bit) */
static inline void emit_a32_neg64(const u8 dst[], bool dstk,
				struct jit_ctx *ctx){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rd = dstk ? tmp[1] : dst[1];
	u8 rm = dstk ? tmp[0] : dst[0];

	/* Setup Operand */
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do Negate Operation */
	emit(ARM_RSBS_I(rd, rd, 0), ctx);
	emit(ARM_RSC_I(rm, rm, 0), ctx);

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}

/* dst = dst << src */
static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	/* Setup Operands */
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (sstk)
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSH operation */
	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx);

	if (dstk) {
		emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_LR), ctx);
		emit(ARM_MOV_R(rm, ARM_IP), ctx);
	}
}
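
/*
 * Worked example (illustrative only): the sequence above relies on ARM
 * register-specified shifts of 32 or more producing 0, which lets one
 * instruction stream handle both src < 32 and src >= 32. For src = 4 and
 * dst = 0x0123456789abcdef:
 *
 *	lr = hi << 4			(the "lo << (4 - 32)" term is 0)
 *	new hi = lr | (lo >> 28)	= 0x12345678
 *	new lo = lo << 4		= 0x9abcdef0
 *
 * giving dst = 0x123456789abcdef0. For src >= 32 the "<< src" terms are 0
 * instead, leaving lo << (src - 32) in the high word and a zero low word.
 */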

/* dst = dst >> src (signed) */
static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	/* Setup Operands */
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (sstk)
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do the ARSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
	_emit(ARM_COND_MI, ARM_B(0), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx);
	if (dstk) {
		emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_LR), ctx);
		emit(ARM_MOV_R(rm, ARM_IP), ctx);
	}
}

/* dst = dst >> src */
static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
				     bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	/* Setup Operands */
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (sstk)
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSR operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx);
	if (dstk) {
		emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_LR), ctx);
		emit(ARM_MOV_R(rm, ARM_IP), ctx);
	}
}

/* dst = dst << val */
static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
				     const u32 val, struct jit_ctx *ctx){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	/* Setup operands */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx);
		emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx);
		emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx);
	} else {
		if (val == 32)
			emit(ARM_MOV_R(rm, rd), ctx);
		else
			emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx);
		emit(ARM_EOR_R(rd, rd, rd), ctx);
	}

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}

/* dst = dst >> val */
static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk,
				    const u32 val, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	/* Setup operands */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSR operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd, rm), ctx);
		emit(ARM_MOV_I(rm, 0), ctx);
	} else {
		emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx);
		emit(ARM_MOV_I(rm, 0), ctx);
	}

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}

/* dst = dst >> val (signed) */
static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk,
				     const u32 val, struct jit_ctx *ctx){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	/* Setup operands */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do ARSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd, rm), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
	} else {
		emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
	}

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}

static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	/* Setup operands for multiplication */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rn = sstk ? tmp2[0] : src_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
	if (sstk) {
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
		emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx);
	}

	/* Do Multiplication */
	emit(ARM_MUL(ARM_IP, rd, rn), ctx);
	emit(ARM_MUL(ARM_LR, rm, rt), ctx);
	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);

	emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
	emit(ARM_ADD_R(rm, ARM_LR, rm), ctx);
	if (dstk) {
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_IP), ctx);
	}
}
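
/*
 * The sequence above computes a 64x64 -> 64 bit product from 32-bit
 * halves: dst * src mod 2^64 = (dst_lo * src_lo)
 *                            + ((dst_lo * src_hi + dst_hi * src_lo) << 32).
 * UMULL provides the full 64-bit low product; the two MULs provide the
 * cross terms, which only matter mod 2^32. A compile-time-excluded
 * sketch checking that decomposition:
 */
#if 0
static void mul_r64_example(void)
{
	u64 d = 0x0123456789abcdefULL, s = 0xfedcba9876543210ULL;
	u32 dlo = d, dhi = d >> 32, slo = s, shi = s >> 32;
	u32 cross = dlo * shi + dhi * slo;	/* wraps mod 2^32, like MUL */
	u64 res = (u64)dlo * slo + ((u64)cross << 32);

	BUG_ON(res != d * s);	/* both sides wrap mod 2^64 */
}
#endif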

/* *(size *)(dst + off) = src */
static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
			      const s32 off, struct jit_ctx *ctx, const u8 sz){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rd = dstk ? tmp[1] : dst;

	if (dstk)
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
	if (off) {
		emit_a32_mov_i(tmp[0], off, false, ctx);
		emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx);
		rd = tmp[0];
	}
	switch (sz) {
	case BPF_W:
		/* Store a Word */
		emit(ARM_STR_I(src, rd, 0), ctx);
		break;
	case BPF_H:
		/* Store a HalfWord */
		emit(ARM_STRH_I(src, rd, 0), ctx);
		break;
	case BPF_B:
		/* Store a Byte */
		emit(ARM_STRB_I(src, rd, 0), ctx);
		break;
	}
}

/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
			      s32 off, struct jit_ctx *ctx, const u8 sz){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *rd = dstk ? tmp : dst;
	u8 rm = src;
	s32 off_max;

	if (sz == BPF_H)
		off_max = 0xff;
	else
		off_max = 0xfff;

	if (off < 0 || off > off_max) {
		emit_a32_mov_i(tmp[0], off, false, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
		rm = tmp[0];
		off = 0;
	} else if (rd[1] == rm) {
		emit(ARM_MOV_R(tmp[0], rm), ctx);
		rm = tmp[0];
	}
	switch (sz) {
	case BPF_B:
		/* Load a Byte */
		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
		emit_a32_mov_i(dst[0], 0, dstk, ctx);
		break;
	case BPF_H:
		/* Load a HalfWord */
		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
		emit_a32_mov_i(dst[0], 0, dstk, ctx);
		break;
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		emit_a32_mov_i(dst[0], 0, dstk, ctx);
		break;
	case BPF_DW:
		/* Load a Double Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
		break;
	}
	if (dstk)
		emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
	if (dstk && sz == BPF_DW)
		emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
}
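
/*
 * Note on off_max above: ARM LDR/LDRB take a 12-bit immediate offset
 * (0xfff), but LDRH only has an 8-bit immediate field (0xff), so halfword
 * loads fall back to computing the address in a temporary register sooner.
 */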

/* Arithmetic operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
			     const u8 rn, struct jit_ctx *ctx, u8 op) {
	switch (op) {
	case BPF_JSET:
		emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
		emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
		emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
		break;
	case BPF_JEQ:
	case BPF_JNE:
	case BPF_JGT:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JLT:
		emit(ARM_CMP_R(rd, rm), ctx);
		_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
		break;
	case BPF_JSLE:
	case BPF_JSGT:
		emit(ARM_CMP_R(rn, rt), ctx);
		emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
		break;
	case BPF_JSLT:
	case BPF_JSGE:
		emit(ARM_CMP_R(rt, rn), ctx);
		emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
		break;
	}
}
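
/*
 * How the signed cases work (derivation, not emitted code): for
 * BPF_JSGT/BPF_JSLE, CMP rn, rt followed by SBCS ip, rm, rd computes the
 * full 64-bit subtraction src - dst, so "dst > src (signed)" is exactly
 * "src - dst < 0", i.e. the LT condition (N != V), and BPF_JSLE uses the
 * inverse GE condition (see the jump table in build_insn()). The unsigned
 * cases instead compare high words first and only compare low words when
 * the high words are equal.
 */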

static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 *r2 = bpf2a32[BPF_REG_2];
	const u8 *r3 = bpf2a32[BPF_REG_3];
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	const u8 *tcc = bpf2a32[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
	u32 off, lo, hi;

	/* if (index >= array->map.max_entries)
	 *	goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	/* array->map.max_entries */
	emit_a32_mov_i(tmp[1], off, false, ctx);
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
	emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
	/* index is 32-bit for arrays */
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
	/* index >= array->map.max_entries */
	emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 * tail_call_cnt++;
	 */
	lo = (u32)MAX_TAIL_CALL_CNT;
	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
	emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
	emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
	emit(ARM_CMP_I(tmp[0], hi), ctx);
	_emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx);
	_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
	emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx);
	emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx);
	emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
	emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);

	/* prog = array->ptrs[index]
	 * if (prog == NULL)
	 *	goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a32_mov_i(tmp[1], off, false, ctx);
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
	emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx);
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
	emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx);
	emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx);
	emit(ARM_CMP_I(tmp[1], 0), ctx);
	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a32_mov_i(tmp2[1], off, false, ctx);
	emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
	emit_bx_r(tmp[1], ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

/* 0xabcd => 0xcdab */
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
#else /* ARMv6+ */
	emit(ARM_REV16(rd, rn), ctx);
#endif
}

/* 0xabcdefgh => 0xghefcdab */
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);

	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);

#else /* ARMv6+ */
	emit(ARM_REV(rd, rn), ctx);
#endif
}

/* Push the 64-bit BPF register 'src' (held in stack scratch space)
 * onto the ARM stack.
 */
static inline void emit_push_r64(const u8 src[], const u8 shift,
		struct jit_ctx *ctx)
{
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	u16 reg_set = 0;

	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1] + shift)), ctx);
	emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0] + shift)), ctx);

	reg_set = (1 << tmp2[1]) | (1 << tmp2[0]);
	emit(ARM_PUSH(reg_set), ctx);
}

static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a32[BPF_REG_0][1];
	const u8 r2 = bpf2a32[BPF_REG_1][1];
	const u8 r3 = bpf2a32[BPF_REG_1][0];
	const u8 r4 = bpf2a32[BPF_REG_6][1];
	const u8 fplo = bpf2a32[BPF_REG_FP][1];
	const u8 fphi = bpf2a32[BPF_REG_FP][0];
	const u8 *tcc = bpf2a32[TCALL_CNT];

	/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
	/* Compute the BPF_FP value (SP - SCRATCH_SIZE, see the stack
	 * layout above) into IP before SP is moved.
	 */
	emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);

	ctx->stack_size = imm8m(STACK_SIZE);

	/* Set up function call stack */
	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);

	/* Set up BPF prog stack base register */
	emit_a32_mov_r(fplo, ARM_IP, true, false, ctx);
	emit_a32_mov_i(fphi, 0, true, ctx);

	/* mov r4, 0 */
	emit(ARM_MOV_I(r4, 0), ctx);

	/* Move BPF_CTX to BPF_R1 */
	emit(ARM_MOV_R(r3, r4), ctx);
	emit(ARM_MOV_R(r2, r0), ctx);
	/* Initialize Tail Count */
	emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx);
	emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx);
	/* end of prologue */
}

/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
#ifdef CONFIG_FRAME_POINTER
	/* When using frame pointers, some additional registers need to
	 * be loaded.
	 */
	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	/* Restore callee saved registers. */
	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
	emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}

/*
 * Convert an eBPF instruction to a native instruction, i.e.
 * JIT an eBPF instruction.
 * Returns:
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 *dst = bpf2a32[insn->dst_reg];
	const u8 *src = bpf2a32[insn->src_reg];
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const bool dstk = is_on_stack(insn->dst_reg);
	const bool sstk = is_on_stack(insn->src_reg);
	u8 rd, rt, rm, rn;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm24(imm) check_imm(24, imm)
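
/*
 * Range note: an ARM branch immediate is a signed 24-bit word offset,
 * i.e. roughly +/-32 MB of code. check_imm24() rejects anything wider,
 * which is far beyond the size of any single JITed BPF program, but it
 * keeps the encoding honest.
 */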

	switch (code) {
	/* ALU operations */

	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx);
			break;
		case BPF_K:
			/* Sign-extend immediate value to destination reg */
			emit_a32_mov_i64(is64, dst, imm, dstk, ctx);
			break;
		}
		break;
	/* dst = dst + src/imm */
	/* dst = dst - src/imm */
	/* dst = dst | src/imm */
	/* dst = dst & src/imm */
	/* dst = dst ^ src/imm */
	/* dst = dst * src/imm */
	/* dst = dst << src */
	/* dst = dst >> src */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_alu_r64(is64, dst, src, dstk, sstk,
					 ctx, BPF_OP(code));
			break;
		case BPF_K:
			/* Move the immediate value to a temporary register
			 * and do the ALU operation on that: the move
			 * sign-extends the immediate, after which the
			 * operation is safe to perform on it.
			 */
			emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
			emit_a32_alu_r64(is64, dst, tmp2, dstk, false,
					 ctx, BPF_OP(code));
			break;
		}
		break;
	/* dst = dst / src(imm) */
	/* dst = dst % src(imm) */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_X:
		rt = src_lo;
		rd = dstk ? tmp2[1] : dst_lo;
		if (dstk)
			emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		switch (BPF_SRC(code)) {
		case BPF_X:
			rt = sstk ? tmp2[0] : rt;
			if (sstk)
				emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)),
				     ctx);
			break;
		case BPF_K:
			rt = tmp2[0];
			emit_a32_mov_i(rt, imm, false, ctx);
			break;
		}
		emit_udivmod(rd, rd, rt, ctx, BPF_OP(code));
		if (dstk)
			emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		goto notyet;
	/* dst = dst >> imm */
	/* dst = dst << imm */
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU | BPF_LSH | BPF_K:
		if (unlikely(imm > 31))
			return -EINVAL;
		if (imm)
			emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code));
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
		break;
	/* dst = dst << imm */
	case BPF_ALU64 | BPF_LSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_lsh_i64(dst, dstk, imm, ctx);
		break;
	/* dst = dst >> imm */
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_lsr_i64(dst, dstk, imm, ctx);
		break;
	/* dst = dst << src */
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_a32_lsh_r64(dst, src, dstk, sstk, ctx);
		break;
	/* dst = dst >> src */
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_a32_lsr_r64(dst, src, dstk, sstk, ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_a32_arsh_r64(dst, src, dstk, sstk, ctx);
		break;
	/* dst = dst >> imm (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_arsh_i64(dst, dstk, imm, ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
		emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code));
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
		break;
	/* dst = -dst (64 bit) */
	case BPF_ALU64 | BPF_NEG:
		emit_a32_neg64(dst, dstk, ctx);
		break;
	/* dst = dst * src/imm */
	case BPF_ALU64 | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_mul_r64(dst, src, dstk, sstk, ctx);
			break;
		case BPF_K:
			/* Move the immediate value to a temporary register
			 * and do the multiplication on that: the move
			 * sign-extends the immediate, after which the
			 * operation is safe to perform on it.
			 */
			emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
			emit_a32_mul_r64(dst, tmp2, dstk, false, ctx);
			break;
		}
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		rd = dstk ? tmp[0] : dst_hi;
		rt = dstk ? tmp[1] : dst_lo;
		if (dstk) {
			emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
			emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
		}
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
		switch (imm) {
		case 16:
			emit_rev16(rt, rt, ctx);
			goto emit_bswap_uxt;
		case 32:
			emit_rev32(rt, rt, ctx);
			goto emit_bswap_uxt;
		case 64:
			emit_rev32(ARM_LR, rt, ctx);
			emit_rev32(rt, rd, ctx);
			emit(ARM_MOV_R(rd, ARM_LR), ctx);
			break;
		}
		goto exit;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
#if __LINUX_ARM_ARCH__ < 6
			emit_a32_mov_i(tmp2[1], 0xffff, false, ctx);
			emit(ARM_AND_R(rt, rt, tmp2[1]), ctx);
#else /* ARMv6+ */
			emit(ARM_UXTH(rt, rt), ctx);
#endif
			emit(ARM_EOR_R(rd, rd, rd), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(ARM_EOR_R(rd, rd, rd), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
exit:
		if (dstk) {
			emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
			emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
		}
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u32 hi, lo = imm;

		hi = insn1.imm;
		emit_a32_mov_i(dst_lo, lo, dstk, ctx);
		emit_a32_mov_i(dst_hi, hi, dstk, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		rn = sstk ? tmp2[1] : src_lo;
		if (sstk)
			emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
		emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
		break;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */
		const u8 r0 = bpf2a32[BPF_REG_0][1]; /* r0: struct sk_buff *skb
						      * (also return value) */
		const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */
		const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */
		const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */
		const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) */
		int size;

		/* Setting up first argument */
		emit(ARM_MOV_R(r0, r4), ctx);

		/* Setting up second argument */
		emit_a32_mov_i(r1, imm, false, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit_a32_alu_r(r1, src_lo, false, sstk, ctx,
				       false, false, BPF_ADD);

		/* Setting up third argument */
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a32_mov_i(r2, size, false, ctx);

		/* Setting up fourth argument */
		emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx);

		/* Setting up function pointer to call */
		emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx);
		emit_blx_r(r6, ctx);

		emit(ARM_EOR_R(r1, r1, r1), ctx);
		/* Check whether the returned address is NULL or not.
		 * If NULL, jump to the epilogue;
		 * otherwise continue to load the value from that address.
		 */
		emit(ARM_CMP_I(r0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm24(jmp_offset);
		_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

		/* Load value from the address */
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(ARM_LDR_I(r0, r0, 0), ctx);
			emit_rev32(r0, r0, ctx);
			break;
		case BPF_H:
			emit(ARM_LDRH_I(r0, r0, 0), ctx);
			emit_rev16(r0, r0, ctx);
			break;
		case BPF_B:
			emit(ARM_LDRB_I(r0, r0, 0), ctx);
			/* No need to reverse */
			break;
		}
		break;
	}
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_DW:
			/* Sign-extend immediate value into temp reg */
			emit_a32_mov_i64(true, tmp2, imm, false, ctx);
			emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W);
			emit_str_r(dst_lo, tmp2[0], dstk, off + 4, ctx, BPF_W);
			break;
		case BPF_W:
		case BPF_H:
		case BPF_B:
			emit_a32_mov_i(tmp2[1], imm, false, ctx);
			emit_str_r(dst_lo, tmp2[1], dstk, off, ctx,
				   BPF_SIZE(code));
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
	{
		u8 sz = BPF_SIZE(code);

		rn = sstk ? tmp2[1] : src_lo;
		rm = sstk ? tmp2[0] : src_hi;
		if (sstk) {
			emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
			emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
		}

		/* Store the value */
		if (BPF_SIZE(code) == BPF_DW) {
			emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W);
			emit_str_r(dst_lo, rm, dstk, off + 4, ctx, BPF_W);
		} else {
			emit_str_r(dst_lo, rn, dstk, off, ctx, sz);
		}
		break;
	}
	/* PC += off if dst == src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst != src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	/* PC += off if dst & src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
		/* Setup source registers */
		rm = sstk ? tmp2[0] : src_hi;
		rn = sstk ? tmp2[1] : src_lo;
		if (sstk) {
			emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
			emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
		}
		goto go_jmp;
	/* PC += off if dst == imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst != imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		rm = tmp2[0];
		rn = tmp2[1];
		/* Sign-extend immediate value */
		emit_a32_mov_i64(true, tmp2, imm, false, ctx);
go_jmp:
		/* Setup destination register */
		rd = dstk ? tmp[0] : dst_hi;
		rt = dstk ? tmp[1] : dst_lo;
		if (dstk) {
			emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
			emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
		}

		/* Check for the condition */
		emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code));

		/* Setup JUMP instruction */
		jmp_offset = bpf2a32_offset(i + off, i, ctx);
		switch (BPF_OP(code)) {
		case BPF_JNE:
		case BPF_JSET:
			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JEQ:
			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGT:
			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGE:
			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLE:
			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLT:
			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		}
		break;
	/* JMP OFF */
	case BPF_JMP | BPF_JA:
	{
		if (off == 0)
			break;
		jmp_offset = bpf2a32_offset(i + off, i, ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 *r0 = bpf2a32[BPF_REG_0];
		const u8 *r1 = bpf2a32[BPF_REG_1];
		const u8 *r2 = bpf2a32[BPF_REG_2];
		const u8 *r3 = bpf2a32[BPF_REG_3];
		const u8 *r4 = bpf2a32[BPF_REG_4];
		const u8 *r5 = bpf2a32[BPF_REG_5];
		const u32 func = (u32)__bpf_call_base + (u32)imm;

		emit_a32_mov_r64(true, r0, r1, false, false, ctx);
		emit_a32_mov_r64(true, r1, r2, false, true, ctx);
		emit_push_r64(r5, 0, ctx);
		emit_push_r64(r4, 8, ctx);
		emit_push_r64(r3, 16, ctx);

		emit_a32_mov_i(tmp[1], func, false, ctx);
		emit_blx_r(tmp[1], ctx);

		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); /* callee clean */
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		 * simply fall through to the epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	if (ctx->flags & FLAG_IMM_OVERFLOW)
		/*
		 * this instruction generated an overflow when
		 * trying to access the literal pool, so
		 * delegate this filter to the kernel interpreter.
		 */
		return -1;
	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	unsigned int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &(prog->insnsi[i]);
		int ret;

		ret = build_insn(insn, ctx);

		/* ret > 0 means a 16-byte eBPF instruction was JITed,
		 * i.e. the 64 bit immediate load; skip its second half.
		 */
		if (ret > 0) {
			i++;
			if (ctx->target == NULL)
				ctx->offsets[i] = ctx->idx;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx;

		/* If unsuccessful, return with error code */
		if (ret)
			return ret;
	}
	return 0;
}
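
/*
 * Pass structure in a nutshell (summary, not new behavior): with
 * ctx->target == NULL, build_body() never writes an instruction; it only
 * advances ctx->idx and records offsets[], so the first pass merely
 * measures the image and learns every instruction's position. The second
 * pass, with ctx->target set, emits the same words into the allocated
 * image; any shortfall would leave ARM_INST_UDF filler behind, which
 * validate_code() below rejects.
 */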

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
			return -1;
	}

	return 0;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	unsigned int tmp_idx;
	unsigned int image_size;
	u8 *image_ptr;

	/* If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	/* If constant blinding was enabled and we failed during blinding
	 * then we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	tmp = bpf_jit_blind_constants(prog);

	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	/* If we are not able to allocate memory for offsets[], we
	 * must fall back to the interpreter.
	 */
	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offsets == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1) fake pass to find the length of the JITed code, to compute
	 * ctx->offsets and the other context variables needed to compute
	 * the final JITed code.
	 * Also, calculate random starting pointer/start of JITed code
	 * which is prefixed by random number of fault instructions.
	 *
	 * If the first pass fails then there is no chance of it
	 * being successful in the second pass, so just fall back
	 * to the interpreter.
	 */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.epilogue_offset = ctx.idx;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
		if (ctx.imms == NULL) {
			prog = orig_prog;
			goto out_off;
		}
	}
#else
	/* there's nothing about the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	/* Now we can get the actual image size of the JITed arm code.
	 * Currently, we are not considering the THUMB-2 instructions
	 * for jit, although it can decrease the size of the image.
	 *
	 * As each arm instruction is of length 32bit, we are translating
	 * the number of JITed instructions into the size required to
	 * store this JITed code.
	 */
	image_size = sizeof(u32) * ctx.idx;

	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	/* If we are not able to allocate memory for the structure, we
	 * must fall back to the interpreter.
	 */
	if (header == NULL) {
		prog = orig_prog;
		goto out_imms;
	}

	/* 2) Actual pass to generate final JIT code */
	ctx.target = (u32 *) image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	/* If building the body of the JITed code fails somehow,
	 * we fall back to the interpreter.
	 */
	if (build_body(&ctx) < 0) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	build_epilogue(&ctx);

	/* 3) Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out_imms:
#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif
out_off:
	kfree(ctx.offsets);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}