xref: /openbmc/linux/arch/loongarch/net/bpf_jit.h (revision 4eece7e6)
15dc61552STiezhu Yang /* SPDX-License-Identifier: GPL-2.0-only */
25dc61552STiezhu Yang /*
35dc61552STiezhu Yang  * BPF JIT compiler for LoongArch
45dc61552STiezhu Yang  *
55dc61552STiezhu Yang  * Copyright (C) 2022 Loongson Technology Corporation Limited
65dc61552STiezhu Yang  */
7dbcd7f5fSYouling Tang #include <linux/bitfield.h>
85dc61552STiezhu Yang #include <linux/bpf.h>
95dc61552STiezhu Yang #include <linux/filter.h>
105dc61552STiezhu Yang #include <asm/cacheflush.h>
115dc61552STiezhu Yang #include <asm/inst.h>
125dc61552STiezhu Yang 
/*
 * Per-program JIT compilation state, threaded through every code-gen pass.
 */
struct jit_ctx {
	const struct bpf_prog *prog;		/* BPF program being compiled */
	unsigned int idx;			/* index of the next native insn to emit */
	unsigned int flags;
	unsigned int epilogue_offset;		/* native-insn index where the epilogue starts */
	u32 *offset;				/* map: BPF insn index -> native insn index */
	int num_exentries;			/* exception-table entry count — presumably for BPF_PROBE_MEM; confirm in bpf_jit.c */
	union loongarch_instruction *image;	/* output buffer; NULL during the sizing pass (see emit_insn) */
	u32 stack_size;				/* stack frame size of the generated code */
};
235dc61552STiezhu Yang 
/*
 * JIT state kept alongside the program — presumably cached between
 * passes/invocations by the JIT core; confirm usage in bpf_jit.c.
 */
struct jit_data {
	struct bpf_binary_header *header;	/* allocated executable image */
	u8 *image;				/* start of the generated machine code */
	struct jit_ctx ctx;			/* saved compilation context */
};
295dc61552STiezhu Yang 
/*
 * Emit one native instruction through the matching emit_<func>() helper.
 *
 * During the first (sizing) pass ctx->image is NULL: nothing is written,
 * but ctx->idx is still advanced so the pass counts how many instructions
 * the program will need.
 */
#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)
385dc61552STiezhu Yang 
/* Range checks for the immediate-field widths used by the instructions below. */
#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)
465dc61552STiezhu Yang 
bpf2la_offset(int bpf_insn,int off,const struct jit_ctx * ctx)475dc61552STiezhu Yang static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
485dc61552STiezhu Yang {
495dc61552STiezhu Yang 	/* BPF JMP offset is relative to the next instruction */
505dc61552STiezhu Yang 	bpf_insn++;
515dc61552STiezhu Yang 	/*
525dc61552STiezhu Yang 	 * Whereas LoongArch branch instructions encode the offset
535dc61552STiezhu Yang 	 * from the branch itself, so we must subtract 1 from the
545dc61552STiezhu Yang 	 * instruction offset.
555dc61552STiezhu Yang 	 */
565dc61552STiezhu Yang 	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
575dc61552STiezhu Yang }
585dc61552STiezhu Yang 
epilogue_offset(const struct jit_ctx * ctx)595dc61552STiezhu Yang static inline int epilogue_offset(const struct jit_ctx *ctx)
605dc61552STiezhu Yang {
615dc61552STiezhu Yang 	int from = ctx->idx;
625dc61552STiezhu Yang 	int to = ctx->epilogue_offset;
635dc61552STiezhu Yang 
645dc61552STiezhu Yang 	return (to - from);
655dc61552STiezhu Yang }
665dc61552STiezhu Yang 
675dc61552STiezhu Yang /* Zero-extend 32 bits into 64 bits */
emit_zext_32(struct jit_ctx * ctx,enum loongarch_gpr reg,bool is32)685dc61552STiezhu Yang static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
695dc61552STiezhu Yang {
705dc61552STiezhu Yang 	if (!is32)
715dc61552STiezhu Yang 		return;
725dc61552STiezhu Yang 
735dc61552STiezhu Yang 	emit_insn(ctx, lu32id, reg, 0);
745dc61552STiezhu Yang }
755dc61552STiezhu Yang 
765dc61552STiezhu Yang /* Signed-extend 32 bits into 64 bits */
emit_sext_32(struct jit_ctx * ctx,enum loongarch_gpr reg,bool is32)775dc61552STiezhu Yang static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
785dc61552STiezhu Yang {
795dc61552STiezhu Yang 	if (!is32)
805dc61552STiezhu Yang 		return;
815dc61552STiezhu Yang 
825dc61552STiezhu Yang 	emit_insn(ctx, addiw, reg, reg, 0);
835dc61552STiezhu Yang }
845dc61552STiezhu Yang 
/*
 * Load the full 64-bit value @addr into @rd with the fixed four-instruction
 * sequence lu12i.w / ori / lu32i.d / lu52i.d, filling the register from the
 * low bits upward.
 */
static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
{
	/* rd = sign_extend(addr[31:12] << 12) */
	emit_insn(ctx, lu12iw, rd, (addr >> 12) & 0xfffff);

	/* rd[11:0] = addr[11:0] */
	emit_insn(ctx, ori, rd, rd, addr & 0xfff);

	/* rd[51:32] = addr[51:32] */
	emit_insn(ctx, lu32id, rd, (addr >> 32) & 0xfffff);

	/* rd[63:52] = addr[63:52] */
	emit_insn(ctx, lu52id, rd, rd, (addr >> 52) & 0xfff);
}
10564f50f65SHengqi Chen 
/*
 * Load immediate @imm into @rd using the shortest instruction sequence
 * (1 to 4 instructions), then zero-extend the result when @is32.
 *
 * The special cases (zero, 12-bit signed/unsigned, pure-high-bits) each
 * take a single instruction; otherwise the register is built from the
 * low bits upward, skipping parts that are already correct.
 */
static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 (sign-extends a 12-bit immediate) */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 (zero-extends a 12-bit immediate) */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 — only the top 12 bits are set */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* General case: build the value from the low bits upward. */

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 — skipped when the low 12 bits are zero */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bit[51:31] is all 0 or all 1,
			 * it means bit[51:32] is sign extended by lu12iw,
			 * no need to call lu32id to do a new filled operation.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 — skipped when bit[63:52] is a sign extension */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}
1685dc61552STiezhu Yang 
/* Register-to-register move: rd = rj, implemented as "or rd, rj, $zero". */
static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}
1745dc61552STiezhu Yang 
/*
 * Map a BPF branch condition to its logical inverse (used when a branch
 * must be synthesized as "skip over an unconditional jump", see
 * cond_jmp_offs26()). Returns -1 for an unknown condition.
 */
static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		/* JSET is emitted as a bne (see cond_jmp_offset), so its inverse is an equality test */
		return BPF_JEQ;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLE:
		return BPF_JSGT;
	default:
		return -1;
	}
}
2025dc61552STiezhu Yang 
/*
 * Emit one native conditional branch implementing BPF condition @cond on
 * (rj, rd), with a PC-relative offset of @jmp_offset native instructions.
 *
 * LoongArch has no "greater than" branch forms, so JGT/JLE (and the signed
 * variants) are emitted as blt/bge with the operands swapped.
 *
 * BPF_JSET is handled like BPF_JNE — NOTE(review): this presumes the
 * caller has already materialized the AND result in the registers;
 * confirm against the JSET handling in bpf_jit.c.
 */
static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:	/* taken if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		break;
	case BPF_JNE:	/* taken if rj != rd */
	case BPF_JSET:
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		break;
	case BPF_JLT:	/* taken if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		break;
	case BPF_JGT:	/* taken if rj > rd (unsigned), i.e. rd < rj */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		break;
	case BPF_JGE:	/* taken if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		break;
	case BPF_JLE:	/* taken if rj <= rd (unsigned), i.e. rd >= rj */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		break;
	case BPF_JSLT:	/* taken if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		break;
	case BPF_JSGT:	/* taken if rj > rd (signed), i.e. rd < rj */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		break;
	case BPF_JSGE:	/* taken if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		break;
	case BPF_JSLE:	/* taken if rj <= rd (signed), i.e. rd >= rj */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		break;
	}
}
2505dc61552STiezhu Yang 
/*
 * Conditional branch with a signed 26-bit offset. Native conditional
 * branches carry a smaller immediate, so synthesize the branch as:
 *
 *	b<!cond> rj, rd, +2	# skip the b below when @cond is false
 *	b        jmp_offset	# full 26-bit unconditional jump
 */
static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	u8 inverted = invert_jmp_cond(cond);

	cond_jmp_offset(ctx, inverted, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}
2585dc61552STiezhu Yang 
/* Unconditional PC-relative jump with a signed 26-bit instruction offset. */
static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}
2635dc61552STiezhu Yang 
/*
 * Emit a conditional branch, or fail with -EINVAL when @jmp_offset does
 * not fit the signed 26-bit range.
 *
 * A larger PC-relative offset would have to be converted into an absolute
 * jump sequence, which is nasty; proper far-branch relaxation needs more
 * than two passes to converge and seems too complex to be worth the pain.
 * For readability, just use cond_jmp_offs26() and reject anything bigger.
 */
static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	if (!is_signed_imm26(jmp_offset))
		return -EINVAL;

	cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
	return 0;
}
2855dc61552STiezhu Yang 
emit_uncond_jmp(struct jit_ctx * ctx,int jmp_offset)2865dc61552STiezhu Yang static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
2875dc61552STiezhu Yang {
2885dc61552STiezhu Yang 	if (is_signed_imm26(jmp_offset)) {
2895dc61552STiezhu Yang 		uncond_jmp_offs26(ctx, jmp_offset);
2905dc61552STiezhu Yang 		return 0;
2915dc61552STiezhu Yang 	}
2925dc61552STiezhu Yang 
2935dc61552STiezhu Yang 	return -EINVAL;
2945dc61552STiezhu Yang }
2955dc61552STiezhu Yang 
/*
 * Emit the conditional branch used on the tail-call path. It must fit a
 * single native conditional branch (signed 16-bit offset); fail with
 * -EINVAL when the target is out of range.
 */
static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (!is_signed_imm16(jmp_offset))
		return -EINVAL;

	cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
	return 0;
}
306