// SPDX-License-Identifier: GPL-2.0
/* BPF JIT compiler for RV64G
 *
 * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include "bpf_jit.h"

#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program makes calls */

static const int regmap[] = {
	[BPF_REG_0] = RV_REG_A5,
	[BPF_REG_1] = RV_REG_A0,
	[BPF_REG_2] = RV_REG_A1,
	[BPF_REG_3] = RV_REG_A2,
	[BPF_REG_4] = RV_REG_A3,
	[BPF_REG_5] = RV_REG_A4,
	[BPF_REG_6] = RV_REG_S1,
	[BPF_REG_7] = RV_REG_S2,
	[BPF_REG_8] = RV_REG_S3,
	[BPF_REG_9] = RV_REG_S4,
	[BPF_REG_FP] = RV_REG_S5,
	[BPF_REG_AX] = RV_REG_T0,
};

static const int pt_regmap[] = {
	[RV_REG_A0] = offsetof(struct pt_regs, a0),
	[RV_REG_A1] = offsetof(struct pt_regs, a1),
	[RV_REG_A2] = offsetof(struct pt_regs, a2),
	[RV_REG_A3] = offsetof(struct pt_regs, a3),
	[RV_REG_A4] = offsetof(struct pt_regs, a4),
	[RV_REG_A5] = offsetof(struct pt_regs, a5),
	[RV_REG_S1] = offsetof(struct pt_regs, s1),
	[RV_REG_S2] = offsetof(struct pt_regs, s2),
	[RV_REG_S3] = offsetof(struct pt_regs, s3),
	[RV_REG_S4] = offsetof(struct pt_regs, s4),
	[RV_REG_S5] = offsetof(struct pt_regs, s5),
	[RV_REG_T0] = offsetof(struct pt_regs, t0),
};

enum {
	RV_CTX_F_SEEN_TAIL_CALL = 0,
	RV_CTX_F_SEEN_CALL = RV_REG_RA,
	RV_CTX_F_SEEN_S1 = RV_REG_S1,
	RV_CTX_F_SEEN_S2 = RV_REG_S2,
	RV_CTX_F_SEEN_S3 = RV_REG_S3,
	RV_CTX_F_SEEN_S4 = RV_REG_S4,
	RV_CTX_F_SEEN_S5 = RV_REG_S5,
	RV_CTX_F_SEEN_S6 = RV_REG_S6,
};

static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
{
	u8 reg = regmap[bpf_reg];

	switch (reg) {
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		__set_bit(reg, &ctx->flags);
	}
	return reg;
}

static bool seen_reg(int reg, struct rv_jit_context *ctx)
{
	switch (reg) {
	case RV_CTX_F_SEEN_CALL:
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		return test_bit(reg, &ctx->flags);
	}
	return false;
}

static void mark_fp(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
}

static void mark_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static bool seen_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static void mark_tail_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static bool seen_tail_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
{
	mark_tail_call(ctx);

	if (seen_call(ctx)) {
		__set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
		return RV_REG_S6;
	}
	return RV_REG_A6;
}

static bool is_32b_int(s64 val)
{
	return -(1L << 31) <= val && val < (1L << 31);
}

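/*
 * Background for the asymmetric window below: auipc materializes
 * PC + (imm20 << 12), and jalr then adds a sign-extended 12-bit
 * immediate. Offsets with bit 11 set are encoded by bumping the
 * auipc immediate by one and subtracting, which shifts the whole
 * reachable window down by 2^11.
 */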
static bool in_auipc_jalr_range(s64 val)
{
	/*
	 * auipc+jalr can reach any signed PC-relative offset in the range
	 * [-2^31 - 2^11, 2^31 - 2^11).
	 */
	return (-(1L << 31) - (1L << 11)) <= val &&
	       val < ((1L << 31) - (1L << 11));
}

/* Emit fixed-length instructions for address */
static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
{
	u64 ip = (u64)(ctx->insns + ctx->ninsns);
	s64 off = addr - ip;
	s64 upper = (off + (1 << 11)) >> 12;
	s64 lower = off & 0xfff;

	if (extra_pass && !in_auipc_jalr_range(off)) {
		pr_err("bpf-jit: target offset 0x%llx is out of range\n", off);
		return -ERANGE;
	}

	emit(rv_auipc(rd, upper), ctx);
	emit(rv_addi(rd, rd, lower), ctx);
	return 0;
}

/* Emit variable-length instructions for 32-bit and 64-bit imm */
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
	/* Note that the immediate from the add is sign-extended,
	 * which means that we need to compensate this by adding 2^12,
	 * when the 12th bit is set. A simpler way of doing this, and
	 * getting rid of the check, is to just add 2^11 before the
	 * shift. The "Loading a 32-Bit constant" example from the
	 * "Computer Organization and Design, RISC-V edition" book by
	 * Patterson/Hennessy highlights this fact.
	 *
	 * This also means that we need to process LSB to MSB.
	 */
	s64 upper = (val + (1 << 11)) >> 12;
	/* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw,
	 * and addi are signed and RVC checks will perform signed comparisons.
	 */
	s64 lower = ((val & 0xfff) << 52) >> 52;
	int shift;

	if (is_32b_int(val)) {
		if (upper)
			emit_lui(rd, upper, ctx);

		if (!upper) {
			emit_li(rd, lower, ctx);
			return;
		}

		emit_addiw(rd, rd, lower, ctx);
		return;
	}

	shift = __ffs(upper);
	upper >>= shift;
	shift += 12;

	emit_imm(rd, upper, ctx);

	emit_slli(rd, rd, shift, ctx);
	if (lower)
		emit_addi(rd, rd, lower, ctx);
}

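/*
 * The register restores below mirror the saves in
 * bpf_jit_build_prologue() and are popped in the same order, walking
 * down from the top of the frame. For a tail call we jump through t3
 * (the target's bpf_func) at an offset of 20 bytes, past the target's
 * 4 reserved nops (16 bytes) and its 4-byte TCC initialization, so
 * the running tail-call counter is carried over.
 */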
static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
	int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;

	if (seen_reg(RV_REG_RA, ctx)) {
		emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}

	emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
	/* Set return value. */
	if (!is_tail_call)
		emit_mv(RV_REG_A0, RV_REG_A5, ctx);
	emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
		  is_tail_call ? 20 : 0, /* skip reserved nops and TCC init */
		  ctx);
}

static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
		     struct rv_jit_context *ctx)
{
	switch (cond) {
	case BPF_JEQ:
		emit(rv_beq(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGT:
		emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JLT:
		emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGE:
		emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JLE:
		emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JNE:
		emit(rv_bne(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGT:
		emit(rv_blt(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JSLT:
		emit(rv_blt(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGE:
		emit(rv_bge(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSLE:
		emit(rv_bge(rs, rd, rvoff >> 1), ctx);
	}
}

static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
			struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (is_13b_int(rvoff)) {
		emit_bcc(cond, rd, rs, rvoff, ctx);
		return;
	}

	/* Adjust for jal */
	rvoff -= 4;

	/* Transform, e.g.:
	 *   bne rd,rs,foo
	 * to
	 *   beq rd,rs,<.L1>
	 *   (auipc foo)
	 *   jal(r) foo
	 * .L1
	 */
	cond = invert_bpf_cond(cond);
	if (is_21b_int(rvoff)) {
		emit_bcc(cond, rd, rs, 8, ctx);
		emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
		return;
	}

	/* 32b case: no need for an additional rvoff adjustment, since we
	 * get that from the auipc at PC', where PC = PC' + 4.
	 */
	upper = (rvoff + (1 << 11)) >> 12;
	lower = rvoff & 0xfff;

	emit_bcc(cond, rd, rs, 12, ctx);
	emit(rv_auipc(RV_REG_T1, upper), ctx);
	emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}

static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
{
	emit_slli(reg, reg, 32, ctx);
	emit_srli(reg, reg, 32, ctx);
}

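/*
 * Tail call: the chain length is bounded by the tail-call counter
 * (TCC), seeded with MAX_TAIL_CALL_CNT in the prologue. Each tail
 * call decrements it and bails out to the following instruction once
 * it would go negative. The actual jump goes through the target's
 * bpf_func, past its prologue nops and TCC init (see
 * __build_epilogue()).
 */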
static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
	int tc_ninsn, off, start_insn = ctx->ninsns;
	u8 tcc = rv_tail_call_reg(ctx);

	/* a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
		   ctx->offset[0];
	emit_zext_32(RV_REG_A2, ctx);

	off = offsetof(struct bpf_array, map.max_entries);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);

	/* if (--TCC < 0)
	 *	goto out;
	 */
	emit_addi(RV_REG_TCC, tcc, -1, ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);

	/* prog = array->ptrs[index];
	 * if (!prog)
	 *	goto out;
	 */
	emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
	emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
	off = offsetof(struct bpf_array, ptrs);
	if (is_12b_check(off, insn))
		return -1;
	emit_ld(RV_REG_T2, off, RV_REG_T2, ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	if (is_12b_check(off, insn))
		return -1;
	emit_ld(RV_REG_T3, off, RV_REG_T2, ctx);
	__build_epilogue(true, ctx);
	return 0;
}

static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
		      struct rv_jit_context *ctx)
{
	u8 code = insn->code;

	switch (code) {
	case BPF_JMP | BPF_JA:
	case BPF_JMP | BPF_CALL:
	case BPF_JMP | BPF_EXIT:
	case BPF_JMP | BPF_TAIL_CALL:
		break;
	default:
		*rd = bpf_to_rv_reg(insn->dst_reg, ctx);
	}

	if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
	    code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
	    code & BPF_LDX || code & BPF_STX)
		*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}

static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit_mv(RV_REG_T2, *rd, ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit_mv(RV_REG_T1, *rs, ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit_addiw(RV_REG_T2, *rd, 0, ctx);
	emit_addiw(RV_REG_T1, *rs, 0, ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
{
	emit_mv(RV_REG_T2, *rd, ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
}

static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
{
	emit_addiw(RV_REG_T2, *rd, 0, ctx);
	*rd = RV_REG_T2;
}

static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
			      struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (rvoff && fixed_addr && is_21b_int(rvoff)) {
		emit(rv_jal(rd, rvoff >> 1), ctx);
		return 0;
	} else if (in_auipc_jalr_range(rvoff)) {
		upper = (rvoff + (1 << 11)) >> 12;
		lower = rvoff & 0xfff;
		emit(rv_auipc(RV_REG_T1, upper), ctx);
		emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
		return 0;
	}

	pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
	return -ERANGE;
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

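/*
 * Note that on the first pass ctx->insns is still NULL, so emit_call()
 * passes rvoff == 0 and emit_jump_and_link() emits the worst-case
 * auipc+jalr pair; the real offset is filled in on a later pass once
 * addresses are known.
 */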
static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
	s64 off = 0;
	u64 ip;

	if (addr && ctx->insns) {
		ip = (u64)(long)(ctx->insns + ctx->ninsns);
		off = addr - ip;
	}

	return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}

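/*
 * BPF atomics map one-to-one onto RISC-V AMO instructions where such
 * an instruction exists. BPF_CMPXCHG has no single-instruction
 * equivalent and is open-coded below as an lr/sc retry loop followed
 * by a full read/write fence.
 */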
static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
			struct rv_jit_context *ctx)
{
	u8 r0;
	int jmp_offset;

	if (off) {
		if (is_12b_int(off)) {
			emit_addi(RV_REG_T1, rd, off, ctx);
		} else {
			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		}
		rd = RV_REG_T1;
	}

	switch (imm) {
	/* lock *(u32/u64 *)(dst_reg + off16) <op>= src_reg */
	case BPF_ADD:
		emit(is64 ? rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_AND:
		emit(is64 ? rv_amoand_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoand_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_OR:
		emit(is64 ? rv_amoor_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_XOR:
		emit(is64 ? rv_amoxor_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoxor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
	case BPF_ADD | BPF_FETCH:
		emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
		     rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	case BPF_AND | BPF_FETCH:
		emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
		     rv_amoand_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	case BPF_OR | BPF_FETCH:
		emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
		     rv_amoor_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	case BPF_XOR | BPF_FETCH:
		emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
		     rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
	case BPF_XCHG:
		emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
		     rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
	case BPF_CMPXCHG:
		r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
		emit(is64 ? rv_addi(RV_REG_T2, r0, 0) :
		     rv_addiw(RV_REG_T2, r0, 0), ctx);
		emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
		     rv_lr_w(r0, 0, rd, 0, 0), ctx);
		jmp_offset = ninsns_rvoff(8);
		emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
		     rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
		jmp_offset = ninsns_rvoff(-6);
		emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
		emit(rv_fence(0x3, 0x3), ctx);
		break;
	}
}

#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)

bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	*(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
	regs->epc = (unsigned long)&ex->fixup - offset;

	return true;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct rv_jit_context *ctx,
				 int dst_reg, int insn_len)
{
	struct exception_table_entry *ex;
	unsigned long pc;
	off_t offset;

	if (!ctx->insns || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	if (WARN_ON_ONCE(insn_len > ctx->ninsns))
		return -EINVAL;

	if (WARN_ON_ONCE(!rvc_enabled() && insn_len == 1))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->nexentries];
	pc = (unsigned long)&ctx->insns[ctx->ninsns - insn_len];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
		FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
	ex->type = EX_TYPE_BPF;

	ctx->nexentries++;
	return 0;
}

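/*
 * Patch-site generators for bpf_arch_text_poke(). A call site is a
 * fixed four-instruction sequence (spill ra, auipc+jalr, reload ra);
 * a jump site is two instructions (auipc+jalr). With no target they
 * degenerate to nops of the same size, keeping every site patchable
 * in place later.
 */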
static int gen_call_or_nops(void *target, void *ip, u32 *insns)
{
	s64 rvoff;
	int i, ret;
	struct rv_jit_context ctx;

	ctx.ninsns = 0;
	ctx.insns = (u16 *)insns;

	if (!target) {
		for (i = 0; i < 4; i++)
			emit(rv_nop(), &ctx);
		return 0;
	}

	rvoff = (s64)(target - (ip + 4));
	emit(rv_sd(RV_REG_SP, -8, RV_REG_RA), &ctx);
	ret = emit_jump_and_link(RV_REG_RA, rvoff, false, &ctx);
	if (ret)
		return ret;
	emit(rv_ld(RV_REG_RA, -8, RV_REG_SP), &ctx);

	return 0;
}

static int gen_jump_or_nops(void *target, void *ip, u32 *insns)
{
	s64 rvoff;
	struct rv_jit_context ctx;

	ctx.ninsns = 0;
	ctx.insns = (u16 *)insns;

	if (!target) {
		emit(rv_nop(), &ctx);
		emit(rv_nop(), &ctx);
		return 0;
	}

	rvoff = (s64)(target - ip);
	return emit_jump_and_link(RV_REG_ZERO, rvoff, false, &ctx);
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
		       void *old_addr, void *new_addr)
{
	u32 old_insns[4], new_insns[4];
	bool is_call = poke_type == BPF_MOD_CALL;
	int (*gen_insns)(void *target, void *ip, u32 *insns);
	int ninsns = is_call ? 4 : 2;
	int ret;

	if (!is_bpf_text_address((unsigned long)ip))
		return -ENOTSUPP;

	gen_insns = is_call ? gen_call_or_nops : gen_jump_or_nops;

	ret = gen_insns(old_addr, ip, old_insns);
	if (ret)
		return ret;

	if (memcmp(ip, old_insns, ninsns * 4))
		return -EFAULT;

	ret = gen_insns(new_addr, ip, new_insns);
	if (ret)
		return ret;

	cpus_read_lock();
	mutex_lock(&text_mutex);
	if (memcmp(ip, new_insns, ninsns * 4))
		ret = patch_text(ip, new_insns, ninsns);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return ret;
}

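/*
 * Translate a single eBPF instruction. Returns 0 on success, 1 when
 * the following BPF instruction was consumed as well (the 16-byte
 * LD_IMM64, or a load fused with the verifier's zero-extension insn),
 * and a negative errno on failure.
 */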
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		      bool extra_pass)
{
	bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
		    BPF_CLASS(insn->code) == BPF_JMP;
	int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
	struct bpf_prog_aux *aux = ctx->prog->aux;
	u8 rd = -1, rs = -1, code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;

	init_regs(&rd, &rs, insn, ctx);

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		if (imm == 1) {
			/* Special mov32 for zext */
			emit_zext_32(rd, ctx);
			break;
		}
		emit_mv(rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_add(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		if (is64)
			emit_sub(rd, rd, rs, ctx);
		else
			emit_subw(rd, rd, rs, ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_and(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_or(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_xor(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit_sub(rd, RV_REG_ZERO, rd, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			emit_slli(rd, rd, 48, ctx);
			emit_srli(rd, rd, 48, ctx);
			break;
		case 32:
			if (!aux->verifier_zext)
				emit_zext_32(rd, ctx);
			break;
		case 64:
			/* Do nothing */
			break;
		}
		break;

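	/*
	 * Byte swap for BPF_FROM_BE: RV64G has no byte-swap instruction,
	 * so the swapped value is built in t2 one byte per round: peel
	 * the low byte off rd, accumulate it into t2, shift t2 left and
	 * rd right by 8. The 16- and 32-bit widths bail out early via
	 * out_be.
	 */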
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		emit_li(RV_REG_T2, 0, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);
		if (imm == 16)
			goto out_be;

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);
		if (imm == 32)
			goto out_be;

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);
out_be:
		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);

		emit_mv(rd, RV_REG_T2, ctx);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_imm(rd, imm, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_12b_int(imm)) {
			emit_addi(rd, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_add(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_12b_int(-imm)) {
			emit_addi(rd, rd, -imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_sub(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_12b_int(imm)) {
			emit_andi(rd, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_and(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_ori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_or(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_xori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_xor(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
		     rv_mulw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
		     rv_divuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
		     rv_remuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_slli(rd, rd, imm, ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (is64)
			emit_srli(rd, rd, imm, ctx);
		else
			emit(rv_srliw(rd, rd, imm), ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (is64)
			emit_srai(rd, rd, imm, ctx);
		else
			emit(rv_sraiw(rd, rd, imm), ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		rvoff = rv_offset(i, off, ctx);
		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
		if (ret)
			return ret;
		break;

	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		rvoff = rv_offset(i, off, ctx);
		if (!is64) {
			s = ctx->ninsns;
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd_rs(&rd, &rs, ctx);
			else
				emit_zext_32_rd_rs(&rd, &rs, ctx);
			e = ctx->ninsns;

			/* Adjust for extra insns */
			rvoff -= ninsns_rvoff(e - s);
		}

		if (BPF_OP(code) == BPF_JSET) {
			/* Adjust for and */
			rvoff -= 4;
			emit_and(RV_REG_T1, rd, rs, ctx);
			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
				    ctx);
		} else {
			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		}
		break;

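	/*
	 * As with the register-register compares above, 32-bit immediate
	 * compares widen the operands first (sign-extended for signed
	 * conditions, zero-extended otherwise) and shrink the branch
	 * offset by the extra instructions emitted.
	 */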
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (imm) {
			emit_imm(RV_REG_T1, imm, ctx);
			rs = RV_REG_T1;
		} else {
			/* If imm is 0, simply use the zero register. */
			rs = RV_REG_ZERO;
		}
		if (!is64) {
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd(&rd, ctx);
			else
				emit_zext_32_rd_t1(&rd, ctx);
		}
		e = ctx->ninsns;

		/* Adjust for extra insns */
		rvoff -= ninsns_rvoff(e - s);
		emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		break;

	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (is_12b_int(imm)) {
			emit_andi(RV_REG_T1, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_and(RV_REG_T1, rd, RV_REG_T1, ctx);
		}
		/* For jset32, we should clear the upper 32 bits of t1, but
		 * sign-extension is sufficient here and saves one instruction,
		 * as t1 is used only in comparison against zero.
		 */
		if (!is64 && imm < 0)
			emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
		e = ctx->ninsns;
		rvoff -= ninsns_rvoff(e - s);
		emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		bool fixed_addr;
		u64 addr;

		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &addr, &fixed_addr);
		if (ret < 0)
			return ret;

		ret = emit_call(addr, fixed_addr, ctx);
		if (ret)
			return ret;

		emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(i, ctx))
			return -1;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		rvoff = epilogue_offset(ctx);
		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
		if (ret)
			return ret;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		if (bpf_pseudo_func(insn)) {
			/* fixed-length insns for extra jit pass */
			ret = emit_addr(rd, imm64, extra_pass, ctx);
			if (ret)
				return ret;
		} else {
			emit_imm(rd, imm64, ctx);
		}

		return 1;
	}

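	/*
	 * Loads: each case records the length of the final load
	 * instruction (in 2-byte halfword units) so that
	 * add_exception_handler() can point the extable entry at the
	 * instruction that may fault. For BPF_PROBE_MEM the fixup
	 * zeroes the destination register and resumes right after the
	 * faulting load (see ex_handler_bpf()).
	 */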
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	{
		int insn_len, insns_start;

		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit(rv_lbu(rd, off, rs), ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
			insn_len = ctx->ninsns - insns_start;
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case BPF_H:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit(rv_lhu(rd, off, rs), ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
			insn_len = ctx->ninsns - insns_start;
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case BPF_W:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit(rv_lwu(rd, off, rs), ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
			insn_len = ctx->ninsns - insns_start;
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case BPF_DW:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit_ld(rd, off, rs, ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit_ld(rd, 0, RV_REG_T1, ctx);
			insn_len = ctx->ninsns - insns_start;
			break;
		}

		ret = add_exception_handler(insn, ctx, rd, insn_len);
		if (ret)
			return ret;
		break;
	}

	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;

	case BPF_ST | BPF_MEM | BPF_H:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_W:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit_sw(rd, off, RV_REG_T1, ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_DW:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit_sd(rd, off, RV_REG_T1, ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit(rv_sb(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_H:
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit(rv_sh(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_W:
		if (is_12b_int(off)) {
			emit_sw(rd, off, rs, ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit_sw(RV_REG_T1, 0, rs, ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_DW:
		if (is_12b_int(off)) {
			emit_sd(rd, off, rs, ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit_sd(RV_REG_T1, 0, rs, ctx);
		break;
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(rd, rs, off, imm,
			    BPF_SIZE(code) == BPF_DW, ctx);
		break;
	default:
		pr_err("bpf-jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

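/*
 * Stack frame built by the prologue (growing down):
 *
 *   old sp -> +-------------------+  <- frame pointer (fp)
 *             | ra     (if seen)  |
 *             | fp                |
 *             | s1..s6 (as seen)  |
 *             | pad to 16 bytes   |
 *             +-------------------+  <- BPF frame pointer (s5)
 *             | BPF program stack |
 *   new sp -> +-------------------+
 *
 * The four leading nops are reserved for bpf_arch_text_poke(), and
 * the TCC initialization that follows them is the instruction that
 * tail calls jump past (see __build_epilogue()).
 */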
void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
	int i, stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
	if (bpf_stack_adjust)
		mark_fp(ctx);

	if (seen_reg(RV_REG_RA, ctx))
		stack_adjust += 8;
	stack_adjust += 8; /* RV_REG_FP */
	if (seen_reg(RV_REG_S1, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S2, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S3, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S4, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S5, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S6, ctx))
		stack_adjust += 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	store_offset = stack_adjust - 8;

	/* reserve 4 nop insns */
	for (i = 0; i < 4; i++)
		emit(rv_nop(), ctx);

	/* The first instruction always sets the tail-call-counter
	 * (TCC) register. This instruction is skipped by tail calls.
	 * Force using a 4-byte (non-compressed) instruction.
	 */
	emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);

	emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx);

	if (seen_reg(RV_REG_RA, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx);
		store_offset -= 8;
	}
	emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
		store_offset -= 8;
	}

	emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);

	if (bpf_stack_adjust)
		emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx);

	/* The program contains calls and tail calls, so RV_REG_TCC
	 * needs to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);

	ctx->stack_size = stack_adjust;
}

void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
	__build_epilogue(false, ctx);
}