// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF bytecode on MIPS.
 * Implementation of JIT functions for 64-bit CPUs.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <johan.almbladh@gmail.com>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/uasm.h>

#include "bpf_jit_comp.h"

/* MIPS t0-t3 are not available in the n64 ABI */
#undef MIPS_R_T0
#undef MIPS_R_T1
#undef MIPS_R_T2
#undef MIPS_R_T3

/* Stack is 16-byte aligned in n64 ABI */
#define MIPS_STACK_ALIGNMENT 16

/* Extra 64-bit eBPF registers used by JIT */
#define JIT_REG_TC (MAX_BPF_JIT_REG + 0)
#define JIT_REG_ZX (MAX_BPF_JIT_REG + 1)

/* Number of prologue bytes to skip when doing a tail call */
#define JIT_TCALL_SKIP 4

/* Callee-saved CPU registers that the JIT must preserve */
#define JIT_CALLEE_REGS   \
        (BIT(MIPS_R_S0) | \
         BIT(MIPS_R_S1) | \
         BIT(MIPS_R_S2) | \
         BIT(MIPS_R_S3) | \
         BIT(MIPS_R_S4) | \
         BIT(MIPS_R_S5) | \
         BIT(MIPS_R_S6) | \
         BIT(MIPS_R_S7) | \
         BIT(MIPS_R_GP) | \
         BIT(MIPS_R_FP) | \
         BIT(MIPS_R_RA))

/* Caller-saved CPU registers available for JIT use */
#define JIT_CALLER_REGS   \
        (BIT(MIPS_R_A5) | \
         BIT(MIPS_R_A6) | \
         BIT(MIPS_R_A7))

/*
 * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers.
 * MIPS registers t4 - t7 may be used by the JIT as temporary registers.
 * MIPS registers t8 - t9 are reserved for single-register common functions.
 */
static const u8 bpf2mips64[] = {
        /* Return value from in-kernel function, and exit value from eBPF */
        [BPF_REG_0] = MIPS_R_V0,
        /* Arguments from eBPF program to in-kernel function */
        [BPF_REG_1] = MIPS_R_A0,
        [BPF_REG_2] = MIPS_R_A1,
        [BPF_REG_3] = MIPS_R_A2,
        [BPF_REG_4] = MIPS_R_A3,
        [BPF_REG_5] = MIPS_R_A4,
        /* Callee-saved registers that in-kernel function will preserve */
        [BPF_REG_6] = MIPS_R_S0,
        [BPF_REG_7] = MIPS_R_S1,
        [BPF_REG_8] = MIPS_R_S2,
        [BPF_REG_9] = MIPS_R_S3,
        /* Read-only frame pointer to access the eBPF stack */
        [BPF_REG_FP] = MIPS_R_FP,
        /* Temporary register for blinding constants */
        [BPF_REG_AX] = MIPS_R_AT,
        /* Tail call count register, caller-saved */
        [JIT_REG_TC] = MIPS_R_A5,
        /* Constant for register zero-extension */
        [JIT_REG_ZX] = MIPS_R_V1,
};

/*
 * MIPS 32-bit operations on 64-bit registers generate a sign-extended
 * result. However, the eBPF ISA mandates zero-extension, so we rely on the
 * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic
 * operations, right shift and byte swap require properly sign-extended
 * operands or the result is unpredictable. We emit explicit sign-extensions
 * in those cases.
 */
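
/*
 * Example of the mismatch: on MIPS64, addu with both operands equal to
 * 0x40000000 leaves 0xffffffff80000000 in the destination register (the
 * 32-bit result, sign-extended), whereas the corresponding eBPF ALU32
 * add must leave 0x0000000080000000. The helpers below bridge that gap.
 */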

/* Sign extension */
static void emit_sext(struct jit_context *ctx, u8 dst, u8 src)
{
        emit(ctx, sll, dst, src, 0);
        clobber_reg(ctx, dst);
}

/* Zero extension */
static void emit_zext(struct jit_context *ctx, u8 dst)
{
        if (cpu_has_mips64r2 || cpu_has_mips64r6) {
                emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
        } else {
                emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]);
                access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */
        }
        clobber_reg(ctx, dst);
}

/* Zero extension, if verifier does not do it for us */
static void emit_zext_ver(struct jit_context *ctx, u8 dst)
{
        if (!ctx->program->aux->verifier_zext)
                emit_zext(ctx, dst);
}

/* dst = imm (64-bit) */
static void emit_mov_i64(struct jit_context *ctx, u8 dst, u64 imm64)
{
        if (imm64 >= 0xffffffffffff8000ULL || imm64 < 0x8000ULL) {
                emit(ctx, daddiu, dst, MIPS_R_ZERO, (s16)imm64);
        } else if (imm64 >= 0xffffffff80000000ULL ||
                   (imm64 < 0x80000000 && imm64 > 0xffff)) {
                emit(ctx, lui, dst, (s16)(imm64 >> 16));
                emit(ctx, ori, dst, dst, (u16)imm64 & 0xffff);
        } else {
                u8 acc = MIPS_R_ZERO;
                int shift = 0;
                int k;

                for (k = 0; k < 4; k++) {
                        u16 half = imm64 >> (48 - 16 * k);

                        if (acc == dst)
                                shift += 16;

                        if (half) {
                                if (shift)
                                        emit(ctx, dsll_safe, dst, dst, shift);
                                emit(ctx, ori, dst, acc, half);
                                acc = dst;
                                shift = 0;
                        }
                }
                if (shift)
                        emit(ctx, dsll_safe, dst, dst, shift);
        }
        clobber_reg(ctx, dst);
}
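
/*
 * Illustration of the shift-and-accumulate loop above, for the sample
 * constant 0x0012000000340056 (roughly the emitted sequence):
 *
 *   ori    dst, zero, 0x0012
 *   dsll32 dst, dst, 0         # dsll_safe: 32-bit shift
 *   ori    dst, dst, 0x0034
 *   dsll   dst, dst, 16
 *   ori    dst, dst, 0x0056
 *
 * All-zero half words only grow the pending shift amount, so they add
 * no instructions of their own.
 */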

/* ALU immediate operation (64-bit) */
static void emit_alu_i64(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
{
        switch (BPF_OP(op)) {
        /* dst = dst | imm */
        case BPF_OR:
                emit(ctx, ori, dst, dst, (u16)imm);
                break;
        /* dst = dst ^ imm */
        case BPF_XOR:
                emit(ctx, xori, dst, dst, (u16)imm);
                break;
        /* dst = -dst */
        case BPF_NEG:
                emit(ctx, dsubu, dst, MIPS_R_ZERO, dst);
                break;
        /* dst = dst << imm */
        case BPF_LSH:
                emit(ctx, dsll_safe, dst, dst, imm);
                break;
        /* dst = dst >> imm */
        case BPF_RSH:
                emit(ctx, dsrl_safe, dst, dst, imm);
                break;
        /* dst = dst >> imm (arithmetic) */
        case BPF_ARSH:
                emit(ctx, dsra_safe, dst, dst, imm);
                break;
        /* dst = dst + imm */
        case BPF_ADD:
                emit(ctx, daddiu, dst, dst, imm);
                break;
        /* dst = dst - imm */
        case BPF_SUB:
                emit(ctx, daddiu, dst, dst, -imm);
                break;
        default:
                /* Width-generic operations */
                emit_alu_i(ctx, dst, imm, op);
        }
        clobber_reg(ctx, dst);
}

/* ALU register operation (64-bit) */
static void emit_alu_r64(struct jit_context *ctx, u8 dst, u8 src, u8 op)
{
        switch (BPF_OP(op)) {
        /* dst = dst << src */
        case BPF_LSH:
                emit(ctx, dsllv, dst, dst, src);
                break;
        /* dst = dst >> src */
        case BPF_RSH:
                emit(ctx, dsrlv, dst, dst, src);
                break;
        /* dst = dst >> src (arithmetic) */
        case BPF_ARSH:
                emit(ctx, dsrav, dst, dst, src);
                break;
        /* dst = dst + src */
        case BPF_ADD:
                emit(ctx, daddu, dst, dst, src);
                break;
        /* dst = dst - src */
        case BPF_SUB:
                emit(ctx, dsubu, dst, dst, src);
                break;
        /* dst = dst * src */
        case BPF_MUL:
                if (cpu_has_mips64r6) {
                        emit(ctx, dmulu, dst, dst, src);
                } else {
                        emit(ctx, dmultu, dst, src);
                        emit(ctx, mflo, dst);
                }
                break;
        /* dst = dst / src */
        case BPF_DIV:
                if (cpu_has_mips64r6) {
                        emit(ctx, ddivu_r6, dst, dst, src);
                } else {
                        emit(ctx, ddivu, dst, src);
                        emit(ctx, mflo, dst);
                }
                break;
        /* dst = dst % src */
        case BPF_MOD:
                if (cpu_has_mips64r6) {
                        emit(ctx, dmodu, dst, dst, src);
                } else {
                        emit(ctx, ddivu, dst, src);
                        emit(ctx, mfhi, dst);
                }
                break;
        default:
                /* Width-generic operations */
                emit_alu_r(ctx, dst, src, op);
        }
        clobber_reg(ctx, dst);
}

/* Swap sub words in a register double word */
static void emit_swap_r64(struct jit_context *ctx, u8 dst, u8 mask, u32 bits)
{
        u8 tmp = MIPS_R_T9;

        emit(ctx, and, tmp, dst, mask);  /* tmp = dst & mask  */
        emit(ctx, dsll, tmp, tmp, bits); /* tmp = tmp << bits */
        emit(ctx, dsrl, dst, dst, bits); /* dst = dst >> bits */
        emit(ctx, and, dst, dst, mask);  /* dst = dst & mask  */
        emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp   */
}

/* Swap bytes and truncate a register double word, word or half word */
static void emit_bswap_r64(struct jit_context *ctx, u8 dst, u32 width)
{
        switch (width) {
        /* Swap bytes in a double word */
        case 64:
                if (cpu_has_mips64r2 || cpu_has_mips64r6) {
                        emit(ctx, dsbh, dst, dst);
                        emit(ctx, dshd, dst, dst);
                } else {
                        u8 t1 = MIPS_R_T6;
                        u8 t2 = MIPS_R_T7;

                        emit(ctx, dsll32, t2, dst, 0);  /* t2 = dst << 32    */
                        emit(ctx, dsrl32, dst, dst, 0); /* dst = dst >> 32   */
                        emit(ctx, or, dst, dst, t2);    /* dst = dst | t2    */

                        emit(ctx, ori, t2, MIPS_R_ZERO, 0xffff);
                        emit(ctx, dsll32, t1, t2, 0);   /* t1 = t2 << 32     */
                        emit(ctx, or, t1, t1, t2);      /* t1 = t1 | t2      */
                        emit_swap_r64(ctx, dst, t1, 16);/* dst = swap16(dst) */

                        emit(ctx, lui, t2, 0xff);       /* t2 = 0x00ff0000   */
                        emit(ctx, ori, t2, t2, 0xff);   /* t2 = t2 | 0x00ff  */
                        emit(ctx, dsll32, t1, t2, 0);   /* t1 = t2 << 32     */
                        emit(ctx, or, t1, t1, t2);      /* t1 = t1 | t2      */
                        emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst)  */
                }
                break;
        /* Swap bytes in a word */
        /* Swap bytes in a half word */
        case 32:
        case 16:
                emit_sext(ctx, dst, dst);
                emit_bswap_r(ctx, dst, width);
                if (cpu_has_mips64r2 || cpu_has_mips64r6)
                        emit_zext(ctx, dst);
                break;
        }
        clobber_reg(ctx, dst);
}
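
/*
 * Sketch of the pre-R2 fallback above, with the bytes labeled A (MSB)
 * through H (LSB): the dsll32/dsrl32/or triple exchanges the two 32-bit
 * halves, then emit_swap_r64() swaps 16-bit fields under the constructed
 * mask 0x0000ffff0000ffff and finally 8-bit fields under
 * 0x00ff00ff00ff00ff:
 *
 *   ABCDEFGH -> EFGHABCD -> GHEFCDAB -> HGFEDCBA
 */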

/* Truncate a register double word, word or half word */
static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width)
{
        switch (width) {
        case 64:
                break;
        /* Zero-extend a word */
        case 32:
                emit_zext(ctx, dst);
                break;
        /* Zero-extend a half word */
        case 16:
                emit(ctx, andi, dst, dst, 0xffff);
                break;
        }
        clobber_reg(ctx, dst);
}

/* Load operation: dst = *(size*)(src + off) */
static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
        switch (size) {
        /* Load a byte */
        case BPF_B:
                emit(ctx, lbu, dst, off, src);
                break;
        /* Load a half word */
        case BPF_H:
                emit(ctx, lhu, dst, off, src);
                break;
        /* Load a word */
        case BPF_W:
                emit(ctx, lwu, dst, off, src);
                break;
        /* Load a double word */
        case BPF_DW:
                emit(ctx, ld, dst, off, src);
                break;
        }
        clobber_reg(ctx, dst);
}

/* Store operation: *(size *)(dst + off) = src */
static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
        switch (size) {
        /* Store a byte */
        case BPF_B:
                emit(ctx, sb, src, off, dst);
                break;
        /* Store a half word */
        case BPF_H:
                emit(ctx, sh, src, off, dst);
                break;
        /* Store a word */
        case BPF_W:
                emit(ctx, sw, src, off, dst);
                break;
        /* Store a double word */
        case BPF_DW:
                emit(ctx, sd, src, off, dst);
                break;
        }
}

/* Atomic read-modify-write */
static void emit_atomic_r64(struct jit_context *ctx,
                            u8 dst, u8 src, s16 off, u8 code)
{
        u8 t1 = MIPS_R_T6;
        u8 t2 = MIPS_R_T7;

        LLSC_sync(ctx);
        emit(ctx, lld, t1, off, dst);
        switch (code) {
        case BPF_ADD:
        case BPF_ADD | BPF_FETCH:
                emit(ctx, daddu, t2, t1, src);
                break;
        case BPF_AND:
        case BPF_AND | BPF_FETCH:
                emit(ctx, and, t2, t1, src);
                break;
        case BPF_OR:
        case BPF_OR | BPF_FETCH:
                emit(ctx, or, t2, t1, src);
                break;
        case BPF_XOR:
        case BPF_XOR | BPF_FETCH:
                emit(ctx, xor, t2, t1, src);
                break;
        case BPF_XCHG:
                emit(ctx, move, t2, src);
                break;
        }
        emit(ctx, scd, t2, off, dst);
        emit(ctx, LLSC_beqz, t2, -16 - LLSC_offset);
        emit(ctx, nop); /* Delay slot */

        if (code & BPF_FETCH) {
                emit(ctx, move, src, t1);
                clobber_reg(ctx, src);
        }
}

/* Atomic compare-and-exchange */
static void emit_cmpxchg_r64(struct jit_context *ctx, u8 dst, u8 src, s16 off)
{
        u8 r0 = bpf2mips64[BPF_REG_0];
        u8 t1 = MIPS_R_T6;
        u8 t2 = MIPS_R_T7;

        LLSC_sync(ctx);
        emit(ctx, lld, t1, off, dst);
        emit(ctx, bne, t1, r0, 12);
        emit(ctx, move, t2, src); /* Delay slot */
        emit(ctx, scd, t2, off, dst);
        emit(ctx, LLSC_beqz, t2, -20 - LLSC_offset);
        emit(ctx, move, r0, t1); /* Delay slot */

        clobber_reg(ctx, r0);
}
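
/*
 * Branch offset note for the two LL/SC loops above: the beqz targets the
 * lld again, four fixed instructions back (-16 bytes) in the
 * read-modify-write case and five (-20 bytes) for cmpxchg. LLSC_offset
 * widens the jump on CPUs where LLSC_sync() emits a leading sync that
 * must be re-executed on every retry, i.e. whenever scd reports failure
 * by leaving t2 == 0.
 */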

/* Function call */
static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn)
{
        u8 zx = bpf2mips64[JIT_REG_ZX];
        u8 tmp = MIPS_R_T6;
        bool fixed;
        u64 addr;

        /* Decode the call address */
        if (bpf_jit_get_func_addr(ctx->program, insn, false,
                                  &addr, &fixed) < 0)
                return -1;
        if (!fixed)
                return -1;

        /* Push caller-saved registers on stack */
        push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

        /* Emit function call */
        emit_mov_i64(ctx, tmp, addr & JALR_MASK);
        emit(ctx, jalr, MIPS_R_RA, tmp);
        emit(ctx, nop); /* Delay slot */

        /* Restore caller-saved registers */
        pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

        /* Re-initialize the JIT zero-extension register if accessed */
        if (ctx->accessed & BIT(JIT_REG_ZX)) {
                emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
                emit(ctx, dsrl32, zx, zx, 0);
        }

        clobber_reg(ctx, MIPS_R_RA);
        clobber_reg(ctx, MIPS_R_V0);
        clobber_reg(ctx, MIPS_R_V1);
        return 0;
}

/* Function tail call */
static int emit_tail_call(struct jit_context *ctx)
{
        u8 ary = bpf2mips64[BPF_REG_2];
        u8 ind = bpf2mips64[BPF_REG_3];
        u8 tcc = bpf2mips64[JIT_REG_TC];
        u8 tmp = MIPS_R_T6;
        int off;

        /*
         * Tail call:
         * eBPF R1 - function argument (context ptr), passed in a0-a1
         * eBPF R2 - ptr to object with array of function entry points
         * eBPF R3 - array index of function to be called
         */

        /* if (ind >= ary->map.max_entries) goto out */
        off = offsetof(struct bpf_array, map.max_entries);
        if (off > 0x7fff)
                return -1;
        emit(ctx, lwu, tmp, off, ary);            /* tmp = ary->map.max_entries */
        emit(ctx, sltu, tmp, ind, tmp);           /* tmp = ind < tmp            */
        emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0   */

        /* if (--TCC < 0) goto out */
        emit(ctx, daddiu, tcc, tcc, -1);          /* tcc-- (delay slot)         */
        emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0    */
                                                  /* (next insn delay slot)     */
        /* prog = ary->ptrs[ind] */
        off = offsetof(struct bpf_array, ptrs);
        if (off > 0x7fff)
                return -1;
        emit(ctx, dsll, tmp, ind, 3);             /* tmp = ind << 3             */
        emit(ctx, daddu, tmp, tmp, ary);          /* tmp += ary                 */
        emit(ctx, ld, tmp, off, tmp);             /* tmp = *(tmp + off)         */

        /* if (prog == 0) goto out */
        emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0   */
        emit(ctx, nop);                           /* Delay slot                 */

        /* func = prog->bpf_func + 4 (prologue skip offset) */
        off = offsetof(struct bpf_prog, bpf_func);
        if (off > 0x7fff)
                return -1;
        emit(ctx, ld, tmp, off, tmp);                /* tmp = *(tmp + off) */
        emit(ctx, daddiu, tmp, tmp, JIT_TCALL_SKIP); /* tmp += skip (4)    */

        /* goto func */
        build_epilogue(ctx, tmp);
        access_reg(ctx, JIT_REG_TC);
        return 0;
}
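
/*
 * In the sequence above, "goto out" means branching to the target
 * produced by get_offset(ctx, 1), i.e. the next eBPF instruction, so a
 * failed tail call simply falls through. A successful one tears the
 * current frame down via build_epilogue() and enters the target program
 * JIT_TCALL_SKIP bytes into its prologue, skipping the instruction that
 * would reinitialize the tail call counter.
 */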

/*
 * Stack frame layout for a JITed program (stack grows down).
 *
 * Higher address  : Previous stack frame      :
 *                 +===========================+ <--- MIPS sp before call
 *                 | Callee-saved registers,   |
 *                 | including RA and FP       |
 *                 +---------------------------+ <--- eBPF FP (MIPS fp)
 *                 | Local eBPF variables      |
 *                 | allocated by program      |
 *                 +---------------------------+
 *                 | Reserved for caller-saved |
 *                 | registers                 |
 * Lower address   +===========================+ <--- MIPS sp
 */

/* Build program prologue to set up the stack and registers */
void build_prologue(struct jit_context *ctx)
{
        u8 fp = bpf2mips64[BPF_REG_FP];
        u8 tc = bpf2mips64[JIT_REG_TC];
        u8 zx = bpf2mips64[JIT_REG_ZX];
        int stack, saved, locals, reserved;

        /*
         * The first instruction initializes the tail call count register.
         * On a tail call, the calling function jumps into the prologue
         * after this instruction.
         */
        emit(ctx, ori, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT, 0xffff));

        /* === Entry-point for tail calls === */

        /*
         * If the eBPF frame pointer and tail call count registers were
         * accessed they must be preserved. Mark them as clobbered here
         * to save and restore them on the stack as needed.
         */
        if (ctx->accessed & BIT(BPF_REG_FP))
                clobber_reg(ctx, fp);
        if (ctx->accessed & BIT(JIT_REG_TC))
                clobber_reg(ctx, tc);
        if (ctx->accessed & BIT(JIT_REG_ZX))
                clobber_reg(ctx, zx);

        /* Compute the stack space needed for callee-saved registers */
        saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64);
        saved = ALIGN(saved, MIPS_STACK_ALIGNMENT);

        /* Stack space used by eBPF program local data */
        locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);

        /*
         * If we are emitting function calls, reserve extra stack space for
         * caller-saved registers needed by the JIT. The required space is
         * computed automatically during resource usage discovery (pass 1).
         */
        reserved = ctx->stack_used;

        /* Allocate the stack frame */
        stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT);
        if (stack)
                emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack);

        /* Store callee-saved registers on stack */
        push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved);

        /* Initialize the eBPF frame pointer if accessed */
        if (ctx->accessed & BIT(BPF_REG_FP))
                emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved);

        /* Initialize the eBPF JIT zero-extension register if accessed */
        if (ctx->accessed & BIT(JIT_REG_ZX)) {
                emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
                emit(ctx, dsrl32, zx, zx, 0);
        }

        ctx->saved_size = saved;
        ctx->stack_size = stack;
}
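
/*
 * Worked example with hypothetical figures: a program that clobbers s0,
 * fp and ra (saved = 3 * 8 = 24, aligned up to 32), declares
 * stack_depth = 24 (locals = 32 after alignment) and needs 16 bytes of
 * caller-saved spill space ends up with stack = 80. The eBPF frame
 * pointer then points at sp + 48, the base of the callee-saved area.
 */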

/* Build the program epilogue to restore the stack and registers */
void build_epilogue(struct jit_context *ctx, int dest_reg)
{
        /* Restore callee-saved registers from stack */
        pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
                 ctx->stack_size - ctx->saved_size);

        /* Release the stack frame */
        if (ctx->stack_size)
                emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);

        /* Jump to return address and sign-extend the 32-bit return value */
        emit(ctx, jr, dest_reg);
        emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */
}

/* Build one eBPF instruction */
int build_insn(const struct bpf_insn *insn, struct jit_context *ctx)
{
        u8 dst = bpf2mips64[insn->dst_reg];
        u8 src = bpf2mips64[insn->src_reg];
        u8 res = bpf2mips64[BPF_REG_0];
        u8 code = insn->code;
        s16 off = insn->off;
        s32 imm = insn->imm;
        s32 val, rel;
        u8 alu, jmp;

        switch (code) {
        /* ALU operations */
        /* dst = imm */
        case BPF_ALU | BPF_MOV | BPF_K:
                emit_mov_i(ctx, dst, imm);
                emit_zext_ver(ctx, dst);
                break;
        /* dst = src */
        case BPF_ALU | BPF_MOV | BPF_X:
                if (imm == 1) {
                        /* Special mov32 for zext */
                        emit_zext(ctx, dst);
                } else {
                        emit_mov_r(ctx, dst, src);
                        emit_zext_ver(ctx, dst);
                }
                break;
        /* dst = -dst */
        case BPF_ALU | BPF_NEG:
                emit_sext(ctx, dst, dst);
                emit_alu_i(ctx, dst, 0, BPF_NEG);
                emit_zext_ver(ctx, dst);
                break;
        /* dst = dst & imm */
        /* dst = dst | imm */
        /* dst = dst ^ imm */
        /* dst = dst << imm */
        case BPF_ALU | BPF_OR | BPF_K:
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU | BPF_LSH | BPF_K:
                if (!valid_alu_i(BPF_OP(code), imm)) {
                        emit_mov_i(ctx, MIPS_R_T4, imm);
                        emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
                } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
                        emit_alu_i(ctx, dst, val, alu);
                }
                emit_zext_ver(ctx, dst);
                break;
        /* dst = dst >> imm */
        /* dst = dst >> imm (arithmetic) */
        /* dst = dst + imm */
        /* dst = dst - imm */
        /* dst = dst * imm */
        /* dst = dst / imm */
        /* dst = dst % imm */
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU | BPF_ARSH | BPF_K:
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU | BPF_MOD | BPF_K:
                if (!valid_alu_i(BPF_OP(code), imm)) {
                        emit_sext(ctx, dst, dst);
                        emit_mov_i(ctx, MIPS_R_T4, imm);
                        emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
                } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
                        emit_sext(ctx, dst, dst);
                        emit_alu_i(ctx, dst, val, alu);
                }
                emit_zext_ver(ctx, dst);
                break;
        /* dst = dst & src */
        /* dst = dst | src */
        /* dst = dst ^ src */
        /* dst = dst << src */
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU | BPF_OR | BPF_X:
        case BPF_ALU | BPF_XOR | BPF_X:
        case BPF_ALU | BPF_LSH | BPF_X:
                emit_alu_r(ctx, dst, src, BPF_OP(code));
                emit_zext_ver(ctx, dst);
                break;
        /* dst = dst >> src */
        /* dst = dst >> src (arithmetic) */
        /* dst = dst + src */
        /* dst = dst - src */
        /* dst = dst * src */
        /* dst = dst / src */
        /* dst = dst % src */
        case BPF_ALU | BPF_RSH | BPF_X:
        case BPF_ALU | BPF_ARSH | BPF_X:
        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU | BPF_MUL | BPF_X:
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU | BPF_MOD | BPF_X:
                emit_sext(ctx, dst, dst);
                emit_sext(ctx, MIPS_R_T4, src);
                emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
                emit_zext_ver(ctx, dst);
                break;
        /* dst = imm (64-bit) */
        case BPF_ALU64 | BPF_MOV | BPF_K:
                emit_mov_i(ctx, dst, imm);
                break;
        /* dst = src (64-bit) */
        case BPF_ALU64 | BPF_MOV | BPF_X:
                emit_mov_r(ctx, dst, src);
                break;
        /* dst = -dst (64-bit) */
        case BPF_ALU64 | BPF_NEG:
                emit_alu_i64(ctx, dst, 0, BPF_NEG);
                break;
        /* dst = dst & imm (64-bit) */
        /* dst = dst | imm (64-bit) */
        /* dst = dst ^ imm (64-bit) */
        /* dst = dst << imm (64-bit) */
        /* dst = dst >> imm (64-bit) */
        /* dst = dst >> imm (64-bit, arithmetic) */
        /* dst = dst + imm (64-bit) */
        /* dst = dst - imm (64-bit) */
        /* dst = dst * imm (64-bit) */
        /* dst = dst / imm (64-bit) */
        /* dst = dst % imm (64-bit) */
        case BPF_ALU64 | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_OR | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
        case BPF_ALU64 | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                if (!valid_alu_i(BPF_OP(code), imm)) {
                        emit_mov_i(ctx, MIPS_R_T4, imm);
                        emit_alu_r64(ctx, dst, MIPS_R_T4, BPF_OP(code));
                } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
                        emit_alu_i64(ctx, dst, val, alu);
                }
                break;
        /* dst = dst & src (64-bit) */
        /* dst = dst | src (64-bit) */
        /* dst = dst ^ src (64-bit) */
        /* dst = dst << src (64-bit) */
        /* dst = dst >> src (64-bit) */
        /* dst = dst >> src (64-bit, arithmetic) */
        /* dst = dst + src (64-bit) */
        /* dst = dst - src (64-bit) */
        /* dst = dst * src (64-bit) */
        /* dst = dst / src (64-bit) */
        /* dst = dst % src (64-bit) */
        case BPF_ALU64 | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_OR | BPF_X:
        case BPF_ALU64 | BPF_XOR | BPF_X:
        case BPF_ALU64 | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_RSH | BPF_X:
        case BPF_ALU64 | BPF_ARSH | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_X:
        case BPF_ALU64 | BPF_DIV | BPF_X:
        case BPF_ALU64 | BPF_MOD | BPF_X:
                emit_alu_r64(ctx, dst, src, BPF_OP(code));
                break;
        /* dst = htole(dst) */
        /* dst = htobe(dst) */
        case BPF_ALU | BPF_END | BPF_FROM_LE:
        case BPF_ALU | BPF_END | BPF_FROM_BE:
                if (BPF_SRC(code) ==
#ifdef __BIG_ENDIAN
                    BPF_FROM_LE
#else
                    BPF_FROM_BE
#endif
                    )
                        emit_bswap_r64(ctx, dst, imm);
                else
                        emit_trunc_r64(ctx, dst, imm);
                break;
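        /*
         * For instance, on a big-endian kernel BPF_FROM_BE already matches
         * the native byte order, so only truncation to the operand width
         * is needed, while BPF_FROM_LE requires the full byte swap; the
         * #ifdef above selects which source triggers the swap.
         */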
        /* dst = imm64 */
        case BPF_LD | BPF_IMM | BPF_DW:
                emit_mov_i64(ctx, dst, (u32)imm | ((u64)insn[1].imm << 32));
                return 1;
        /* LDX: dst = *(size *)(src + off) */
        case BPF_LDX | BPF_MEM | BPF_W:
        case BPF_LDX | BPF_MEM | BPF_H:
        case BPF_LDX | BPF_MEM | BPF_B:
        case BPF_LDX | BPF_MEM | BPF_DW:
                emit_ldx(ctx, dst, src, off, BPF_SIZE(code));
                break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_DW:
                emit_mov_i(ctx, MIPS_R_T4, imm);
                emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code));
                break;
        /* STX: *(size *)(dst + off) = src */
        case BPF_STX | BPF_MEM | BPF_W:
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_B:
        case BPF_STX | BPF_MEM | BPF_DW:
                emit_stx(ctx, dst, src, off, BPF_SIZE(code));
                break;
        /* Speculation barrier */
        case BPF_ST | BPF_NOSPEC:
                break;
        /* Atomics */
        case BPF_STX | BPF_ATOMIC | BPF_W:
        case BPF_STX | BPF_ATOMIC | BPF_DW:
                switch (imm) {
                case BPF_ADD:
                case BPF_ADD | BPF_FETCH:
                case BPF_AND:
                case BPF_AND | BPF_FETCH:
                case BPF_OR:
                case BPF_OR | BPF_FETCH:
                case BPF_XOR:
                case BPF_XOR | BPF_FETCH:
                case BPF_XCHG:
                        if (BPF_SIZE(code) == BPF_DW) {
                                emit_atomic_r64(ctx, dst, src, off, imm);
                        } else if (imm & BPF_FETCH) {
                                u8 tmp = dst;

                                if (src == dst) { /* Don't overwrite dst */
                                        emit_mov_r(ctx, MIPS_R_T4, dst);
                                        tmp = MIPS_R_T4;
                                }
                                emit_sext(ctx, src, src);
                                emit_atomic_r(ctx, tmp, src, off, imm);
                                emit_zext_ver(ctx, src);
                        } else { /* 32-bit, no fetch */
                                emit_sext(ctx, MIPS_R_T4, src);
                                emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm);
                        }
                        break;
                case BPF_CMPXCHG:
                        if (BPF_SIZE(code) == BPF_DW) {
                                emit_cmpxchg_r64(ctx, dst, src, off);
                        } else {
                                u8 tmp = res;

                                if (res == dst) /* Don't overwrite dst */
                                        tmp = MIPS_R_T4;
                                emit_sext(ctx, tmp, res);
                                emit_sext(ctx, MIPS_R_T5, src);
                                emit_cmpxchg_r(ctx, dst, MIPS_R_T5, tmp, off);
                                if (res == dst) /* Restore result */
                                        emit_mov_r(ctx, res, MIPS_R_T4);
                                /* Result zext inserted by verifier */
                        }
                        break;
                default:
                        goto notyet;
                }
                break;
        /* PC += off if dst == src */
        /* PC += off if dst != src */
        /* PC += off if dst & src */
        /* PC += off if dst > src */
        /* PC += off if dst >= src */
        /* PC += off if dst < src */
        /* PC += off if dst <= src */
        /* PC += off if dst > src (signed) */
        /* PC += off if dst >= src (signed) */
        /* PC += off if dst < src (signed) */
        /* PC += off if dst <= src (signed) */
        case BPF_JMP32 | BPF_JEQ | BPF_X:
        case BPF_JMP32 | BPF_JNE | BPF_X:
        case BPF_JMP32 | BPF_JSET | BPF_X:
        case BPF_JMP32 | BPF_JGT | BPF_X:
        case BPF_JMP32 | BPF_JGE | BPF_X:
        case BPF_JMP32 | BPF_JLT | BPF_X:
        case BPF_JMP32 | BPF_JLE | BPF_X:
        case BPF_JMP32 | BPF_JSGT | BPF_X:
        case BPF_JMP32 | BPF_JSGE | BPF_X:
        case BPF_JMP32 | BPF_JSLT | BPF_X:
        case BPF_JMP32 | BPF_JSLE | BPF_X:
                if (off == 0)
                        break;
                setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
                emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
                emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */
                emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
                if (finish_jmp(ctx, jmp, off) < 0)
                        goto toofar;
                break;
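        /*
         * The JMP32 variants compare sign-extended copies of the operands:
         * a zero-extended negative word such as 0x00000000fffffffe would
         * otherwise rank as a large positive value under the 64-bit signed
         * compares, while sign extension keeps both the signed and the
         * unsigned MIPS comparisons correct for 32-bit semantics.
         */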
        /* PC += off if dst == imm */
        /* PC += off if dst != imm */
        /* PC += off if dst & imm */
        /* PC += off if dst > imm */
        /* PC += off if dst >= imm */
        /* PC += off if dst < imm */
        /* PC += off if dst <= imm */
        /* PC += off if dst > imm (signed) */
        /* PC += off if dst >= imm (signed) */
        /* PC += off if dst < imm (signed) */
        /* PC += off if dst <= imm (signed) */
        case BPF_JMP32 | BPF_JEQ | BPF_K:
        case BPF_JMP32 | BPF_JNE | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_K:
        case BPF_JMP32 | BPF_JGT | BPF_K:
        case BPF_JMP32 | BPF_JGE | BPF_K:
        case BPF_JMP32 | BPF_JLT | BPF_K:
        case BPF_JMP32 | BPF_JLE | BPF_K:
        case BPF_JMP32 | BPF_JSGT | BPF_K:
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
                if (off == 0)
                        break;
                setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
                emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
                if (valid_jmp_i(jmp, imm)) {
                        emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp);
                } else {
                        /* Move large immediate to register, sign-extended */
                        emit_mov_i(ctx, MIPS_R_T5, imm);
                        emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
                }
                if (finish_jmp(ctx, jmp, off) < 0)
                        goto toofar;
                break;
        /* PC += off if dst == src */
        /* PC += off if dst != src */
        /* PC += off if dst & src */
        /* PC += off if dst > src */
        /* PC += off if dst >= src */
        /* PC += off if dst < src */
        /* PC += off if dst <= src */
        /* PC += off if dst > src (signed) */
        /* PC += off if dst >= src (signed) */
        /* PC += off if dst < src (signed) */
        /* PC += off if dst <= src (signed) */
        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP | BPF_JSET | BPF_X:
        case BPF_JMP | BPF_JGT | BPF_X:
        case BPF_JMP | BPF_JGE | BPF_X:
        case BPF_JMP | BPF_JLT | BPF_X:
        case BPF_JMP | BPF_JLE | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_X:
        case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP | BPF_JSLE | BPF_X:
                if (off == 0)
                        break;
                setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
                emit_jmp_r(ctx, dst, src, rel, jmp);
                if (finish_jmp(ctx, jmp, off) < 0)
                        goto toofar;
                break;
        /* PC += off if dst == imm */
        /* PC += off if dst != imm */
        /* PC += off if dst & imm */
        /* PC += off if dst > imm */
        /* PC += off if dst >= imm */
        /* PC += off if dst < imm */
        /* PC += off if dst <= imm */
        /* PC += off if dst > imm (signed) */
        /* PC += off if dst >= imm (signed) */
        /* PC += off if dst < imm (signed) */
        /* PC += off if dst <= imm (signed) */
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JLT | BPF_K:
        case BPF_JMP | BPF_JLE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_K:
                if (off == 0)
                        break;
                setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
                if (valid_jmp_i(jmp, imm)) {
                        emit_jmp_i(ctx, dst, imm, rel, jmp);
                } else {
                        /* Move large immediate to register */
                        emit_mov_i(ctx, MIPS_R_T4, imm);
                        emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp);
                }
                if (finish_jmp(ctx, jmp, off) < 0)
                        goto toofar;
                break;
        /* PC += off */
        case BPF_JMP | BPF_JA:
                if (off == 0)
                        break;
                if (emit_ja(ctx, off) < 0)
                        goto toofar;
                break;
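        /*
         * Conditional immediates that do not fit the 16-bit field of the
         * MIPS compare instructions are first moved into a temporary
         * register and the register-register jump form is used instead.
         * If finish_jmp() reports that the relative branch cannot reach
         * its target, build_insn() bails out via the toofar label below
         * with -E2BIG.
         */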
        /* Tail call */
        case BPF_JMP | BPF_TAIL_CALL:
                if (emit_tail_call(ctx) < 0)
                        goto invalid;
                break;
        /* Function call */
        case BPF_JMP | BPF_CALL:
                if (emit_call(ctx, insn) < 0)
                        goto invalid;
                break;
        /* Function return */
        case BPF_JMP | BPF_EXIT:
                /*
                 * Optimization: when last instruction is EXIT
                 * simply continue to epilogue.
                 */
                if (ctx->bpf_index == ctx->program->len - 1)
                        break;
                if (emit_exit(ctx) < 0)
                        goto toofar;
                break;

        default:
invalid:
                pr_err_once("unknown opcode %02x\n", code);
                return -EINVAL;
notyet:
                pr_info_once("*** NOT YET: opcode %02x ***\n", code);
                return -EFAULT;
toofar:
                pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n",
                             ctx->bpf_index, code);
                return -E2BIG;
        }
        return 0;
}