// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The for-each macros below walk "multiple" entries at a time and provide
 * pos and next<n> pointers.  It's safe to modify the next pointers (but not
 * pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))
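/* Illustrative sketch (editor's note, not part of the driver): optimization
 * passes use these walkers to examine a sliding window of instructions,
 * roughly:
 *
 *	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2)
 *		if (matches_pattern(meta1, meta2))	// hypothetical helper
 *			combine(meta1, meta2);		// hypothetical helper
 *
 * Because only the next pointers may be modified, a pass may unlink next
 * entries but must leave pos in place while iterating.
 */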
static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the
	 * translation will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}
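/* A note on relocations (editor's summary, based on the emitters above): the
 * *_relo() helpers stuff an enum nfp_relo_type value into bits of the
 * instruction word that the hardware ignores (OP_RELO_TYPE).  A later fixup
 * pass can read the tag back out of each instruction and patch in the final
 * branch target or immediate before the program is loaded onto the NFP.
 */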
static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X, we need right rotate X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}
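/* Worked example (editor's note): to branch on bit 5 of a 32-bit register we
 * rotate right by 5 + 1 = 6.  Bit 5 then lands in bit 31 (the MSB) of the
 * rotated value, which is the position the branch-on-bit instruction samples.
 */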
static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_BR_ALU_BASE |
		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
		      reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}
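/* Encoding example (editor's note): emit_immed() splits the 16-bit value
 * between two instruction fields.  For imm = 0x1234 the low byte, 0x34, is
 * encoded as the B operand via reg_imm(imm & 0xff), and the high byte, 0x12,
 * goes into the OP_IMMED_IMM field via imm >> 8.
 */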
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the step is LAST or LAST_2, the left source is used as
		 * the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}
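/* ld_field semantics (editor's note): @bmask has one bit per destination
 * byte.  For example emit_ld_field(nfp_prog, dst, 0x3, src, SHF_SC_NONE, 0)
 * copies bytes 0 and 1 of @src into bytes 0 and 1 of @dst and leaves the
 * upper two bytes of @dst untouched; with the zero variant the unselected
 * bytes are cleared instead.
 */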
static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode two immeds in one instr with our normal
	 * swreg infra.  So if the param is an immed, we encode it as
	 * reg_none() and copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
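/* Worked examples (editor's note): pack_immed() fits a 32-bit constant into
 * a single 16-bit immed whenever all set bits lie in one 16-bit window:
 *
 *   0x0000abcd -> val 0xabcd, IMMED_SHIFT_0B
 *   0x00abcd00 -> val 0xabcd, IMMED_SHIFT_1B  (value restored as val << 8)
 *   0xabcd0000 -> val 0xabcd, IMMED_SHIFT_2B  (value restored as val << 16)
 *
 * wrp_immed() additionally tries the bitwise inverse (the INV flag undoes it
 * at runtime), and only falls back to a two-instruction sequence - low half
 * with IMMED_WIDTH_ALL, high half with IMMED_WIDTH_WORD - when neither form
 * packs.
 */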
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src and
 * OR the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
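/* Editor's note on 40-bit addressing: a 40-bit memory address is kept in a
 * GPR pair - the low 32 bits in src_gpr, the high 8 bits in src_gpr + 1.
 * Adding a byte offset therefore takes two ALU ops: a plain ADD on the low
 * word and an ADD_C (add with carry) of 0 on the high word to propagate any
 * carry.  addr40_offset() above emits exactly that pair unless the offset
 * is 0.
 */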
/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes, then
		 * another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte-aligned
		 * part of the length, then another direct_ref write8 to write
		 * the remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}
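	/* Summary of the write strategies above (editor's note):
	 *   len <= 8                one direct_ref write8
	 *   len <= 32, 4B aligned   one direct_ref write32
	 *   len <= 32, unaligned    one indirect_ref write8
	 *   4B aligned              one indirect_ref write32
	 *   len <= 40               direct_ref write32 + direct_ref write8
	 *   otherwise               indirect_ref write32 + direct_ref write8
	 */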
	/* TODO: The following extra load is to make sure data flow is
	 * identical before and after we do memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before this
	 * transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
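/* Worked example (editor's note): for a 2-byte big-endian packet load,
 * data_ld() still reads a full 4-byte word, so the two wanted bytes end up
 * in the top half of the transfer register.  shift = 4 - 2 = 2, and the
 * SHF_SC_R_SHF by shift * 8 = 16 bits moves them down to the low end of the
 * destination GPR.
 */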
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already looked at that LMEM word
		 * and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
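/* Worked example (editor's note): loading 2 bytes that sit at src_byte = 2
 * of an LMEM word into dst_byte = 0 of a GPR gives mask = 0x3 (the two low
 * destination bytes) and, since src_byte > dst_byte, a right shift of
 * (2 - 0) * 8 = 16 bits.  The inverse case src_byte < dst_byte becomes a
 * left shift, which __emit_shf() encodes as 32 minus the amount.
 */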
static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations will need RMW,
		 * the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const ||
	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}
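/* Worked example (editor's note): an 8-byte access at off = 2 is sliced so
 * that no piece crosses a 4-byte boundary on either the LMEM side or the
 * GPR side.  The loop above produces four 2-byte slices:
 *
 *   off 2 -> gpr byte 0,  off 4 -> gpr byte 2,
 *   off 6 -> byte 0 of the next GPR,  off 8 -> byte 2
 *
 * with the first/new_gpr/last flags telling the step callback when RMW or
 * an LM pointer post-increment is needed.
 */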
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->flags |= FLAG_INSN_SKIP_NOOP;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->flags |= FLAG_INSN_SKIP_NOOP;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	if (is_mbpf_jmp64(meta))
		wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
				 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}

static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	if (is_mbpf_jmp64(meta)) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		if (!code->swap)
			emit_alu(nfp_prog, reg_none(),
				 reg_a(reg + 1), carry_op, tmp_reg);
		else
			emit_alu(nfp_prog, reg_none(),
				 tmp_reg, carry_op, reg_a(reg + 1));
	}

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	if (is_mbpf_jmp64(meta))
		emit_alu(nfp_prog, reg_none(),
			 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
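/* Worked example (editor's note): wrp_end32() byte-swaps a 32-bit word with
 * two ld_field rotates.  For input bytes [A B C D] (MSB to LSB), rotate
 * right by 8 with mask 0xf yields [D A B C]; rotating that by 16 and merging
 * only bytes 2 and 0 (mask 0x5) yields the swapped [D C B A].
 */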
static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
	if (gen_high_half)
		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
			 reg_none());
	else
		wrp_immed(nfp_prog, dst_hi, 0);
}

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{
	swreg multiplier, multiplicand, dst_hi, dst_lo;
	const struct bpf_insn *insn = &meta->insn;
	u32 lopnd_max, ropnd_max;
	u8 dst_reg;

	dst_reg = insn->dst_reg;
	multiplicand = reg_a(dst_reg * 2);
	dst_hi = reg_both(dst_reg * 2 + 1);
	dst_lo = reg_both(dst_reg * 2);
	lopnd_max = meta->umax_dst;
	if (ropnd_from_reg) {
		multiplier = reg_b(insn->src_reg * 2);
		ropnd_max = meta->umax_src;
	} else {
		u32 imm = insn->imm;

		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
		ropnd_max = imm;
	}
	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
			    gen_high_half);
	else
		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);

	return 0;
}
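/* Editor's note on the division strategy below: wrp_div_imm() follows the
 * scheme of reciprocal_value_adv() (include/linux/reciprocal_div.h), i.e.
 * for a pre-computed magic number m and shift sh,
 *
 *   dst / imm  ==  ((dst * m) >> 32) >> sh                 (narrow m)
 *
 * and when m would need 33 bits, with t = (dst * m) >> 32,
 *
 *   dst / imm  ==  (((dst - t) >> 1) + t) >> (sh - 1)      (wide m)
 *
 * which is the mul/sub/shift/add/shift sequence emitted in the is_wide_m
 * branch.
 */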
static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{
	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	swreg magic;

	if (imm > U32_MAX) {
		wrp_immed(nfp_prog, dst_both, 0);
		return 0;
	}

	/* NOTE: because reciprocal_value_adv() doesn't support divisors
	 * bigger than (1u << 31), we JIT a separate NFP sequence for that
	 * case.  The result then equals the unsigned comparison "dst >= imm",
	 * which can be calculated using the following NFP sequence:
	 *
	 *  alu[--, dst, -, imm]
	 *  immed[imm, 0]
	 *  alu[dst, imm, +carry, 0]
	 */
	if (imm > 1U << 31) {
		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));

		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
			 reg_imm(0));
		return 0;
	}

	rvalue = reciprocal_value_adv(imm, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(imm & 1)) {
		pre_shift = fls(imm & -imm) - 1;
		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}
	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
	if (imm == 1U << exp) {
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, exp);
	} else if (rvalue.is_wide_m) {
		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
			    magic, true);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, 1);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, rvalue.sh - 1);
	} else {
		if (pre_shift)
			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
				 dst_b, SHF_SC_R_SHF, pre_shift);
		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
			 dst_b, SHF_SC_R_SHF, rvalue.sh);
	}

	return 0;
}

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 ret_einval, end;
	swreg plen, delta;

	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));

	plen = imm_a(nfp_prog);
	delta = reg_a(2 * 2);

	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
	end = nfp_prog_current_offset(nfp_prog) + 11;

	/* Calculate resulting length */
	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
	/* delta == 0 is not allowed by the kernel, the add has to overflow
	 * for the length to get smaller.
	 */
	emit_br(nfp_prog, BR_BCC, ret_einval, 0);

	/* if (new_len < 14) then -EINVAL */
	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_ADD, delta);

	emit_br(nfp_prog, BR_UNC, end, 2);
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_frame_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}
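/* Editor's note on the helper calling convention visible above: the branch
 * to the firmware helper is emitted with defer 2, so the two instructions
 * that follow it (map ID into A0, return address into B0) execute in its
 * delay slots before control actually transfers.  The key pointer is passed
 * out of band through the LM0 CSR, which is restored afterwards if it was
 * redirected.
 */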
static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here, we will jump over next instruction if queue
	 * value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_frame_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
			 ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}
1879 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C, 1880 reg_b(insn->src_reg * 2 + 1)); 1881 1882 return 0; 1883 } 1884 1885 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1886 { 1887 const struct bpf_insn *insn = &meta->insn; 1888 u64 imm = insn->imm; /* sign extend */ 1889 1890 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1891 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1892 1893 return 0; 1894 } 1895 1896 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1897 { 1898 const struct bpf_insn *insn = &meta->insn; 1899 1900 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1901 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1902 reg_b(insn->src_reg * 2)); 1903 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1904 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1905 reg_b(insn->src_reg * 2 + 1)); 1906 1907 return 0; 1908 } 1909 1910 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1911 { 1912 const struct bpf_insn *insn = &meta->insn; 1913 u64 imm = insn->imm; /* sign extend */ 1914 1915 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1916 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1917 1918 return 0; 1919 } 1920 1921 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1922 { 1923 return wrp_mul(nfp_prog, meta, true, true); 1924 } 1925 1926 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1927 { 1928 return wrp_mul(nfp_prog, meta, true, false); 1929 } 1930 1931 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1932 { 1933 const struct bpf_insn *insn = &meta->insn; 1934 1935 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); 1936 } 1937 1938 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1939 { 1940 /* NOTE: verifier hook has rejected cases for which verifier doesn't 1941 * know whether the source operand is constant or not. 1942 */ 1943 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); 1944 } 1945 1946 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1947 { 1948 const struct bpf_insn *insn = &meta->insn; 1949 1950 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1951 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1952 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1953 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1954 1955 return 0; 1956 } 1957 1958 /* Pseudo code: 1959 * if shift_amt >= 32 1960 * dst_high = dst_low << shift_amt[4:0] 1961 * dst_low = 0; 1962 * else 1963 * dst_high = (dst_high, dst_low) >> (32 - shift_amt) 1964 * dst_low = dst_low << shift_amt 1965 * 1966 * The indirect shift will use the same logic at runtime. 
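 *
 * Worked example (illustrative, not from the original source): with
 * dst = 0x00000000_00000001, shift_amt = 36 uses only the low five bits
 * of the amount (36 & 31 == 4), so dst_high = dst_low << 4 = 0x10 and
 * dst_low = 0, i.e. the value becomes 1ULL << 36. With shift_amt = 4
 * the double-shift right by 32 - 4 = 28 first carries the bits crossing
 * the word boundary into dst_high, then dst_low is shifted left by 4 on
 * its own.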
1967 */ 1968 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1969 { 1970 if (shift_amt < 32) { 1971 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), 1972 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, 1973 32 - shift_amt); 1974 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1975 reg_b(dst), SHF_SC_L_SHF, shift_amt); 1976 } else if (shift_amt == 32) { 1977 wrp_reg_mov(nfp_prog, dst + 1, dst); 1978 wrp_immed(nfp_prog, reg_both(dst), 0); 1979 } else if (shift_amt > 32) { 1980 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1981 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); 1982 wrp_immed(nfp_prog, reg_both(dst), 0); 1983 } 1984 1985 return 0; 1986 } 1987 1988 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1989 { 1990 const struct bpf_insn *insn = &meta->insn; 1991 u8 dst = insn->dst_reg * 2; 1992 1993 return __shl_imm64(nfp_prog, dst, insn->imm); 1994 } 1995 1996 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1997 { 1998 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, 1999 reg_b(src)); 2000 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); 2001 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, 2002 reg_b(dst), SHF_SC_R_DSHF); 2003 } 2004 2005 /* NOTE: for indirect left shift, HIGH part should be calculated first. */ 2006 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2007 { 2008 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2009 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2010 reg_b(dst), SHF_SC_L_SHF); 2011 } 2012 2013 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2014 { 2015 shl_reg64_lt32_high(nfp_prog, dst, src); 2016 shl_reg64_lt32_low(nfp_prog, dst, src); 2017 } 2018 2019 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2020 { 2021 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2022 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2023 reg_b(dst), SHF_SC_L_SHF); 2024 wrp_immed(nfp_prog, reg_both(dst), 0); 2025 } 2026 2027 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2028 { 2029 const struct bpf_insn *insn = &meta->insn; 2030 u64 umin, umax; 2031 u8 dst, src; 2032 2033 dst = insn->dst_reg * 2; 2034 umin = meta->umin_src; 2035 umax = meta->umax_src; 2036 if (umin == umax) 2037 return __shl_imm64(nfp_prog, dst, umin); 2038 2039 src = insn->src_reg * 2; 2040 if (umax < 32) { 2041 shl_reg64_lt32(nfp_prog, dst, src); 2042 } else if (umin >= 32) { 2043 shl_reg64_ge32(nfp_prog, dst, src); 2044 } else { 2045 /* Generate different instruction sequences depending on runtime 2046 * value of shift amount. 2047 */ 2048 u16 label_ge32, label_end; 2049 2050 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; 2051 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2052 2053 shl_reg64_lt32_high(nfp_prog, dst, src); 2054 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2055 emit_br(nfp_prog, BR_UNC, label_end, 2); 2056 /* shl_reg64_lt32_low packed in delay slot. 
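 * The unconditional branch above carries two defer slots, so the two
 * instructions emitted for the low half still execute before the jump
 * to label_end takes effect.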
*/ 2057 shl_reg64_lt32_low(nfp_prog, dst, src); 2058 2059 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2060 return -EINVAL; 2061 shl_reg64_ge32(nfp_prog, dst, src); 2062 2063 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2064 return -EINVAL; 2065 } 2066 2067 return 0; 2068 } 2069 2070 /* Pseudo code: 2071 * if shift_amt >= 32 2072 * dst_high = 0; 2073 * dst_low = dst_high >> shift_amt[4:0] 2074 * else 2075 * dst_high = dst_high >> shift_amt 2076 * dst_low = (dst_high, dst_low) >> shift_amt 2077 * 2078 * The indirect shift will use the same logic at runtime. 2079 */ 2080 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2081 { 2082 if (shift_amt < 32) { 2083 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2084 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2085 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2086 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2087 } else if (shift_amt == 32) { 2088 wrp_reg_mov(nfp_prog, dst, dst + 1); 2089 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2090 } else if (shift_amt > 32) { 2091 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2092 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2093 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2094 } 2095 2096 return 0; 2097 } 2098 2099 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2100 { 2101 const struct bpf_insn *insn = &meta->insn; 2102 u8 dst = insn->dst_reg * 2; 2103 2104 return __shr_imm64(nfp_prog, dst, insn->imm); 2105 } 2106 2107 /* NOTE: for indirect right shift, LOW part should be calculated first. */ 2108 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2109 { 2110 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2111 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2112 reg_b(dst + 1), SHF_SC_R_SHF); 2113 } 2114 2115 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2116 { 2117 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2118 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2119 reg_b(dst), SHF_SC_R_DSHF); 2120 } 2121 2122 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2123 { 2124 shr_reg64_lt32_low(nfp_prog, dst, src); 2125 shr_reg64_lt32_high(nfp_prog, dst, src); 2126 } 2127 2128 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2129 { 2130 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2131 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2132 reg_b(dst + 1), SHF_SC_R_SHF); 2133 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2134 } 2135 2136 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2137 { 2138 const struct bpf_insn *insn = &meta->insn; 2139 u64 umin, umax; 2140 u8 dst, src; 2141 2142 dst = insn->dst_reg * 2; 2143 umin = meta->umin_src; 2144 umax = meta->umax_src; 2145 if (umin == umax) 2146 return __shr_imm64(nfp_prog, dst, umin); 2147 2148 src = insn->src_reg * 2; 2149 if (umax < 32) { 2150 shr_reg64_lt32(nfp_prog, dst, src); 2151 } else if (umin >= 32) { 2152 shr_reg64_ge32(nfp_prog, dst, src); 2153 } else { 2154 /* Generate different instruction sequences depending on runtime 2155 * value of shift amount. 
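 * Bit 5 of the shift amount tells whether it is >= 32, hence the
 * emit_br_bset() test on that bit below to pick between the lt32 and
 * ge32 sequences.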
2156 		 */
2157 		u16 label_ge32, label_end;
2158
2159 		label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2160 		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2161 		shr_reg64_lt32_low(nfp_prog, dst, src);
2162 		label_end = nfp_prog_current_offset(nfp_prog) + 6;
2163 		emit_br(nfp_prog, BR_UNC, label_end, 2);
2164 		/* shr_reg64_lt32_high packed in delay slot. */
2165 		shr_reg64_lt32_high(nfp_prog, dst, src);
2166
2167 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2168 			return -EINVAL;
2169 		shr_reg64_ge32(nfp_prog, dst, src);
2170
2171 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2172 			return -EINVAL;
2173 	}
2174
2175 	return 0;
2176 }
2177
2178 /* Code logic is the same as __shr_imm64, except that ashr requires the
2179  * signedness bit to be supplied through the PREV_ALU result.
2180  */
2181 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2182 {
2183 	if (shift_amt < 32) {
2184 		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2185 			 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
2186 		/* Set signedness bit. */
2187 		emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2188 			 reg_imm(0));
2189 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2190 			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2191 	} else if (shift_amt == 32) {
2192 		/* NOTE: this also helps set the signedness bit. */
2193 		wrp_reg_mov(nfp_prog, dst, dst + 1);
2194 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2195 			 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2196 	} else if (shift_amt > 32) {
2197 		emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2198 			 reg_imm(0));
2199 		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2200 			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
2201 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2202 			 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2203 	}
2204
2205 	return 0;
2206 }
2207
2208 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2209 {
2210 	const struct bpf_insn *insn = &meta->insn;
2211 	u8 dst = insn->dst_reg * 2;
2212
2213 	return __ashr_imm64(nfp_prog, dst, insn->imm);
2214 }
2215
2216 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2217 {
2218 	/* NOTE: the first insn will set both indirect shift amount (source A)
2219 	 * and signedness bit (MSB of result).
2220 	 */
2221 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2222 	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2223 		       reg_b(dst + 1), SHF_SC_R_SHF);
2224 }
2225
2226 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2227 {
2228 	/* NOTE: it is the same as the logic shift because we don't need to
2229 	 * shift in the signedness bit when the shift amount is less than 32.
2230 	 */
2231 	return shr_reg64_lt32_low(nfp_prog, dst, src);
2232 }
2233
2234 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2235 {
2236 	ashr_reg64_lt32_low(nfp_prog, dst, src);
2237 	ashr_reg64_lt32_high(nfp_prog, dst, src);
2238 }
2239
2240 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2241 {
2242 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2243 	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2244 		       reg_b(dst + 1), SHF_SC_R_SHF);
2245 	emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2246 		 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2247 }
2248
2249 /* Like ashr_imm64, but needs to use an indirect shift.
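 * The shift amount is only known at runtime here, so it is fed to the
 * shifter through the PREV_ALU result instead of being encoded in the
 * instruction word.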
*/ 2250 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2251 { 2252 const struct bpf_insn *insn = &meta->insn; 2253 u64 umin, umax; 2254 u8 dst, src; 2255 2256 dst = insn->dst_reg * 2; 2257 umin = meta->umin_src; 2258 umax = meta->umax_src; 2259 if (umin == umax) 2260 return __ashr_imm64(nfp_prog, dst, umin); 2261 2262 src = insn->src_reg * 2; 2263 if (umax < 32) { 2264 ashr_reg64_lt32(nfp_prog, dst, src); 2265 } else if (umin >= 32) { 2266 ashr_reg64_ge32(nfp_prog, dst, src); 2267 } else { 2268 u16 label_ge32, label_end; 2269 2270 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2271 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2272 ashr_reg64_lt32_low(nfp_prog, dst, src); 2273 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2274 emit_br(nfp_prog, BR_UNC, label_end, 2); 2275 /* ashr_reg64_lt32_high packed in delay slot. */ 2276 ashr_reg64_lt32_high(nfp_prog, dst, src); 2277 2278 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2279 return -EINVAL; 2280 ashr_reg64_ge32(nfp_prog, dst, src); 2281 2282 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2283 return -EINVAL; 2284 } 2285 2286 return 0; 2287 } 2288 2289 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2290 { 2291 const struct bpf_insn *insn = &meta->insn; 2292 2293 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 2294 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2295 2296 return 0; 2297 } 2298 2299 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2300 { 2301 const struct bpf_insn *insn = &meta->insn; 2302 2303 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 2304 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2305 2306 return 0; 2307 } 2308 2309 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2310 { 2311 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 2312 } 2313 2314 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2315 { 2316 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 2317 } 2318 2319 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2320 { 2321 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 2322 } 2323 2324 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2325 { 2326 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 2327 } 2328 2329 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2330 { 2331 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 2332 } 2333 2334 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2335 { 2336 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2337 } 2338 2339 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2340 { 2341 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 2342 } 2343 2344 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2345 { 2346 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2347 } 2348 2349 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2350 { 2351 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 2352 } 2353 2354 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2355 { 2356 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2357 } 2358 2359 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2360 { 2361 return wrp_mul(nfp_prog, meta, false, true); 2362 } 2363 2364 
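/* Judging by the call sites above and below (an inference, not taken
 * from the original source), the two boolean arguments to wrp_mul()
 * select, in order, whether the high half of the 64-bit product is
 * generated and whether the right-hand operand comes from a register
 * rather than an immediate.
 */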
static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2365 { 2366 return wrp_mul(nfp_prog, meta, false, false); 2367 } 2368 2369 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2370 { 2371 return div_reg64(nfp_prog, meta); 2372 } 2373 2374 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2375 { 2376 return div_imm64(nfp_prog, meta); 2377 } 2378 2379 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2380 { 2381 u8 dst = meta->insn.dst_reg * 2; 2382 2383 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2384 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2385 2386 return 0; 2387 } 2388 2389 static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2390 { 2391 /* Set signedness bit (MSB of result). */ 2392 emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0)); 2393 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst), 2394 SHF_SC_R_SHF, shift_amt); 2395 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2396 2397 return 0; 2398 } 2399 2400 static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2401 { 2402 const struct bpf_insn *insn = &meta->insn; 2403 u64 umin, umax; 2404 u8 dst, src; 2405 2406 dst = insn->dst_reg * 2; 2407 umin = meta->umin_src; 2408 umax = meta->umax_src; 2409 if (umin == umax) 2410 return __ashr_imm(nfp_prog, dst, umin); 2411 2412 src = insn->src_reg * 2; 2413 /* NOTE: the first insn will set both indirect shift amount (source A) 2414 * and signedness bit (MSB of result). 2415 */ 2416 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst)); 2417 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2418 reg_b(dst), SHF_SC_R_SHF); 2419 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2420 2421 return 0; 2422 } 2423 2424 static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2425 { 2426 const struct bpf_insn *insn = &meta->insn; 2427 u8 dst = insn->dst_reg * 2; 2428 2429 return __ashr_imm(nfp_prog, dst, insn->imm); 2430 } 2431 2432 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2433 { 2434 const struct bpf_insn *insn = &meta->insn; 2435 2436 if (!insn->imm) 2437 return 1; /* TODO: zero shift means indirect */ 2438 2439 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 2440 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 2441 SHF_SC_L_SHF, insn->imm); 2442 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2443 2444 return 0; 2445 } 2446 2447 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2448 { 2449 const struct bpf_insn *insn = &meta->insn; 2450 u8 gpr = insn->dst_reg * 2; 2451 2452 switch (insn->imm) { 2453 case 16: 2454 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2455 SHF_SC_R_ROT, 8); 2456 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2457 SHF_SC_R_SHF, 16); 2458 2459 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2460 break; 2461 case 32: 2462 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2463 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2464 break; 2465 case 64: 2466 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2467 2468 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2469 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2470 break; 2471 } 2472 2473 return 0; 2474 } 2475 2476 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2477 { 2478 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2479 u32 imm_lo, imm_hi; 2480 u8 dst; 2481 2482 dst = prev->insn.dst_reg 
* 2; 2483 imm_lo = prev->insn.imm; 2484 imm_hi = meta->insn.imm; 2485 2486 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2487 2488 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2489 if (imm_hi == imm_lo) 2490 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2491 else 2492 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2493 2494 return 0; 2495 } 2496 2497 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2498 { 2499 meta->double_cb = imm_ld8_part2; 2500 return 0; 2501 } 2502 2503 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2504 { 2505 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2506 } 2507 2508 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2509 { 2510 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2511 } 2512 2513 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2514 { 2515 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2516 } 2517 2518 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2519 { 2520 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2521 meta->insn.src_reg * 2, 1); 2522 } 2523 2524 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2525 { 2526 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2527 meta->insn.src_reg * 2, 2); 2528 } 2529 2530 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2531 { 2532 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2533 meta->insn.src_reg * 2, 4); 2534 } 2535 2536 static int 2537 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2538 unsigned int size, unsigned int ptr_off) 2539 { 2540 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2541 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2542 true, wrp_lmem_load); 2543 } 2544 2545 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2546 u8 size) 2547 { 2548 swreg dst = reg_both(meta->insn.dst_reg * 2); 2549 2550 switch (meta->insn.off) { 2551 case offsetof(struct __sk_buff, len): 2552 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2553 return -EOPNOTSUPP; 2554 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2555 break; 2556 case offsetof(struct __sk_buff, data): 2557 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2558 return -EOPNOTSUPP; 2559 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2560 break; 2561 case offsetof(struct __sk_buff, data_end): 2562 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2563 return -EOPNOTSUPP; 2564 emit_alu(nfp_prog, dst, 2565 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2566 break; 2567 default: 2568 return -EOPNOTSUPP; 2569 } 2570 2571 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2572 2573 return 0; 2574 } 2575 2576 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2577 u8 size) 2578 { 2579 swreg dst = reg_both(meta->insn.dst_reg * 2); 2580 2581 switch (meta->insn.off) { 2582 case offsetof(struct xdp_md, data): 2583 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2584 return -EOPNOTSUPP; 2585 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2586 break; 2587 case offsetof(struct xdp_md, data_end): 2588 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2589 return -EOPNOTSUPP; 2590 emit_alu(nfp_prog, dst, 2591 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2592 break; 2593 default: 2594 return -EOPNOTSUPP; 2595 } 2596 2597 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2598 2599 return 0; 
2600 } 2601 2602 static int 2603 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2604 unsigned int size) 2605 { 2606 swreg tmp_reg; 2607 2608 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2609 2610 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2611 tmp_reg, meta->insn.dst_reg * 2, size); 2612 } 2613 2614 static int 2615 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2616 unsigned int size) 2617 { 2618 swreg tmp_reg; 2619 2620 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2621 2622 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2623 tmp_reg, meta->insn.dst_reg * 2, size); 2624 } 2625 2626 static void 2627 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2628 struct nfp_insn_meta *meta) 2629 { 2630 s16 range_start = meta->pkt_cache.range_start; 2631 s16 range_end = meta->pkt_cache.range_end; 2632 swreg src_base, off; 2633 u8 xfer_num, len; 2634 bool indir; 2635 2636 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2637 src_base = reg_a(meta->insn.src_reg * 2); 2638 len = range_end - range_start; 2639 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2640 2641 indir = len > 8 * REG_WIDTH; 2642 /* Setup PREV_ALU for indirect mode. */ 2643 if (indir) 2644 wrp_immed(nfp_prog, reg_none(), 2645 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2646 2647 /* Cache memory into transfer-in registers. */ 2648 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2649 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2650 } 2651 2652 static int 2653 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2654 struct nfp_insn_meta *meta, 2655 unsigned int size) 2656 { 2657 s16 range_start = meta->pkt_cache.range_start; 2658 s16 insn_off = meta->insn.off - range_start; 2659 swreg dst_lo, dst_hi, src_lo, src_mid; 2660 u8 dst_gpr = meta->insn.dst_reg * 2; 2661 u8 len_lo = size, len_mid = 0; 2662 u8 idx = insn_off / REG_WIDTH; 2663 u8 off = insn_off % REG_WIDTH; 2664 2665 dst_hi = reg_both(dst_gpr + 1); 2666 dst_lo = reg_both(dst_gpr); 2667 src_lo = reg_xfer(idx); 2668 2669 /* The read length could involve as many as three registers. */ 2670 if (size > REG_WIDTH - off) { 2671 /* Calculate the part in the second register. */ 2672 len_lo = REG_WIDTH - off; 2673 len_mid = size - len_lo; 2674 2675 /* Calculate the part in the third register. 
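 * As an illustration (not from the original source), an 8-byte read at
 * off = 3 with 4-byte transfer registers spans three of them:
 * len_lo = 1, len_mid is capped at 4, and the remaining 3 bytes come
 * from the third register.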
*/ 2676 if (size > 2 * REG_WIDTH - off) 2677 len_mid = REG_WIDTH; 2678 } 2679 2680 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2681 2682 if (!len_mid) { 2683 wrp_immed(nfp_prog, dst_hi, 0); 2684 return 0; 2685 } 2686 2687 src_mid = reg_xfer(idx + 1); 2688 2689 if (size <= REG_WIDTH) { 2690 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2691 wrp_immed(nfp_prog, dst_hi, 0); 2692 } else { 2693 swreg src_hi = reg_xfer(idx + 2); 2694 2695 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2696 REG_WIDTH - len_lo, len_lo); 2697 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2698 REG_WIDTH - len_lo); 2699 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2700 len_lo); 2701 } 2702 2703 return 0; 2704 } 2705 2706 static int 2707 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2708 struct nfp_insn_meta *meta, 2709 unsigned int size) 2710 { 2711 swreg dst_lo, dst_hi, src_lo; 2712 u8 dst_gpr, idx; 2713 2714 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2715 dst_gpr = meta->insn.dst_reg * 2; 2716 dst_hi = reg_both(dst_gpr + 1); 2717 dst_lo = reg_both(dst_gpr); 2718 src_lo = reg_xfer(idx); 2719 2720 if (size < REG_WIDTH) { 2721 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2722 wrp_immed(nfp_prog, dst_hi, 0); 2723 } else if (size == REG_WIDTH) { 2724 wrp_mov(nfp_prog, dst_lo, src_lo); 2725 wrp_immed(nfp_prog, dst_hi, 0); 2726 } else { 2727 swreg src_hi = reg_xfer(idx + 1); 2728 2729 wrp_mov(nfp_prog, dst_lo, src_lo); 2730 wrp_mov(nfp_prog, dst_hi, src_hi); 2731 } 2732 2733 return 0; 2734 } 2735 2736 static int 2737 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2738 struct nfp_insn_meta *meta, unsigned int size) 2739 { 2740 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2741 2742 if (IS_ALIGNED(off, REG_WIDTH)) 2743 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2744 2745 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2746 } 2747 2748 static int 2749 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2750 unsigned int size) 2751 { 2752 if (meta->ldst_gather_len) 2753 return nfp_cpp_memcpy(nfp_prog, meta); 2754 2755 if (meta->ptr.type == PTR_TO_CTX) { 2756 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2757 return mem_ldx_xdp(nfp_prog, meta, size); 2758 else 2759 return mem_ldx_skb(nfp_prog, meta, size); 2760 } 2761 2762 if (meta->ptr.type == PTR_TO_PACKET) { 2763 if (meta->pkt_cache.range_end) { 2764 if (meta->pkt_cache.do_init) 2765 mem_ldx_data_init_pktcache(nfp_prog, meta); 2766 2767 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2768 } else { 2769 return mem_ldx_data(nfp_prog, meta, size); 2770 } 2771 } 2772 2773 if (meta->ptr.type == PTR_TO_STACK) 2774 return mem_ldx_stack(nfp_prog, meta, size, 2775 meta->ptr.off + meta->ptr.var_off.value); 2776 2777 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2778 return mem_ldx_emem(nfp_prog, meta, size); 2779 2780 return -EOPNOTSUPP; 2781 } 2782 2783 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2784 { 2785 return mem_ldx(nfp_prog, meta, 1); 2786 } 2787 2788 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2789 { 2790 return mem_ldx(nfp_prog, meta, 2); 2791 } 2792 2793 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2794 { 2795 return mem_ldx(nfp_prog, meta, 4); 2796 } 2797 2798 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2799 { 2800 return mem_ldx(nfp_prog, meta, 8); 2801 } 2802 2803 static int 2804 
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2805 unsigned int size) 2806 { 2807 u64 imm = meta->insn.imm; /* sign extend */ 2808 swreg off_reg; 2809 2810 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2811 2812 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2813 imm, size); 2814 } 2815 2816 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2817 unsigned int size) 2818 { 2819 if (meta->ptr.type == PTR_TO_PACKET) 2820 return mem_st_data(nfp_prog, meta, size); 2821 2822 return -EOPNOTSUPP; 2823 } 2824 2825 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2826 { 2827 return mem_st(nfp_prog, meta, 1); 2828 } 2829 2830 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2831 { 2832 return mem_st(nfp_prog, meta, 2); 2833 } 2834 2835 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2836 { 2837 return mem_st(nfp_prog, meta, 4); 2838 } 2839 2840 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2841 { 2842 return mem_st(nfp_prog, meta, 8); 2843 } 2844 2845 static int 2846 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2847 unsigned int size) 2848 { 2849 swreg off_reg; 2850 2851 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2852 2853 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2854 meta->insn.src_reg * 2, size); 2855 } 2856 2857 static int 2858 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2859 unsigned int size, unsigned int ptr_off) 2860 { 2861 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2862 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2863 false, wrp_lmem_store); 2864 } 2865 2866 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2867 { 2868 switch (meta->insn.off) { 2869 case offsetof(struct xdp_md, rx_queue_index): 2870 return nfp_queue_select(nfp_prog, meta); 2871 } 2872 2873 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2874 return -EOPNOTSUPP; 2875 } 2876 2877 static int 2878 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2879 unsigned int size) 2880 { 2881 if (meta->ptr.type == PTR_TO_PACKET) 2882 return mem_stx_data(nfp_prog, meta, size); 2883 2884 if (meta->ptr.type == PTR_TO_STACK) 2885 return mem_stx_stack(nfp_prog, meta, size, 2886 meta->ptr.off + meta->ptr.var_off.value); 2887 2888 return -EOPNOTSUPP; 2889 } 2890 2891 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2892 { 2893 return mem_stx(nfp_prog, meta, 1); 2894 } 2895 2896 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2897 { 2898 return mem_stx(nfp_prog, meta, 2); 2899 } 2900 2901 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2902 { 2903 if (meta->ptr.type == PTR_TO_CTX) 2904 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2905 return mem_stx_xdp(nfp_prog, meta); 2906 return mem_stx(nfp_prog, meta, 4); 2907 } 2908 2909 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2910 { 2911 return mem_stx(nfp_prog, meta, 8); 2912 } 2913 2914 static int 2915 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2916 { 2917 u8 dst_gpr = meta->insn.dst_reg * 2; 2918 u8 src_gpr = meta->insn.src_reg * 2; 2919 unsigned int full_add, out; 2920 swreg addra, addrb, off; 2921 2922 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2923 2924 /* We can fit 16 bits into 
the command immediate; if we know the
2925 	 * immediate is guaranteed to either always or never fit into 16 bits,
2926 	 * we only generate code to handle that particular case; otherwise we
2927 	 * generate code for both.
2928 	 */
2929 	out = nfp_prog_current_offset(nfp_prog);
2930 	full_add = nfp_prog_current_offset(nfp_prog);
2931
2932 	if (meta->insn.off) {
2933 		out += 2;
2934 		full_add += 2;
2935 	}
2936 	if (meta->xadd_maybe_16bit) {
2937 		out += 3;
2938 		full_add += 3;
2939 	}
2940 	if (meta->xadd_over_16bit)
2941 		out += 2 + is64;
2942 	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2943 		out += 5;
2944 		full_add += 5;
2945 	}
2946
2947 	/* Generate the branch for choosing add_imm vs add */
2948 	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2949 		swreg max_imm = imm_a(nfp_prog);
2950
2951 		wrp_immed(nfp_prog, max_imm, 0xffff);
2952 		emit_alu(nfp_prog, reg_none(),
2953 			 max_imm, ALU_OP_SUB, reg_b(src_gpr));
2954 		emit_alu(nfp_prog, reg_none(),
2955 			 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
2956 		emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
2957 		/* defer for add */
2958 	}
2959
2960 	/* If the insn has an offset, add it to the address */
2961 	if (!meta->insn.off) {
2962 		addra = reg_a(dst_gpr);
2963 		addrb = reg_b(dst_gpr + 1);
2964 	} else {
2965 		emit_alu(nfp_prog, imma_a(nfp_prog),
2966 			 reg_a(dst_gpr), ALU_OP_ADD, off);
2967 		emit_alu(nfp_prog, imma_b(nfp_prog),
2968 			 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
2969 		addra = imma_a(nfp_prog);
2970 		addrb = imma_b(nfp_prog);
2971 	}
2972
2973 	/* Generate the add_imm if 16 bits are possible */
2974 	if (meta->xadd_maybe_16bit) {
2975 		swreg prev_alu = imm_a(nfp_prog);
2976
2977 		wrp_immed(nfp_prog, prev_alu,
2978 			  FIELD_PREP(CMD_OVE_DATA, 2) |
2979 			  CMD_OVE_LEN |
2980 			  FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
2981 		wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
2982 		emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
2983 			       addra, addrb, 0, CMD_CTX_NO_SWAP);
2984
2985 		if (meta->xadd_over_16bit)
2986 			emit_br(nfp_prog, BR_UNC, out, 0);
2987 	}
2988
2989 	if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
2990 		return -EINVAL;
2991
2992 	/* Generate the add if 16 bits are not guaranteed */
2993 	if (meta->xadd_over_16bit) {
2994 		emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
2995 			 addra, addrb, is64 << 2,
2996 			 is64 ?
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2997 2998 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2999 if (is64) 3000 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 3001 } 3002 3003 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 3004 return -EINVAL; 3005 3006 return 0; 3007 } 3008 3009 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3010 { 3011 return mem_xadd(nfp_prog, meta, false); 3012 } 3013 3014 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3015 { 3016 return mem_xadd(nfp_prog, meta, true); 3017 } 3018 3019 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3020 { 3021 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 3022 3023 return 0; 3024 } 3025 3026 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3027 { 3028 const struct bpf_insn *insn = &meta->insn; 3029 u64 imm = insn->imm; /* sign extend */ 3030 swreg or1, or2, tmp_reg; 3031 3032 or1 = reg_a(insn->dst_reg * 2); 3033 or2 = reg_b(insn->dst_reg * 2 + 1); 3034 3035 if (imm & ~0U) { 3036 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3037 emit_alu(nfp_prog, imm_a(nfp_prog), 3038 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3039 or1 = imm_a(nfp_prog); 3040 } 3041 3042 if (imm >> 32) { 3043 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3044 emit_alu(nfp_prog, imm_b(nfp_prog), 3045 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3046 or2 = imm_b(nfp_prog); 3047 } 3048 3049 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 3050 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3051 3052 return 0; 3053 } 3054 3055 static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3056 { 3057 const struct bpf_insn *insn = &meta->insn; 3058 swreg tmp_reg; 3059 3060 tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog)); 3061 emit_alu(nfp_prog, reg_none(), 3062 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3063 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3064 3065 return 0; 3066 } 3067 3068 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3069 { 3070 const struct bpf_insn *insn = &meta->insn; 3071 u64 imm = insn->imm; /* sign extend */ 3072 u8 dst_gpr = insn->dst_reg * 2; 3073 swreg tmp_reg; 3074 3075 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3076 emit_alu(nfp_prog, imm_b(nfp_prog), 3077 reg_a(dst_gpr), ALU_OP_AND, tmp_reg); 3078 /* Upper word of the mask can only be 0 or ~0 from sign extension, 3079 * so either ignore it or OR the whole thing in. 
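 * (imm is insn->imm sign-extended to 64 bits, so imm >> 32 is either
 * all zeros or all ones.)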
3080 */ 3081 if (is_mbpf_jmp64(meta) && imm >> 32) { 3082 emit_alu(nfp_prog, reg_none(), 3083 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); 3084 } 3085 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3086 3087 return 0; 3088 } 3089 3090 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3091 { 3092 const struct bpf_insn *insn = &meta->insn; 3093 u64 imm = insn->imm; /* sign extend */ 3094 bool is_jmp32 = is_mbpf_jmp32(meta); 3095 swreg tmp_reg; 3096 3097 if (!imm) { 3098 if (is_jmp32) 3099 emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE, 3100 reg_b(insn->dst_reg * 2)); 3101 else 3102 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 3103 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 3104 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3105 return 0; 3106 } 3107 3108 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3109 emit_alu(nfp_prog, reg_none(), 3110 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3111 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3112 3113 if (is_jmp32) 3114 return 0; 3115 3116 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3117 emit_alu(nfp_prog, reg_none(), 3118 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3119 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3120 3121 return 0; 3122 } 3123 3124 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3125 { 3126 const struct bpf_insn *insn = &meta->insn; 3127 3128 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 3129 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 3130 if (is_mbpf_jmp64(meta)) { 3131 emit_alu(nfp_prog, imm_b(nfp_prog), 3132 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, 3133 reg_b(insn->src_reg * 2 + 1)); 3134 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, 3135 imm_b(nfp_prog)); 3136 } 3137 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3138 3139 return 0; 3140 } 3141 3142 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3143 { 3144 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 3145 } 3146 3147 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3148 { 3149 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 3150 } 3151 3152 static int 3153 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3154 { 3155 u32 ret_tgt, stack_depth, offset_br; 3156 swreg tmp_reg; 3157 3158 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); 3159 /* Space for saving the return address is accounted for by the callee, 3160 * so stack_depth can be zero for the main function. 3161 */ 3162 if (stack_depth) { 3163 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3164 stack_imm(nfp_prog)); 3165 emit_alu(nfp_prog, stack_reg(nfp_prog), 3166 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); 3167 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3168 NFP_CSR_ACT_LM_ADDR0); 3169 } 3170 3171 /* Two cases for jumping to the callee: 3172 * 3173 * - If callee uses and needs to save R6~R9 then: 3174 * 1. Put the start offset of the callee into imm_b(). This will 3175 * require a fixup step, as we do not necessarily know this 3176 * address yet. 3177 * 2. Put the return address from the callee to the caller into 3178 * register ret_reg(). 3179 * 3. (After defer slots are consumed) Jump to the subroutine that 3180 * pushes the registers to the stack. 3181 * The subroutine acts as a trampoline, and returns to the address in 3182 * imm_b(), i.e. jumps to the callee. 
3183 	 *
3184 	 * - If the callee does not need to save R6~R9, then just load the
3185 	 *   return address to the caller into ret_reg(), and jump to the
3186 	 *   callee directly.
3187 	 *
3188 	 * Using ret_reg() to pass the return address to the callee is set here
3189 	 * as a convention. The callee can then push this address onto its
3190 	 * stack frame in its prologue. The advantages of passing the return
3191 	 * address through ret_reg(), instead of pushing it to the stack right
3192 	 * here, are the following:
3193 	 * - It looks cleaner.
3194 	 * - If the called function is called multiple times, we get a lower
3195 	 *   program size.
3196 	 * - We save the two no-op instructions that would otherwise have to be
3197 	 *   added just before the emit_br() when the stack depth is not null.
3198 	 * - If we ever find a register to hold the return address during the
3199 	 *   whole execution of the callee, we will not have to push the return
3200 	 *   address to the stack for leaf functions.
3201 	 */
3202 	if (!meta->jmp_dst) {
3203 		pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
3204 		return -ELOOP;
3205 	}
3206 	if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
3207 		ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
3208 		emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
3209 			     RELO_BR_GO_CALL_PUSH_REGS);
3210 		offset_br = nfp_prog_current_offset(nfp_prog);
3211 		wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
3212 	} else {
3213 		ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
3214 		emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
3215 		offset_br = nfp_prog_current_offset(nfp_prog);
3216 	}
3217 	wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
3218
3219 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
3220 		return -EINVAL;
3221
3222 	if (stack_depth) {
3223 		tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3224 					  stack_imm(nfp_prog));
3225 		emit_alu(nfp_prog, stack_reg(nfp_prog),
3226 			 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
3227 		emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3228 			    NFP_CSR_ACT_LM_ADDR0);
3229 		wrp_nops(nfp_prog, 3);
3230 	}
3231
3232 	meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
3233 	meta->num_insns_after_br -= offset_br;
3234
3235 	return 0;
3236 }
3237
3238 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3239 {
3240 	switch (meta->insn.imm) {
3241 	case BPF_FUNC_xdp_adjust_head:
3242 		return adjust_head(nfp_prog, meta);
3243 	case BPF_FUNC_xdp_adjust_tail:
3244 		return adjust_tail(nfp_prog, meta);
3245 	case BPF_FUNC_map_lookup_elem:
3246 	case BPF_FUNC_map_update_elem:
3247 	case BPF_FUNC_map_delete_elem:
3248 		return map_call_stack_common(nfp_prog, meta);
3249 	case BPF_FUNC_get_prandom_u32:
3250 		return nfp_get_prandom_u32(nfp_prog, meta);
3251 	case BPF_FUNC_perf_event_output:
3252 		return nfp_perf_event_output(nfp_prog, meta);
3253 	default:
3254 		WARN_ONCE(1, "verifier allowed unsupported function\n");
3255 		return -EOPNOTSUPP;
3256 	}
3257 }
3258
3259 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3260 {
3261 	if (is_mbpf_pseudo_call(meta))
3262 		return bpf_to_bpf_call(nfp_prog, meta);
3263 	else
3264 		return helper_call(nfp_prog, meta);
3265 }
3266
3267 static bool nfp_is_main_function(struct nfp_insn_meta *meta)
3268 {
3269 	return meta->subprog_idx == 0;
3270 }
3271
3272 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3273 {
3274 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
3275
3276 	return 0;
3277 }
3278
3279 static int
3280 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3281 {
3282 	if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
3283 		/* Pop R6~R9 from the stack via the related subroutine.
3284 		 * We loaded the return address to the caller into ret_reg().
3285 		 * This means that the subroutine does not come back here; we
3286 		 * make it jump back to the subprogram caller directly!
3287 		 */
3288 		emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
3289 			     RELO_BR_GO_CALL_POP_REGS);
3290 		/* Pop the return address from the stack. */
3291 		wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3292 	} else {
3293 		/* Pop the return address from the stack. */
3294 		wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3295 		/* Jump back to the caller if no callee-saved registers were
3296 		 * used by the subprogram.
3297 		 */
3298 		emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
3299 	}
3300
3301 	return 0;
3302 }
3303
3304 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3305 {
3306 	if (nfp_is_main_function(meta))
3307 		return goto_out(nfp_prog, meta);
3308 	else
3309 		return nfp_subprog_epilogue(nfp_prog, meta);
3310 }
3311
3312 static const instr_cb_t instr_cb[256] = {
3313 	[BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
3314 	[BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
3315 	[BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
3316 	[BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
3317 	[BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
3318 	[BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
3319 	[BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
3320 	[BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
3321 	[BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
3322 	[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
3323 	[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
3324 	[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
3325 	[BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
3326 	[BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
3327 	[BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
3328 	[BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
3329 	[BPF_ALU64 | BPF_NEG] = neg_reg64,
3330 	[BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
3331 	[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
3332 	[BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64,
3333 	[BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
3334 	[BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64,
3335 	[BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64,
3336 	[BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
3337 	[BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
3338 	[BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
3339 	[BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
3340 	[BPF_ALU | BPF_AND | BPF_X] = and_reg,
3341 	[BPF_ALU | BPF_AND | BPF_K] = and_imm,
3342 	[BPF_ALU | BPF_OR | BPF_X] = or_reg,
3343 	[BPF_ALU | BPF_OR | BPF_K] = or_imm,
3344 	[BPF_ALU | BPF_ADD | BPF_X] = add_reg,
3345 	[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
3346 	[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
3347 	[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
3348 	[BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
3349 	[BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
3350 	[BPF_ALU | BPF_DIV | BPF_X] = div_reg,
3351 	[BPF_ALU | BPF_DIV | BPF_K] = div_imm,
3352 	[BPF_ALU | BPF_NEG] = neg_reg,
3353 	[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
3354 	[BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
3355 	[BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
3356 	[BPF_ALU | BPF_END | BPF_X] = end_reg32,
3357 	[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
3358 	[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
3359 	[BPF_LD | BPF_ABS | BPF_H] = data_ld2,
3360 	[BPF_LD | BPF_ABS | BPF_W] = data_ld4,
3361 	[BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
3362 	[BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
3363 	[BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
3364 	[BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
3365 	[BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
3366 	[BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
3367 	[BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
3368 	[BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
3369 	[BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
3370 	[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
3371 	[BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
3372 	[BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
3373 	[BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
3374 	[BPF_ST | BPF_MEM | BPF_B] = mem_st1,
3375 	[BPF_ST | BPF_MEM | BPF_H] = mem_st2,
3376 	[BPF_ST | BPF_MEM | BPF_W] = mem_st4,
3377 	[BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
3378 	[BPF_JMP | BPF_JA | BPF_K] = jump,
3379 	[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
3380 	[BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
3381 	[BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
3382 	[BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
3383 	[BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
3384 	[BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
3385 	[BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
3386 	[BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
3387 	[BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
3388 	[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
3389 	[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
3390 	[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
3391 	[BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
3392 	[BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
3393 	[BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
3394 	[BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
3395 	[BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
3396 	[BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
3397 	[BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
3398 	[BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
3399 	[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
3400 	[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
3401 	[BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm,
3402 	[BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm,
3403 	[BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm,
3404 	[BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm,
3405 	[BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm,
3406 	[BPF_JMP32 | BPF_JSGT | BPF_K] = cmp_imm,
3407 	[BPF_JMP32 | BPF_JSGE | BPF_K] = cmp_imm,
3408 	[BPF_JMP32 | BPF_JSLT | BPF_K] = cmp_imm,
3409 	[BPF_JMP32 | BPF_JSLE | BPF_K] = cmp_imm,
3410 	[BPF_JMP32 | BPF_JSET | BPF_K] = jset_imm,
3411 	[BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm,
3412 	[BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg,
3413 	[BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg,
3414 	[BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg,
3415 	[BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg,
3416 	[BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg,
3417 	[BPF_JMP32 | BPF_JSGT | BPF_X] = cmp_reg,
3418 	[BPF_JMP32 | BPF_JSGE | BPF_X] = cmp_reg,
3419 	[BPF_JMP32 | BPF_JSLT | BPF_X] = cmp_reg,
3420 	[BPF_JMP32 | BPF_JSLE | BPF_X] = cmp_reg,
3421 	[BPF_JMP32 | BPF_JSET | BPF_X] = jset_reg,
3422 	[BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg,
3423 	[BPF_JMP | BPF_CALL] = call,
3424 	[BPF_JMP | BPF_EXIT] = jmp_exit,
3425 };
3426
3427 /* --- Assembler logic --- */
3428 static int
3429 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
3430 		     struct nfp_insn_meta *jmp_dst, u32 br_idx)
3431 {
3432 	if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
3433 		pr_err("BUG: failed to fix up callee register saving\n");
3434 		return -EINVAL;
3435 	}
3436
3437 	immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
3438
3439 	return 0;
3440 }
3441
3442 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
3443 {
3444 	struct nfp_insn_meta *meta, *jmp_dst;
3445 	u32 idx, br_idx;
3446 	int err;
3447
3448 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3449 		if (meta->flags & FLAG_INSN_SKIP_MASK)
3450 			continue;
3451 		if (!is_mbpf_jmp(meta))
3452 			continue;
3453 		if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
3454 		    !nfp_is_main_function(meta))
3455 			continue;
3456 		if (is_mbpf_helper_call(meta))
3457 			continue;
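		/* The NFP branch for this BPF jump is the last NFP instruction
		 * generated for it: one before the offset of the next BPF
		 * instruction, or last_bpf_off for the final instruction.
		 */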
3458
3459 		if (list_is_last(&meta->l, &nfp_prog->insns))
3460 			br_idx = nfp_prog->last_bpf_off;
3461 		else
3462 			br_idx = list_next_entry(meta, l)->off - 1;
3463
3464 		/* For a BPF-to-BPF function call, a stack adjustment sequence is
3465 		 * generated after the return instruction. Therefore, we must
3466 		 * subtract the length of this sequence so that br_idx points
3467 		 * to where the "branch" NFP instruction is expected to be.
3468 		 */
3469 		if (is_mbpf_pseudo_call(meta))
3470 			br_idx -= meta->num_insns_after_br;
3471
3472 		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
3473 			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
3474 			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
3475 			return -ELOOP;
3476 		}
3477
3478 		if (meta->insn.code == (BPF_JMP | BPF_EXIT))
3479 			continue;
3480
3481 		/* Leave special branches for later */
3482 		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3483 		    RELO_BR_REL && !is_mbpf_pseudo_call(meta))
3484 			continue;
3485
3486 		if (!meta->jmp_dst) {
3487 			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
3488 			return -ELOOP;
3489 		}
3490
3491 		jmp_dst = meta->jmp_dst;
3492
3493 		if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) {
3494 			pr_err("Branch landing on removed instruction!!\n");
3495 			return -ELOOP;
3496 		}
3497
3498 		if (is_mbpf_pseudo_call(meta) &&
3499 		    nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) {
3500 			err = nfp_fixup_immed_relo(nfp_prog, meta,
3501 						   jmp_dst, br_idx);
3502 			if (err)
3503 				return err;
3504 		}
3505
3506 		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3507 		    RELO_BR_REL)
3508 			continue;
3509
3510 		for (idx = meta->off; idx <= br_idx; idx++) {
3511 			if (!nfp_is_br(nfp_prog->prog[idx]))
3512 				continue;
3513 			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
3514 		}
3515 	}
3516
3517 	return 0;
3518 }
3519
3520 static void nfp_intro(struct nfp_prog *nfp_prog)
3521 {
3522 	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
3523 	emit_alu(nfp_prog, plen_reg(nfp_prog),
3524 		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
3525 }
3526
3527 static void
3528 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3529 {
3530 	/* Save the return address onto the stack. */
3531 	wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
3532 }
3533
3534 static void
3535 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3536 {
3537 	unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;
3538
3539 	nfp_prog->stack_frame_depth = round_up(depth, 4);
3540 	nfp_subprog_prologue(nfp_prog, meta);
3541 }
3542
3543 bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
3544 {
3545 	return meta->flags & FLAG_INSN_IS_SUBPROG_START;
3546 }
3547
3548 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
3549 {
3550 	/* TC direct-action mode:
3551 	 *   0,1   ok        NOT SUPPORTED[1]
3552 	 *   2     drop      0x22 -> drop,  count as stat1
3553 	 *   4,5   nuke      0x02 -> drop
3554 	 *   7     redir     0x44 -> redir, count as stat2
3555 	 *   *     unspec    0x11 -> pass,  count as stat0
3556 	 *
3557 	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
3558 	 *     the exact decision made. We are forced to support UNSPEC
3559 	 *     to handle aborts so that's the only one we handle for passing
3560 	 *     packets up the stack.
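	 *
	 * The result byte appears to be assembled a nibble at a time from
	 * the two immediates loaded below (0x41221211 supplying the low
	 * nibble and 0x41001211 the high one), indexed by R0 and recombined
	 * before being written into the flags field.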
3561 */ 3562 /* Target for aborts */ 3563 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3564 3565 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3566 3567 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3568 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 3569 3570 /* Target for normal exits */ 3571 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3572 3573 /* if R0 > 7 jump to abort */ 3574 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 3575 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3576 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3577 3578 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 3579 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 3580 3581 emit_shf(nfp_prog, reg_a(1), 3582 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 3583 3584 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3585 emit_shf(nfp_prog, reg_a(2), 3586 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3587 3588 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3589 emit_shf(nfp_prog, reg_b(2), 3590 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 3591 3592 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3593 3594 emit_shf(nfp_prog, reg_b(2), 3595 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 3596 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3597 } 3598 3599 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 3600 { 3601 /* XDP return codes: 3602 * 0 aborted 0x82 -> drop, count as stat3 3603 * 1 drop 0x22 -> drop, count as stat1 3604 * 2 pass 0x11 -> pass, count as stat0 3605 * 3 tx 0x44 -> redir, count as stat2 3606 * * unknown 0x82 -> drop, count as stat3 3607 */ 3608 /* Target for aborts */ 3609 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3610 3611 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3612 3613 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3614 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 3615 3616 /* Target for normal exits */ 3617 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3618 3619 /* if R0 > 3 jump to abort */ 3620 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 3621 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3622 3623 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 3624 3625 emit_shf(nfp_prog, reg_a(1), 3626 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 3627 3628 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3629 emit_shf(nfp_prog, reg_b(2), 3630 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3631 3632 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3633 3634 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3635 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3636 } 3637 3638 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog) 3639 { 3640 unsigned int idx; 3641 3642 for (idx = 1; idx < nfp_prog->subprog_cnt; idx++) 3643 if (nfp_prog->subprog[idx].needs_reg_push) 3644 return true; 3645 3646 return false; 3647 } 3648 3649 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog) 3650 { 3651 u8 reg; 3652 3653 /* Subroutine: Save all callee saved registers (R6 ~ R9). 3654 * imm_b() holds the return address. 
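 * The first LM slot of the frame is reserved for the return address
 * (see the comment below), so R6~R9 land in slots 1..8, one slot per
 * 32-bit half.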
3637
3638 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
3639 {
3640 	unsigned int idx;
3641
3642 	for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
3643 		if (nfp_prog->subprog[idx].needs_reg_push)
3644 			return true;
3645
3646 	return false;
3647 }
3648
3649 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
3650 {
3651 	u8 reg;
3652
3653 	/* Subroutine: Save all callee saved registers (R6 ~ R9).
3654 	 * imm_b() holds the return address.
3655 	 */
3656 	nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
3657 	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3658 		u8 adj = (reg - BPF_REG_0) * 2;
3659 		u8 idx = (reg - BPF_REG_6) * 2;
3660
3661 		/* The first slot in the stack frame is used to push the return
3662 		 * address in bpf_to_bpf_call(), start just after.
3663 		 */
3664 		wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));
3665
3666 		if (reg == BPF_REG_8)
3667 			/* Prepare to jump back, last 3 insns use defer slots */
3668 			emit_rtn(nfp_prog, imm_b(nfp_prog), 3);
3669
3670 		wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
3671 	}
3672 }
3673
3674 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
3675 {
3676 	u8 reg;
3677
3678 	/* Subroutine: Restore all callee saved registers (R6 ~ R9).
3679 	 * ret_reg() holds the return address.
3680 	 */
3681 	nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
3682 	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3683 		u8 adj = (reg - BPF_REG_0) * 2;
3684 		u8 idx = (reg - BPF_REG_6) * 2;
3685
3686 		/* The first slot in the stack frame holds the return address,
3687 		 * start popping just after that.
3688 		 */
3689 		wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));
3690
3691 		if (reg == BPF_REG_8)
3692 			/* Prepare to jump back, last 3 insns use defer slots */
3693 			emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);
3694
3695 		wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
3696 	}
3697 }
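
/*
 * Editor's note: an illustrative sketch, not part of the driver.  It spells
 * out the local-memory frame layout the two subroutines above rely on: slot
 * 0 holds the return address pushed by bpf_to_bpf_call(), and the 32-bit
 * halves of R6..R9 occupy slots 1..8 as lo/hi pairs, exactly as computed
 * from idx = (reg - BPF_REG_6) * 2 in the loops above.
 */
static inline unsigned int nfp_callee_save_slot_model(u8 bpf_reg, bool hi_half)
{
	return 1 + (bpf_reg - BPF_REG_6) * 2 + hi_half;
}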
3698
3699 static void nfp_outro(struct nfp_prog *nfp_prog)
3700 {
3701 	switch (nfp_prog->type) {
3702 	case BPF_PROG_TYPE_SCHED_CLS:
3703 		nfp_outro_tc_da(nfp_prog);
3704 		break;
3705 	case BPF_PROG_TYPE_XDP:
3706 		nfp_outro_xdp(nfp_prog);
3707 		break;
3708 	default:
3709 		WARN_ON(1);
3710 	}
3711
3712 	if (!nfp_prog_needs_callee_reg_save(nfp_prog))
3713 		return;
3714
3715 	nfp_push_callee_registers(nfp_prog);
3716 	nfp_pop_callee_registers(nfp_prog);
3717 }
3718
3719 static int nfp_translate(struct nfp_prog *nfp_prog)
3720 {
3721 	struct nfp_insn_meta *meta;
3722 	unsigned int depth;
3723 	int err;
3724
3725 	depth = nfp_prog->subprog[0].stack_depth;
3726 	nfp_prog->stack_frame_depth = round_up(depth, 4);
3727
3728 	nfp_intro(nfp_prog);
3729 	if (nfp_prog->error)
3730 		return nfp_prog->error;
3731
3732 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3733 		instr_cb_t cb = instr_cb[meta->insn.code];
3734
3735 		meta->off = nfp_prog_current_offset(nfp_prog);
3736
3737 		if (nfp_is_subprog_start(meta)) {
3738 			nfp_start_subprog(nfp_prog, meta);
3739 			if (nfp_prog->error)
3740 				return nfp_prog->error;
3741 		}
3742
3743 		if (meta->flags & FLAG_INSN_SKIP_MASK) {
3744 			nfp_prog->n_translated++;
3745 			continue;
3746 		}
3747
3748 		if (nfp_meta_has_prev(nfp_prog, meta) &&
3749 		    nfp_meta_prev(meta)->double_cb)
3750 			cb = nfp_meta_prev(meta)->double_cb;
3751 		if (!cb)
3752 			return -ENOENT;
3753 		err = cb(nfp_prog, meta);
3754 		if (err)
3755 			return err;
3756 		if (nfp_prog->error)
3757 			return nfp_prog->error;
3758
3759 		nfp_prog->n_translated++;
3760 	}
3761
3762 	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
3763
3764 	nfp_outro(nfp_prog);
3765 	if (nfp_prog->error)
3766 		return nfp_prog->error;
3767
3768 	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
3769 	if (nfp_prog->error)
3770 		return nfp_prog->error;
3771
3772 	return nfp_fixup_branches(nfp_prog);
3773 }
3774
3775 /* --- Optimizations --- */
3776 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
3777 {
3778 	struct nfp_insn_meta *meta;
3779
3780 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3781 		struct bpf_insn insn = meta->insn;
3782
3783 		/* Programs converted from cBPF start with register xoring */
3784 		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
3785 		    insn.src_reg == insn.dst_reg)
3786 			continue;
3787
3788 		/* Programs start with R6 = R1 but we ignore the skb pointer */
3789 		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
3790 		    insn.src_reg == 1 && insn.dst_reg == 6)
3791 			meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
3792
3793 		/* Return as soon as something doesn't match */
3794 		if (!(meta->flags & FLAG_INSN_SKIP_MASK))
3795 			return;
3796 	}
3797 }
3798
3799 /* abs(insn.imm) will fit better into an unrestricted register immediate -
3800  * convert add/sub of a negative number into a sub/add of a positive one.
3801  */
3802 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
3803 {
3804 	struct nfp_insn_meta *meta;
3805
3806 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3807 		struct bpf_insn insn = meta->insn;
3808
3809 		if (meta->flags & FLAG_INSN_SKIP_MASK)
3810 			continue;
3811
3812 		if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta))
3813 			continue;
3814 		if (BPF_SRC(insn.code) != BPF_K)
3815 			continue;
3816 		if (insn.imm >= 0)
3817 			continue;
3818
3819 		if (is_mbpf_jmp(meta)) {
3820 			switch (BPF_OP(insn.code)) {
3821 			case BPF_JGE:
3822 			case BPF_JSGE:
3823 			case BPF_JLT:
3824 			case BPF_JSLT:
3825 				meta->jump_neg_op = true;
3826 				break;
3827 			default:
3828 				continue;
3829 			}
3830 		} else {
3831 			if (BPF_OP(insn.code) == BPF_ADD)
3832 				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
3833 			else if (BPF_OP(insn.code) == BPF_SUB)
3834 				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
3835 			else
3836 				continue;
3837
3838 			meta->insn.code = insn.code | BPF_K;
3839 		}
3840
3841 		meta->insn.imm = -insn.imm;
3842 	}
3843 }
3844
3845 /* Remove masking after load since our load guarantees this is not needed */
3846 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
3847 {
3848 	struct nfp_insn_meta *meta1, *meta2;
3849 	const s32 exp_mask[] = {
3850 		[BPF_B] = 0x000000ffU,
3851 		[BPF_H] = 0x0000ffffU,
3852 		[BPF_W] = 0xffffffffU,
3853 	};
3854
3855 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
3856 		struct bpf_insn insn, next;
3857
3858 		insn = meta1->insn;
3859 		next = meta2->insn;
3860
3861 		if (BPF_CLASS(insn.code) != BPF_LD)
3862 			continue;
3863 		if (BPF_MODE(insn.code) != BPF_ABS &&
3864 		    BPF_MODE(insn.code) != BPF_IND)
3865 			continue;
3866
3867 		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
3868 			continue;
3869
3870 		if (!exp_mask[BPF_SIZE(insn.code)])
3871 			continue;
3872 		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
3873 			continue;
3874
3875 		if (next.src_reg || next.dst_reg)
3876 			continue;
3877
3878 		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
3879 			continue;
3880
3881 		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
3882 	}
3883 }
3884
3885 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
3886 {
3887 	struct nfp_insn_meta *meta1, *meta2, *meta3;
3888
3889 	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
3890 		struct bpf_insn insn, next1, next2;
3891
3892 		insn = meta1->insn;
3893 		next1 = meta2->insn;
3894 		next2 = meta3->insn;
3895
3896 		if (BPF_CLASS(insn.code) != BPF_LD)
3897 			continue;
3898 		if (BPF_MODE(insn.code) != BPF_ABS &&
3899 		    BPF_MODE(insn.code) != BPF_IND)
3900 			continue;
3901 		if (BPF_SIZE(insn.code) != BPF_W)
3902 			continue;
3903
3904 		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
3905 		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
3906 		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
3907 		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
3908 			continue;
3909
3910 		if (next1.src_reg || next1.dst_reg ||
3911 		    next2.src_reg || next2.dst_reg)
3912 			continue;
3913
3914 		if (next1.imm != 0x20 || next2.imm != 0x20)
3915 			continue;
3916
3917 		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
3918 		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
3919 			continue;
3920
3921 		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
3922 		meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
3923 	}
3924 }
3925
3926 /* A load/store pair that forms a memory copy should look like the following:
3927  *
3928  *   ld_width R, [addr_src + offset_src]
3929  *   st_width [addr_dest + offset_dest], R
3930  *
3931  * The destination register of the load and the source register of the store
3932  * should be the same, and the load and store should also operate at the same
3933  * width.  If either addr_src or addr_dest is the stack pointer, we don't do
3934  * the CPP optimization, as the stack is modelled by registers on the NFP.
3935  */
3936 static bool
3937 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
3938 		    struct nfp_insn_meta *st_meta)
3939 {
3940 	struct bpf_insn *ld = &ld_meta->insn;
3941 	struct bpf_insn *st = &st_meta->insn;
3942
3943 	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
3944 		return false;
3945
3946 	if (ld_meta->ptr.type != PTR_TO_PACKET &&
3947 	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
3948 		return false;
3949
3950 	if (st_meta->ptr.type != PTR_TO_PACKET)
3951 		return false;
3952
3953 	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
3954 		return false;
3955
3956 	if (ld->dst_reg != st->src_reg)
3957 		return false;
3958
3959 	/* There is a jump to the store insn in this pair. */
3960 	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
3961 		return false;
3962
3963 	return true;
3964 }
3965
3966 /* Currently, we only support chaining load/store pairs if:
3967  *
3968  *  - Their address base registers are the same.
3969  *  - Their address offsets are in the same order.
3970  *  - They operate at the same memory width.
3971  *  - There is no jump into the middle of them.
3972  */
3973 static bool
3974 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
3975 			      struct nfp_insn_meta *st_meta,
3976 			      struct bpf_insn *prev_ld,
3977 			      struct bpf_insn *prev_st)
3978 {
3979 	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
3980 	struct bpf_insn *ld = &ld_meta->insn;
3981 	struct bpf_insn *st = &st_meta->insn;
3982 	s16 prev_ld_off, prev_st_off;
3983
3984 	/* This pair is the start pair. */
3985 	if (!prev_ld)
3986 		return true;
3987
3988 	prev_size = BPF_LDST_BYTES(prev_ld);
3989 	curr_size = BPF_LDST_BYTES(ld);
3990 	prev_ld_base = prev_ld->src_reg;
3991 	prev_st_base = prev_st->dst_reg;
3992 	prev_ld_dst = prev_ld->dst_reg;
3993 	prev_ld_off = prev_ld->off;
3994 	prev_st_off = prev_st->off;
3995
3996 	if (ld->dst_reg != prev_ld_dst)
3997 		return false;
3998
3999 	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
4000 		return false;
4001
4002 	if (curr_size != prev_size)
4003 		return false;
4004
4005 	/* There is a jump to the head of this pair. */
4006 	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
4007 		return false;
4008
4009 	/* Both in ascending order. */
4010 	if (prev_ld_off + prev_size == ld->off &&
4011 	    prev_st_off + prev_size == st->off)
4012 		return true;
4013
4014 	/* Both in descending order. */
4015 	if (ld->off + curr_size == prev_ld_off &&
4016 	    st->off + curr_size == prev_st_off)
4017 		return true;
4018
4019 	return false;
4020 }
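
/*
 * Editor's note: an illustrative sketch, not part of the driver.  A concrete
 * (hypothetical) instance of a chain the check above accepts - two 4-byte
 * pairs whose load and store offsets both ascend by the access size:
 *
 *	r0 = *(u32 *)(r1 + 0)
 *	*(u32 *)(r2 + 0) = r0
 *	r0 = *(u32 *)(r1 + 4)
 *	*(u32 *)(r2 + 4) = r0
 *
 * The offset test itself reduces to the helper below.
 */
static inline bool
nfp_ldst_chain_off_model(s16 prev_ld_off, s16 prev_st_off, u8 prev_size,
			 s16 ld_off, s16 st_off, u8 curr_size)
{
	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld_off &&
	    prev_st_off + prev_size == st_off)
		return true;
	/* Both in descending order. */
	return ld_off + curr_size == prev_ld_off &&
	       st_off + curr_size == prev_st_off;
}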
4021
4022 /* Return TRUE if a cross memory access happens.  Cross memory access means
4023  * the store area overlaps the load area such that a later load might read
4024  * the value from a previous store; in that case we can't treat the sequence
4025  * as a memory copy.
4026  */
4027 static bool
4028 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
4029 		 struct nfp_insn_meta *head_st_meta)
4030 {
4031 	s16 head_ld_off, head_st_off, ld_off;
4032
4033 	/* Different pointer types do not overlap. */
4034 	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
4035 		return false;
4036
4037 	/* Both load and store are PTR_TO_PACKET; check ID info. */
4038 	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
4039 		return true;
4040
4041 	/* Canonicalize the offsets.  Turn all of them against the original
4042 	 * base register.
4043 	 */
4044 	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
4045 	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
4046 	ld_off = ld->off + head_ld_meta->ptr.off;
4047
4048 	/* Ascending order cross. */
4049 	if (ld_off > head_ld_off &&
4050 	    head_ld_off < head_st_off && ld_off >= head_st_off)
4051 		return true;
4052
4053 	/* Descending order cross. */
4054 	if (ld_off < head_ld_off &&
4055 	    head_ld_off > head_st_off && ld_off <= head_st_off)
4056 		return true;
4057
4058 	return false;
4059 }
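
/*
 * Editor's note: an illustrative sketch, not part of the driver.  It restates
 * the two overlap tests of cross_mem_access() once all offsets have been
 * canonicalized against the same base: a chain walking away from its head
 * crosses the store area as soon as it reaches or passes the head store
 * offset.
 */
static inline bool
nfp_mem_ranges_cross_model(s16 head_ld_off, s16 head_st_off, s16 ld_off)
{
	if (ld_off > head_ld_off)	/* ascending chain */
		return head_ld_off < head_st_off && ld_off >= head_st_off;
	if (ld_off < head_ld_off)	/* descending chain */
		return head_ld_off > head_st_off && ld_off <= head_st_off;
	return false;
}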
4060
4061 /* This pass tries to identify the following instruction sequences.
4062  *
4063  *   load R, [regA + offA]
4064  *   store [regB + offB], R
4065  *   load R, [regA + offA + const_imm_A]
4066  *   store [regB + offB + const_imm_A], R
4067  *   load R, [regA + offA + 2 * const_imm_A]
4068  *   store [regB + offB + 2 * const_imm_A], R
4069  *   ...
4070  *
4071  * The above sequence is typically generated by the compiler when lowering
4072  * memcpy.  The NFP prefers using CPP instructions to accelerate it.
4073  */
4074 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
4075 {
4076 	struct nfp_insn_meta *head_ld_meta = NULL;
4077 	struct nfp_insn_meta *head_st_meta = NULL;
4078 	struct nfp_insn_meta *meta1, *meta2;
4079 	struct bpf_insn *prev_ld = NULL;
4080 	struct bpf_insn *prev_st = NULL;
4081 	u8 count = 0;
4082
4083 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
4084 		struct bpf_insn *ld = &meta1->insn;
4085 		struct bpf_insn *st = &meta2->insn;
4086
4087 		/* Reset record status if any of the following is true:
4088 		 * - The current insn pair is not load/store.
4089 		 * - The load/store pair doesn't chain with the previous one.
4090 		 * - The chained load/store pair crosses the previous pair.
4091 		 * - The chained load/store pair has a total size of memory
4092 		 *   copy beyond 128 bytes, which is the maximum length a
4093 		 *   single NFP CPP command can transfer.
4094 		 */
4095 		if (!curr_pair_is_memcpy(meta1, meta2) ||
4096 		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
4097 						   prev_st) ||
4098 		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
4099 						       head_st_meta) ||
4100 				      head_ld_meta->ldst_gather_len >= 128))) {
4101 			if (!count)
4102 				continue;
4103
4104 			if (count > 1) {
4105 				s16 prev_ld_off = prev_ld->off;
4106 				s16 prev_st_off = prev_st->off;
4107 				s16 head_ld_off = head_ld_meta->insn.off;
4108
4109 				if (prev_ld_off < head_ld_off) {
4110 					head_ld_meta->insn.off = prev_ld_off;
4111 					head_st_meta->insn.off = prev_st_off;
4112 					head_ld_meta->ldst_gather_len =
4113 						-head_ld_meta->ldst_gather_len;
4114 				}
4115
4116 				head_ld_meta->paired_st = &head_st_meta->insn;
4117 				head_st_meta->flags |=
4118 					FLAG_INSN_SKIP_PREC_DEPENDENT;
4119 			} else {
4120 				head_ld_meta->ldst_gather_len = 0;
4121 			}
4122
4123 			/* If the chain is ended by a load/store pair then it
4124 			 * could serve as the new head of the next chain.
4125 			 */
4126 			if (curr_pair_is_memcpy(meta1, meta2)) {
4127 				head_ld_meta = meta1;
4128 				head_st_meta = meta2;
4129 				head_ld_meta->ldst_gather_len =
4130 					BPF_LDST_BYTES(ld);
4131 				meta1 = nfp_meta_next(meta1);
4132 				meta2 = nfp_meta_next(meta2);
4133 				prev_ld = ld;
4134 				prev_st = st;
4135 				count = 1;
4136 			} else {
4137 				head_ld_meta = NULL;
4138 				head_st_meta = NULL;
4139 				prev_ld = NULL;
4140 				prev_st = NULL;
4141 				count = 0;
4142 			}
4143
4144 			continue;
4145 		}
4146
4147 		if (!head_ld_meta) {
4148 			head_ld_meta = meta1;
4149 			head_st_meta = meta2;
4150 		} else {
4151 			meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
4152 			meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
4153 		}
4154
4155 		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
4156 		meta1 = nfp_meta_next(meta1);
4157 		meta2 = nfp_meta_next(meta2);
4158 		prev_ld = ld;
4159 		prev_st = st;
4160 		count++;
4161 	}
4162 }
4163
4164 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
4165 {
4166 	struct nfp_insn_meta *meta, *range_node = NULL;
4167 	s16 range_start = 0, range_end = 0;
4168 	bool cache_avail = false;
4169 	struct bpf_insn *insn;
4170 	s32 range_ptr_off = 0;
4171 	u32 range_ptr_id = 0;
4172
4173 	list_for_each_entry(meta, &nfp_prog->insns, l) {
4174 		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
4175 			cache_avail = false;
4176
4177 		if (meta->flags & FLAG_INSN_SKIP_MASK)
4178 			continue;
4179
4180 		insn = &meta->insn;
4181
4182 		if (is_mbpf_store_pkt(meta) ||
4183 		    insn->code == (BPF_JMP | BPF_CALL) ||
4184 		    is_mbpf_classic_store_pkt(meta) ||
4185 		    is_mbpf_classic_load(meta)) {
4186 			cache_avail = false;
4187 			continue;
4188 		}
4189
4190 		if (!is_mbpf_load(meta))
4191 			continue;
4192
4193 		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
4194 			cache_avail = false;
4195 			continue;
4196 		}
4197
4198 		if (!cache_avail) {
4199 			cache_avail = true;
4200 			if (range_node)
4201 				goto end_current_then_start_new;
4202 			goto start_new;
4203 		}
4204
4205 		/* Check ID to make sure two reads share the same
4206 		 * variable offset against PTR_TO_PACKET, and check OFF
4207 		 * to make sure they also share the same constant
4208 		 * offset.
4209 		 *
4210 		 * OFFs don't really need to be the same, because they
4211 		 * are the constant offsets against PTR_TO_PACKET, so
4212 		 * for different OFFs, we could canonicalize them to
4213 		 * offsets against the original packet pointer.  We don't
4214 		 * support this.
4215 		 */
4216 		if (meta->ptr.id == range_ptr_id &&
4217 		    meta->ptr.off == range_ptr_off) {
4218 			s16 new_start = range_start;
4219 			s16 end, off = insn->off;
4220 			s16 new_end = range_end;
4221 			bool changed = false;
4222
4223 			if (off < range_start) {
4224 				new_start = off;
4225 				changed = true;
4226 			}
4227
4228 			end = off + BPF_LDST_BYTES(insn);
4229 			if (end > range_end) {
4230 				new_end = end;
4231 				changed = true;
4232 			}
4233
4234 			if (!changed)
4235 				continue;
4236
4237 			if (new_end - new_start <= 64) {
4238 				/* Install new range. */
4239 				range_start = new_start;
4240 				range_end = new_end;
4241 				continue;
4242 			}
4243 		}
4244
4245 end_current_then_start_new:
4246 		range_node->pkt_cache.range_start = range_start;
4247 		range_node->pkt_cache.range_end = range_end;
4248 start_new:
4249 		range_node = meta;
4250 		range_node->pkt_cache.do_init = true;
4251 		range_ptr_id = range_node->ptr.id;
4252 		range_ptr_off = range_node->ptr.off;
4253 		range_start = insn->off;
4254 		range_end = insn->off + BPF_LDST_BYTES(insn);
4255 	}
4256
4257 	if (range_node) {
4258 		range_node->pkt_cache.range_start = range_start;
4259 		range_node->pkt_cache.range_end = range_end;
4260 	}
4261
4262 	list_for_each_entry(meta, &nfp_prog->insns, l) {
4263 		if (meta->flags & FLAG_INSN_SKIP_MASK)
4264 			continue;
4265
4266 		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
4267 			if (meta->pkt_cache.do_init) {
4268 				range_start = meta->pkt_cache.range_start;
4269 				range_end = meta->pkt_cache.range_end;
4270 			} else {
4271 				meta->pkt_cache.range_start = range_start;
4272 				meta->pkt_cache.range_end = range_end;
4273 			}
4274 		}
4275 	}
4276 }
4277
4278 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
4279 {
4280 	nfp_bpf_opt_reg_init(nfp_prog);
4281
4282 	nfp_bpf_opt_neg_add_sub(nfp_prog);
4283 	nfp_bpf_opt_ld_mask(nfp_prog);
4284 	nfp_bpf_opt_ld_shift(nfp_prog);
4285 	nfp_bpf_opt_ldst_gather(nfp_prog);
4286 	nfp_bpf_opt_pkt_cache(nfp_prog);
4287
4288 	return 0;
4289 }
4290
4291 static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
4292 {
4293 	struct nfp_insn_meta *meta1, *meta2;
4294 	struct nfp_bpf_map *nfp_map;
4295 	struct bpf_map *map;
4296 	u32 id;
4297
4298 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
4299 		if (meta1->flags & FLAG_INSN_SKIP_MASK ||
4300 		    meta2->flags & FLAG_INSN_SKIP_MASK)
4301 			continue;
4302
4303 		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
4304 		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
4305 			continue;
4306
4307 		map = (void *)(unsigned long)((u32)meta1->insn.imm |
4308 					      (u64)meta2->insn.imm << 32);
4309 		if (bpf_map_offload_neutral(map)) {
4310 			id = map->id;
4311 		} else {
4312 			nfp_map = map_to_offmap(map)->dev_priv;
4313 			id = nfp_map->tid;
4314 		}
4315
4316 		meta1->insn.imm = id;
4317 		meta2->insn.imm = 0;
4318 	}
4319
4320 	return 0;
4321 }
4322
4323 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
4324 {
4325 	__le64 *ustore = (__force __le64 *)prog;
4326 	int i;
4327
4328 	for (i = 0; i < len; i++) {
4329 		int err;
4330
4331 		err = nfp_ustore_check_valid_no_ecc(prog[i]);
4332 		if (err)
4333 			return err;
4334
4335 		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
4336 	}
4337
4338 	return 0;
4339 }
4340
4341 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
4342 {
4343 	void *prog;
4344
4345 	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
4346 	if (!prog)
4347 		return;
4348
4349 	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
4350 	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
4351 	kvfree(nfp_prog->prog);
4352 	nfp_prog->prog = prog;
4353 }
4354
4355 int nfp_bpf_jit(struct nfp_prog *nfp_prog)
4356 {
4357 	int ret;
4358
4359 	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
4360 	if (ret)
4361 		return ret;
4362
4363 	ret = nfp_bpf_optimize(nfp_prog);
4364 	if (ret)
4365 		return ret;
4366
4367 	ret = nfp_translate(nfp_prog);
4368 	if (ret) {
4369 		pr_err("Translation failed with error %d (translated: %u)\n",
4370 		       ret, nfp_prog->n_translated);
4371 		return -EINVAL;
4372 	}
4373
4374 	nfp_bpf_prog_trim(nfp_prog);
4375
4376 	return ret;
4377 }
4378
4379 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
4380 {
4381 	struct nfp_insn_meta *meta;
4382
4383 	/* Another pass to record jump information. */
4384 	list_for_each_entry(meta, &nfp_prog->insns, l) {
4385 		struct nfp_insn_meta *dst_meta;
4386 		u64 code = meta->insn.code;
4387 		unsigned int dst_idx;
4388 		bool pseudo_call;
4389
4390 		if (!is_mbpf_jmp(meta))
4391 			continue;
4392 		if (BPF_OP(code) == BPF_EXIT)
4393 			continue;
4394 		if (is_mbpf_helper_call(meta))
4395 			continue;
4396
4397 		/* If opcode is BPF_CALL at this point, this can only be a
4398 		 * BPF-to-BPF call (a.k.a. pseudo call).
4399 		 */
4400 		pseudo_call = BPF_OP(code) == BPF_CALL;
4401
4402 		if (pseudo_call)
4403 			dst_idx = meta->n + 1 + meta->insn.imm;
4404 		else
4405 			dst_idx = meta->n + 1 + meta->insn.off;
4406
4407 		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);
4408
4409 		if (pseudo_call)
4410 			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
4411
4412 		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
4413 		meta->jmp_dst = dst_meta;
4414 	}
4415 }
4416
4417 bool nfp_bpf_supported_opcode(u8 code)
4418 {
4419 	return !!instr_cb[code];
4420 }
4421
4422 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
4423 {
4424 	unsigned int i;
4425 	u64 *prog;
4426 	int err;
4427
4428 	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
4429 		       GFP_KERNEL);
4430 	if (!prog)
4431 		return ERR_PTR(-ENOMEM);
4432
4433 	for (i = 0; i < nfp_prog->prog_len; i++) {
4434 		enum nfp_relo_type special;
4435 		u32 val;
4436 		u16 off;
4437
4438 		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
4439 		switch (special) {
4440 		case RELO_NONE:
4441 			continue;
4442 		case RELO_BR_REL:
4443 			br_add_offset(&prog[i], bv->start_off);
4444 			break;
4445 		case RELO_BR_GO_OUT:
4446 			br_set_offset(&prog[i],
4447 				      nfp_prog->tgt_out + bv->start_off);
4448 			break;
4449 		case RELO_BR_GO_ABORT:
4450 			br_set_offset(&prog[i],
4451 				      nfp_prog->tgt_abort + bv->start_off);
4452 			break;
4453 		case RELO_BR_GO_CALL_PUSH_REGS:
4454 			if (!nfp_prog->tgt_call_push_regs) {
4455 				pr_err("BUG: failed to detect subprogram registers needs\n");
4456 				err = -EINVAL;
4457 				goto err_free_prog;
4458 			}
4459 			off = nfp_prog->tgt_call_push_regs + bv->start_off;
4460 			br_set_offset(&prog[i], off);
4461 			break;
4462 		case RELO_BR_GO_CALL_POP_REGS:
4463 			if (!nfp_prog->tgt_call_pop_regs) {
4464 				pr_err("BUG: failed to detect subprogram registers needs\n");
4465 				err = -EINVAL;
4466 				goto err_free_prog;
4467 			}
4468 			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
4469 			br_set_offset(&prog[i], off);
4470 			break;
4471 		case RELO_BR_NEXT_PKT:
4472 			br_set_offset(&prog[i], bv->tgt_done);
4473 			break;
4474 		case RELO_BR_HELPER:
4475 			val = br_get_offset(prog[i]);
4476 			val -= BR_OFF_RELO;
4477 			switch (val) {
4478 			case BPF_FUNC_map_lookup_elem:
4479 				val = nfp_prog->bpf->helpers.map_lookup;
4480 				break;
4481 			case BPF_FUNC_map_update_elem:
4482 				val = nfp_prog->bpf->helpers.map_update;
4483 				break;
4484 			case BPF_FUNC_map_delete_elem:
4485 				val = nfp_prog->bpf->helpers.map_delete;
4486 				break;
4487 			case BPF_FUNC_perf_event_output:
4488 				val = nfp_prog->bpf->helpers.perf_event_output;
4489 				break;
4490 			default:
4491 				pr_err("relocation of unknown helper %d\n",
4492 				       val);
4493 				err = -EINVAL;
4494 				goto err_free_prog;
4495 			}
4496 			br_set_offset(&prog[i], val);
4497 			break;
4498 		case RELO_IMMED_REL:
4499 			immed_add_value(&prog[i], bv->start_off);
4500 			break;
4501 		}
4502
4503 		prog[i] &= ~OP_RELO_TYPE;
4504 	}
4505
4506 	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
4507 	if (err)
4508 		goto err_free_prog;
4509
4510 	return prog;
4511
4512 err_free_prog:
4513 	kfree(prog);
4514 	return ERR_PTR(err);
4515 }
4516
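
/*
 * Editor's note: an illustrative sketch, not part of the driver.  It models
 * how nfp_bpf_jit_prepare() locates a jump destination: BPF branch offsets
 * are relative to the instruction following the jump, hence the "+ 1";
 * pseudo calls carry the offset in 'imm', plain jumps in 'off'.
 */
static inline unsigned int
nfp_jmp_dst_idx_model(unsigned int insn_idx, bool pseudo_call, s32 imm, s16 off)
{
	return insn_idx + 1 + (pseudo_call ? imm : off);
}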