// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The for-each macros for "multiple" entries provide pos and next<n>
 * pointers.  It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the
	 * translation will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}
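/* A note on the error model used by everything below: the emitters return
 * void and record the first failure in nfp_prog->error instead.  Callers
 * may keep emitting after a failure; the program is rejected when the error
 * field is finally checked, which is also why
 * nfp_prog_confirm_current_offset() treats a recorded error as "offset OK".
 */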
/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}
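/* A branch's "defer" argument is the number of delay-slot instructions
 * that still execute after the branch issues; emit_br_relo() above rejects
 * more than 2 for unconditional branches.  Several sequences below rely on
 * this, e.g. helper calls branch with defer 2 and load their arguments in
 * the two following instructions.
 */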
static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X we need a right rotate of X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_BR_ALU_BASE |
		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
		      reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}
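/* Note how emit_immed() splits a 16-bit value: the low byte travels as an
 * immediate B operand while the high byte goes into the instruction's IMM
 * field, with @width and @shift placing the result within the register.
 */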
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the step number is MUL_LAST or MUL_LAST_2, the left
		 * source is used as the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}
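/* ld_field merges the bytes selected by @bmask from the (shifted) source
 * into the destination.  For example, bmask 0x3 with SHF_SC_NONE replaces
 * only the two least significant bytes of @dst; wrp_reg_subpart() and
 * wrp_reg_or_subpart() below are built on exactly this.
 */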
static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode two immeds in a single instruction
	 * with our normal swreg infra, so if the param is an immed, we encode
	 * it as reg_none() and copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
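/* Worked examples for pack_immed()/wrp_immed(): 0x0000abcd is encoded
 * directly (shift 0B); 0xab000000 becomes val 0xab00 with IMMED_SHIFT_2B;
 * 0xffff1234 goes through the inverted path (~imm == 0x0000edcb, invert
 * set).  A value like 0x12345678 fits none of these and costs two immed
 * instructions.
 */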
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
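/* addr40_offset() forms a 40-bit address from a GPR pair.  Adding the
 * offset to the low word may carry, e.g. low word 0xfffffff8 + offset
 * 0x10, which is why the high word is adjusted with ALU_OP_ADD_C against
 * an immediate zero.
 */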
/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Set up PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use a single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use a single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use a single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use a single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte-aligned
		 * part of the length, then another direct_ref write8 to
		 * write the remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value it held before
	 * this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}
static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
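/* Example for data_ld(): a 2-byte load still transfers 4 bytes (sz is
 * rounded up), then shifts right by (4 - 2) * 8 == 16 bits so the two
 * requested bytes land at the low end of the destination, per the
 * big-endian layout noted above.
 */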
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM
		 * word into a new GPR.  IOW we've already read that LMEM
		 * word and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations will need RMW,
		 * the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}
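/* Example for the mask/shift computation above: moving 2 bytes from byte
 * offset 2 of the source word to byte offset 0 of the destination uses
 * mask 0x3 and a right shift of 16 bits; with the offsets reversed the
 * mask becomes 0xc and the shift becomes a left one (32 - 16 == 16).
 */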
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const ||
	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on the low bits of the value
		 * being discarded when written to the LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because the offset is
		 * ORed in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}
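/* Example of the slicing loop above: a 4-byte access at LMEM byte offset 1
 * is split into a 3-byte and a 1-byte step.  Each slice must end at the
 * next 4-byte boundary of either the LMEM word or the GPR, so every step()
 * call touches a single 32-bit register on each side.
 */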
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->flags |= FLAG_INSN_SKIP_NOOP;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->flags |= FLAG_INSN_SKIP_NOOP;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	if (is_mbpf_jmp64(meta))
		wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
				 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}
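/* Reading the table above: BPF_JGT dst, src (dst > src) is encoded as the
 * swapped compare src - dst with BR_BLO, i.e. branch if the subtraction
 * borrows, which for unsigned values is exactly src < dst.
 */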
static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	if (is_mbpf_jmp64(meta)) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		if (!code->swap)
			emit_alu(nfp_prog, reg_none(),
				 reg_a(reg + 1), carry_op, tmp_reg);
		else
			emit_alu(nfp_prog, reg_none(),
				 tmp_reg, carry_op, reg_a(reg + 1));
	}

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	if (is_mbpf_jmp64(meta))
		emit_alu(nfp_prog, reg_none(),
			 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}

static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
	if (gen_high_half)
		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
			 reg_none());
	else
		wrp_immed(nfp_prog, dst_hi, 0);
}

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{
	swreg multiplier, multiplicand, dst_hi, dst_lo;
	const struct bpf_insn *insn = &meta->insn;
	u32 lopnd_max, ropnd_max;
	u8 dst_reg;

	dst_reg = insn->dst_reg;
	multiplicand = reg_a(dst_reg * 2);
	dst_hi = reg_both(dst_reg * 2 + 1);
	dst_lo = reg_both(dst_reg * 2);
	lopnd_max = meta->umax_dst;
	if (ropnd_from_reg) {
		multiplier = reg_b(insn->src_reg * 2);
		ropnd_max = meta->umax_src;
	} else {
		u32 imm = insn->imm;

		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
		ropnd_max = imm;
	}
	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
			    gen_high_half);
	else
		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);

	return 0;
}
static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{
	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	swreg magic;

	if (imm > U32_MAX) {
		wrp_immed(nfp_prog, dst_both, 0);
		return 0;
	}

	/* NOTE: because we are using "reciprocal_value_adv" which doesn't
	 * support "divisor > (1u << 31)", we need to JIT a separate NFP
	 * sequence to handle such a case.  Its result actually equals the
	 * result of the unsigned comparison "dst >= imm", which can be
	 * calculated using the following NFP sequence:
	 *
	 *  alu[--, dst, -, imm]
	 *  immed[imm, 0]
	 *  alu[dst, imm, +carry, 0]
	 */
	if (imm > 1U << 31) {
		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));

		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
			 reg_imm(0));
		return 0;
	}

	rvalue = reciprocal_value_adv(imm, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(imm & 1)) {
		pre_shift = fls(imm & -imm) - 1;
		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}
	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
	if (imm == 1U << exp) {
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, exp);
	} else if (rvalue.is_wide_m) {
		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
			    magic, true);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, 1);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, rvalue.sh - 1);
	} else {
		if (pre_shift)
			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
				 dst_b, SHF_SC_R_SHF, pre_shift);
		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
			 dst_b, SHF_SC_R_SHF, rvalue.sh);
	}

	return 0;
}
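/* Shape of the generated division code above: a power-of-two divisor is a
 * plain right shift; otherwise dst / imm is the high half of dst * magic
 * shifted right by rvalue.sh, with the extra subtract/shift/add round-trip
 * when the magic constant would need 33 bits (rvalue.is_wide_m).
 */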
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 ret_einval, end;
	swreg plen, delta;

	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));

	plen = imm_a(nfp_prog);
	delta = reg_a(2 * 2);

	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
	end = nfp_prog_current_offset(nfp_prog) + 11;

	/* Calculate resulting length */
	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
	/* delta == 0 is not allowed by the kernel; the add must overflow to
	 * make the length smaller.
	 */
	emit_br(nfp_prog, BR_BCC, ret_einval, 0);

	/* if (new_len < 14) then -EINVAL */
	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_ADD, delta);

	emit_br(nfp_prog, BR_UNC, end, 2);
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_frame_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}
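/* Helper-call convention visible in map_call_stack_common(): the call is a
 * relocated unconditional branch with defer 2, the two delay slots load
 * the arguments (map ID into A0, relocated return address into B0), and
 * the return target is the instruction right after the delay slots.
 */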
static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into the FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here; we will jump over the next instruction if
	 * the queue value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_frame_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
			 ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}
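/* The 64-bit BPF registers live in pairs of 32-bit GPRs, so 64-bit add and
 * subtract take two instructions each: a plain op on the low words followed
 * by the carry/borrow-propagating variant (ALU_OP_ADD_C / ALU_OP_SUB_C) on
 * the high words.
 */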
1879 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C, 1880 reg_b(insn->src_reg * 2 + 1)); 1881 1882 return 0; 1883 } 1884 1885 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1886 { 1887 const struct bpf_insn *insn = &meta->insn; 1888 u64 imm = insn->imm; /* sign extend */ 1889 1890 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1891 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1892 1893 return 0; 1894 } 1895 1896 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1897 { 1898 const struct bpf_insn *insn = &meta->insn; 1899 1900 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1901 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1902 reg_b(insn->src_reg * 2)); 1903 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1904 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1905 reg_b(insn->src_reg * 2 + 1)); 1906 1907 return 0; 1908 } 1909 1910 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1911 { 1912 const struct bpf_insn *insn = &meta->insn; 1913 u64 imm = insn->imm; /* sign extend */ 1914 1915 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1916 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1917 1918 return 0; 1919 } 1920 1921 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1922 { 1923 return wrp_mul(nfp_prog, meta, true, true); 1924 } 1925 1926 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1927 { 1928 return wrp_mul(nfp_prog, meta, true, false); 1929 } 1930 1931 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1932 { 1933 const struct bpf_insn *insn = &meta->insn; 1934 1935 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); 1936 } 1937 1938 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1939 { 1940 /* NOTE: verifier hook has rejected cases for which verifier doesn't 1941 * know whether the source operand is constant or not. 1942 */ 1943 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); 1944 } 1945 1946 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1947 { 1948 const struct bpf_insn *insn = &meta->insn; 1949 1950 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1951 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1952 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1953 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1954 1955 return 0; 1956 } 1957 1958 /* Pseudo code: 1959 * if shift_amt >= 32 1960 * dst_high = dst_low << shift_amt[4:0] 1961 * dst_low = 0; 1962 * else 1963 * dst_high = (dst_high, dst_low) >> (32 - shift_amt) 1964 * dst_low = dst_low << shift_amt 1965 * 1966 * The indirect shift will use the same logic at runtime. 
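 *
 * For example, shift_amt == 40 takes the first branch: dst_high
 * becomes dst_low << 8 (40[4:0] == 8) and dst_low becomes 0.
 * shift_amt == 12 takes the second: dst_high receives the 64-bit
 * pair shifted right by 20, i.e. the top 12 bits of dst_low shifted
 * into the old dst_high << 12, and dst_low becomes dst_low << 12.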
1967 */ 1968 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1969 { 1970 if (!shift_amt) 1971 return 0; 1972 1973 if (shift_amt < 32) { 1974 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), 1975 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, 1976 32 - shift_amt); 1977 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1978 reg_b(dst), SHF_SC_L_SHF, shift_amt); 1979 } else if (shift_amt == 32) { 1980 wrp_reg_mov(nfp_prog, dst + 1, dst); 1981 wrp_immed(nfp_prog, reg_both(dst), 0); 1982 } else if (shift_amt > 32) { 1983 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1984 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); 1985 wrp_immed(nfp_prog, reg_both(dst), 0); 1986 } 1987 1988 return 0; 1989 } 1990 1991 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1992 { 1993 const struct bpf_insn *insn = &meta->insn; 1994 u8 dst = insn->dst_reg * 2; 1995 1996 return __shl_imm64(nfp_prog, dst, insn->imm); 1997 } 1998 1999 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2000 { 2001 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, 2002 reg_b(src)); 2003 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); 2004 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, 2005 reg_b(dst), SHF_SC_R_DSHF); 2006 } 2007 2008 /* NOTE: for indirect left shift, HIGH part should be calculated first. */ 2009 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2010 { 2011 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2012 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2013 reg_b(dst), SHF_SC_L_SHF); 2014 } 2015 2016 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2017 { 2018 shl_reg64_lt32_high(nfp_prog, dst, src); 2019 shl_reg64_lt32_low(nfp_prog, dst, src); 2020 } 2021 2022 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2023 { 2024 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2025 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2026 reg_b(dst), SHF_SC_L_SHF); 2027 wrp_immed(nfp_prog, reg_both(dst), 0); 2028 } 2029 2030 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2031 { 2032 const struct bpf_insn *insn = &meta->insn; 2033 u64 umin, umax; 2034 u8 dst, src; 2035 2036 dst = insn->dst_reg * 2; 2037 umin = meta->umin_src; 2038 umax = meta->umax_src; 2039 if (umin == umax) 2040 return __shl_imm64(nfp_prog, dst, umin); 2041 2042 src = insn->src_reg * 2; 2043 if (umax < 32) { 2044 shl_reg64_lt32(nfp_prog, dst, src); 2045 } else if (umin >= 32) { 2046 shl_reg64_ge32(nfp_prog, dst, src); 2047 } else { 2048 /* Generate different instruction sequences depending on runtime 2049 * value of shift amount. 2050 */ 2051 u16 label_ge32, label_end; 2052 2053 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; 2054 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2055 2056 shl_reg64_lt32_high(nfp_prog, dst, src); 2057 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2058 emit_br(nfp_prog, BR_UNC, label_end, 2); 2059 /* shl_reg64_lt32_low packed in delay slot. 
*/ 2060 shl_reg64_lt32_low(nfp_prog, dst, src); 2061 2062 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2063 return -EINVAL; 2064 shl_reg64_ge32(nfp_prog, dst, src); 2065 2066 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2067 return -EINVAL; 2068 } 2069 2070 return 0; 2071 } 2072 2073 /* Pseudo code: 2074 * if shift_amt >= 32 2075 * dst_high = 0; 2076 * dst_low = dst_high >> shift_amt[4:0] 2077 * else 2078 * dst_high = dst_high >> shift_amt 2079 * dst_low = (dst_high, dst_low) >> shift_amt 2080 * 2081 * The indirect shift will use the same logic at runtime. 2082 */ 2083 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2084 { 2085 if (!shift_amt) 2086 return 0; 2087 2088 if (shift_amt < 32) { 2089 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2090 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2091 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2092 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2093 } else if (shift_amt == 32) { 2094 wrp_reg_mov(nfp_prog, dst, dst + 1); 2095 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2096 } else if (shift_amt > 32) { 2097 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2098 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2099 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2100 } 2101 2102 return 0; 2103 } 2104 2105 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2106 { 2107 const struct bpf_insn *insn = &meta->insn; 2108 u8 dst = insn->dst_reg * 2; 2109 2110 return __shr_imm64(nfp_prog, dst, insn->imm); 2111 } 2112 2113 /* NOTE: for indirect right shift, LOW part should be calculated first. */ 2114 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2115 { 2116 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2117 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2118 reg_b(dst + 1), SHF_SC_R_SHF); 2119 } 2120 2121 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2122 { 2123 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2124 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2125 reg_b(dst), SHF_SC_R_DSHF); 2126 } 2127 2128 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2129 { 2130 shr_reg64_lt32_low(nfp_prog, dst, src); 2131 shr_reg64_lt32_high(nfp_prog, dst, src); 2132 } 2133 2134 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2135 { 2136 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2137 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2138 reg_b(dst + 1), SHF_SC_R_SHF); 2139 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2140 } 2141 2142 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2143 { 2144 const struct bpf_insn *insn = &meta->insn; 2145 u64 umin, umax; 2146 u8 dst, src; 2147 2148 dst = insn->dst_reg * 2; 2149 umin = meta->umin_src; 2150 umax = meta->umax_src; 2151 if (umin == umax) 2152 return __shr_imm64(nfp_prog, dst, umin); 2153 2154 src = insn->src_reg * 2; 2155 if (umax < 32) { 2156 shr_reg64_lt32(nfp_prog, dst, src); 2157 } else if (umin >= 32) { 2158 shr_reg64_ge32(nfp_prog, dst, src); 2159 } else { 2160 /* Generate different instruction sequences depending on runtime 2161 * value of shift amount. 
2162 */ 2163 u16 label_ge32, label_end; 2164 2165 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2166 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2167 shr_reg64_lt32_low(nfp_prog, dst, src); 2168 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2169 emit_br(nfp_prog, BR_UNC, label_end, 2); 2170 /* shr_reg64_lt32_high packed in delay slot. */ 2171 shr_reg64_lt32_high(nfp_prog, dst, src); 2172 2173 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2174 return -EINVAL; 2175 shr_reg64_ge32(nfp_prog, dst, src); 2176 2177 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2178 return -EINVAL; 2179 } 2180 2181 return 0; 2182 } 2183 2184 /* Code logic is the same as __shr_imm64 except ashr requires signedness bit 2185 * told through PREV_ALU result. 2186 */ 2187 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2188 { 2189 if (!shift_amt) 2190 return 0; 2191 2192 if (shift_amt < 32) { 2193 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2194 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2195 /* Set signedness bit. */ 2196 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2197 reg_imm(0)); 2198 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2199 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2200 } else if (shift_amt == 32) { 2201 /* NOTE: this also helps setting signedness bit. */ 2202 wrp_reg_mov(nfp_prog, dst, dst + 1); 2203 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2204 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2205 } else if (shift_amt > 32) { 2206 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2207 reg_imm(0)); 2208 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2209 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2210 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2211 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2212 } 2213 2214 return 0; 2215 } 2216 2217 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2218 { 2219 const struct bpf_insn *insn = &meta->insn; 2220 u8 dst = insn->dst_reg * 2; 2221 2222 return __ashr_imm64(nfp_prog, dst, insn->imm); 2223 } 2224 2225 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2226 { 2227 /* NOTE: the first insn will set both indirect shift amount (source A) 2228 * and signedness bit (MSB of result). 2229 */ 2230 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2231 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2232 reg_b(dst + 1), SHF_SC_R_SHF); 2233 } 2234 2235 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2236 { 2237 /* NOTE: it is the same as logic shift because we don't need to shift in 2238 * signedness bit when the shift amount is less than 32. 2239 */ 2240 return shr_reg64_lt32_low(nfp_prog, dst, src); 2241 } 2242 2243 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2244 { 2245 ashr_reg64_lt32_low(nfp_prog, dst, src); 2246 ashr_reg64_lt32_high(nfp_prog, dst, src); 2247 } 2248 2249 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2250 { 2251 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2252 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2253 reg_b(dst + 1), SHF_SC_R_SHF); 2254 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2255 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2256 } 2257 2258 /* Like ashr_imm64, but need to use indirect shift. 
*/
2259 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2260 {
2261 const struct bpf_insn *insn = &meta->insn;
2262 u64 umin, umax;
2263 u8 dst, src;
2264
2265 dst = insn->dst_reg * 2;
2266 umin = meta->umin_src;
2267 umax = meta->umax_src;
2268 if (umin == umax)
2269 return __ashr_imm64(nfp_prog, dst, umin);
2270
2271 src = insn->src_reg * 2;
2272 if (umax < 32) {
2273 ashr_reg64_lt32(nfp_prog, dst, src);
2274 } else if (umin >= 32) {
2275 ashr_reg64_ge32(nfp_prog, dst, src);
2276 } else {
2277 u16 label_ge32, label_end;
2278
2279 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2280 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2281 ashr_reg64_lt32_low(nfp_prog, dst, src);
2282 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2283 emit_br(nfp_prog, BR_UNC, label_end, 2);
2284 /* ashr_reg64_lt32_high packed in delay slot. */
2285 ashr_reg64_lt32_high(nfp_prog, dst, src);
2286
2287 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2288 return -EINVAL;
2289 ashr_reg64_ge32(nfp_prog, dst, src);
2290
2291 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2292 return -EINVAL;
2293 }
2294
2295 return 0;
2296 }
2297
2298 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2299 {
2300 const struct bpf_insn *insn = &meta->insn;
2301
2302 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
2303 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2304
2305 return 0;
2306 }
2307
2308 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2309 {
2310 const struct bpf_insn *insn = &meta->insn;
2311
2312 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
2313 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2314
2315 return 0;
2316 }
2317
2318 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2319 {
2320 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
2321 }
2322
2323 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2324 {
2325 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
2326 }
2327
2328 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2329 {
2330 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
2331 }
2332
2333 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2334 {
2335 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
2336 }
2337
2338 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2339 {
2340 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
2341 }
2342
2343 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2344 {
2345 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
2346 }
2347
2348 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2349 {
2350 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
2351 }
2352
2353 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2354 {
2355 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
2356 }
2357
2358 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2359 {
2360 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
2361 }
2362
2363 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2364 {
2365 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
2366 }
2367
2368 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2369 {
2370 return wrp_mul(nfp_prog, meta, false, true);
2371 }
2372
2373
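/* Note on the wrp_mul() flags, as inferred from the call sites in this
 * file (the helper itself is defined earlier): the first flag appears
 * to select a 64-bit result (mul_reg64/mul_imm64) over a 32-bit one,
 * the second a register multiplier over an immediate.
 */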
static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2374 { 2375 return wrp_mul(nfp_prog, meta, false, false); 2376 } 2377 2378 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2379 { 2380 return div_reg64(nfp_prog, meta); 2381 } 2382 2383 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2384 { 2385 return div_imm64(nfp_prog, meta); 2386 } 2387 2388 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2389 { 2390 u8 dst = meta->insn.dst_reg * 2; 2391 2392 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2393 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2394 2395 return 0; 2396 } 2397 2398 static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2399 { 2400 if (shift_amt) { 2401 /* Set signedness bit (MSB of result). */ 2402 emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, 2403 reg_imm(0)); 2404 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2405 reg_b(dst), SHF_SC_R_SHF, shift_amt); 2406 } 2407 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2408 2409 return 0; 2410 } 2411 2412 static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2413 { 2414 const struct bpf_insn *insn = &meta->insn; 2415 u64 umin, umax; 2416 u8 dst, src; 2417 2418 dst = insn->dst_reg * 2; 2419 umin = meta->umin_src; 2420 umax = meta->umax_src; 2421 if (umin == umax) 2422 return __ashr_imm(nfp_prog, dst, umin); 2423 2424 src = insn->src_reg * 2; 2425 /* NOTE: the first insn will set both indirect shift amount (source A) 2426 * and signedness bit (MSB of result). 2427 */ 2428 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst)); 2429 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2430 reg_b(dst), SHF_SC_R_SHF); 2431 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2432 2433 return 0; 2434 } 2435 2436 static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2437 { 2438 const struct bpf_insn *insn = &meta->insn; 2439 u8 dst = insn->dst_reg * 2; 2440 2441 return __ashr_imm(nfp_prog, dst, insn->imm); 2442 } 2443 2444 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2445 { 2446 const struct bpf_insn *insn = &meta->insn; 2447 2448 if (insn->imm) 2449 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 2450 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 2451 SHF_SC_L_SHF, insn->imm); 2452 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2453 2454 return 0; 2455 } 2456 2457 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2458 { 2459 const struct bpf_insn *insn = &meta->insn; 2460 u8 gpr = insn->dst_reg * 2; 2461 2462 switch (insn->imm) { 2463 case 16: 2464 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2465 SHF_SC_R_ROT, 8); 2466 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2467 SHF_SC_R_SHF, 16); 2468 2469 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2470 break; 2471 case 32: 2472 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2473 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2474 break; 2475 case 64: 2476 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2477 2478 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2479 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2480 break; 2481 } 2482 2483 return 0; 2484 } 2485 2486 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2487 { 2488 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2489 u32 imm_lo, imm_hi; 2490 u8 dst; 2491 2492 dst = prev->insn.dst_reg * 2; 2493 imm_lo = 
prev->insn.imm; 2494 imm_hi = meta->insn.imm; 2495 2496 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2497 2498 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2499 if (imm_hi == imm_lo) 2500 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2501 else 2502 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2503 2504 return 0; 2505 } 2506 2507 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2508 { 2509 meta->double_cb = imm_ld8_part2; 2510 return 0; 2511 } 2512 2513 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2514 { 2515 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2516 } 2517 2518 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2519 { 2520 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2521 } 2522 2523 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2524 { 2525 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2526 } 2527 2528 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2529 { 2530 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2531 meta->insn.src_reg * 2, 1); 2532 } 2533 2534 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2535 { 2536 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2537 meta->insn.src_reg * 2, 2); 2538 } 2539 2540 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2541 { 2542 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2543 meta->insn.src_reg * 2, 4); 2544 } 2545 2546 static int 2547 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2548 unsigned int size, unsigned int ptr_off) 2549 { 2550 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2551 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2552 true, wrp_lmem_load); 2553 } 2554 2555 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2556 u8 size) 2557 { 2558 swreg dst = reg_both(meta->insn.dst_reg * 2); 2559 2560 switch (meta->insn.off) { 2561 case offsetof(struct __sk_buff, len): 2562 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2563 return -EOPNOTSUPP; 2564 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2565 break; 2566 case offsetof(struct __sk_buff, data): 2567 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2568 return -EOPNOTSUPP; 2569 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2570 break; 2571 case offsetof(struct __sk_buff, data_end): 2572 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2573 return -EOPNOTSUPP; 2574 emit_alu(nfp_prog, dst, 2575 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2576 break; 2577 default: 2578 return -EOPNOTSUPP; 2579 } 2580 2581 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2582 2583 return 0; 2584 } 2585 2586 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2587 u8 size) 2588 { 2589 swreg dst = reg_both(meta->insn.dst_reg * 2); 2590 2591 switch (meta->insn.off) { 2592 case offsetof(struct xdp_md, data): 2593 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2594 return -EOPNOTSUPP; 2595 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2596 break; 2597 case offsetof(struct xdp_md, data_end): 2598 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2599 return -EOPNOTSUPP; 2600 emit_alu(nfp_prog, dst, 2601 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2602 break; 2603 default: 2604 return -EOPNOTSUPP; 2605 } 2606 2607 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2608 2609 return 0; 2610 } 2611 2612 
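/* Illustrative example (not taken from a specific program): an XDP
 * program reading ctx->data emits BPF_LDX | BPF_MEM | BPF_W with
 * off == offsetof(struct xdp_md, data), which mem_ldx_xdp() above
 * turns into a single move from pptr_reg(); ctx->data_end needs the
 * extra ALU add of plen_reg() instead.
 */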
static int 2613 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2614 unsigned int size) 2615 { 2616 swreg tmp_reg; 2617 2618 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2619 2620 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2621 tmp_reg, meta->insn.dst_reg * 2, size); 2622 } 2623 2624 static int 2625 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2626 unsigned int size) 2627 { 2628 swreg tmp_reg; 2629 2630 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2631 2632 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2633 tmp_reg, meta->insn.dst_reg * 2, size); 2634 } 2635 2636 static void 2637 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2638 struct nfp_insn_meta *meta) 2639 { 2640 s16 range_start = meta->pkt_cache.range_start; 2641 s16 range_end = meta->pkt_cache.range_end; 2642 swreg src_base, off; 2643 u8 xfer_num, len; 2644 bool indir; 2645 2646 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2647 src_base = reg_a(meta->insn.src_reg * 2); 2648 len = range_end - range_start; 2649 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2650 2651 indir = len > 8 * REG_WIDTH; 2652 /* Setup PREV_ALU for indirect mode. */ 2653 if (indir) 2654 wrp_immed(nfp_prog, reg_none(), 2655 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2656 2657 /* Cache memory into transfer-in registers. */ 2658 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2659 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2660 } 2661 2662 static int 2663 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2664 struct nfp_insn_meta *meta, 2665 unsigned int size) 2666 { 2667 s16 range_start = meta->pkt_cache.range_start; 2668 s16 insn_off = meta->insn.off - range_start; 2669 swreg dst_lo, dst_hi, src_lo, src_mid; 2670 u8 dst_gpr = meta->insn.dst_reg * 2; 2671 u8 len_lo = size, len_mid = 0; 2672 u8 idx = insn_off / REG_WIDTH; 2673 u8 off = insn_off % REG_WIDTH; 2674 2675 dst_hi = reg_both(dst_gpr + 1); 2676 dst_lo = reg_both(dst_gpr); 2677 src_lo = reg_xfer(idx); 2678 2679 /* The read length could involve as many as three registers. */ 2680 if (size > REG_WIDTH - off) { 2681 /* Calculate the part in the second register. */ 2682 len_lo = REG_WIDTH - off; 2683 len_mid = size - len_lo; 2684 2685 /* Calculate the part in the third register. 
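 * For example, with REG_WIDTH == 4, an 8-byte read at off == 1 gives
 * len_lo == 3 and len_mid == REG_WIDTH, leaving one final byte to
 * come from the third transfer register.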
*/ 2686 if (size > 2 * REG_WIDTH - off) 2687 len_mid = REG_WIDTH; 2688 } 2689 2690 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2691 2692 if (!len_mid) { 2693 wrp_immed(nfp_prog, dst_hi, 0); 2694 return 0; 2695 } 2696 2697 src_mid = reg_xfer(idx + 1); 2698 2699 if (size <= REG_WIDTH) { 2700 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2701 wrp_immed(nfp_prog, dst_hi, 0); 2702 } else { 2703 swreg src_hi = reg_xfer(idx + 2); 2704 2705 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2706 REG_WIDTH - len_lo, len_lo); 2707 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2708 REG_WIDTH - len_lo); 2709 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2710 len_lo); 2711 } 2712 2713 return 0; 2714 } 2715 2716 static int 2717 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2718 struct nfp_insn_meta *meta, 2719 unsigned int size) 2720 { 2721 swreg dst_lo, dst_hi, src_lo; 2722 u8 dst_gpr, idx; 2723 2724 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2725 dst_gpr = meta->insn.dst_reg * 2; 2726 dst_hi = reg_both(dst_gpr + 1); 2727 dst_lo = reg_both(dst_gpr); 2728 src_lo = reg_xfer(idx); 2729 2730 if (size < REG_WIDTH) { 2731 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2732 wrp_immed(nfp_prog, dst_hi, 0); 2733 } else if (size == REG_WIDTH) { 2734 wrp_mov(nfp_prog, dst_lo, src_lo); 2735 wrp_immed(nfp_prog, dst_hi, 0); 2736 } else { 2737 swreg src_hi = reg_xfer(idx + 1); 2738 2739 wrp_mov(nfp_prog, dst_lo, src_lo); 2740 wrp_mov(nfp_prog, dst_hi, src_hi); 2741 } 2742 2743 return 0; 2744 } 2745 2746 static int 2747 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2748 struct nfp_insn_meta *meta, unsigned int size) 2749 { 2750 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2751 2752 if (IS_ALIGNED(off, REG_WIDTH)) 2753 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2754 2755 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2756 } 2757 2758 static int 2759 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2760 unsigned int size) 2761 { 2762 if (meta->ldst_gather_len) 2763 return nfp_cpp_memcpy(nfp_prog, meta); 2764 2765 if (meta->ptr.type == PTR_TO_CTX) { 2766 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2767 return mem_ldx_xdp(nfp_prog, meta, size); 2768 else 2769 return mem_ldx_skb(nfp_prog, meta, size); 2770 } 2771 2772 if (meta->ptr.type == PTR_TO_PACKET) { 2773 if (meta->pkt_cache.range_end) { 2774 if (meta->pkt_cache.do_init) 2775 mem_ldx_data_init_pktcache(nfp_prog, meta); 2776 2777 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2778 } else { 2779 return mem_ldx_data(nfp_prog, meta, size); 2780 } 2781 } 2782 2783 if (meta->ptr.type == PTR_TO_STACK) 2784 return mem_ldx_stack(nfp_prog, meta, size, 2785 meta->ptr.off + meta->ptr.var_off.value); 2786 2787 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2788 return mem_ldx_emem(nfp_prog, meta, size); 2789 2790 return -EOPNOTSUPP; 2791 } 2792 2793 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2794 { 2795 return mem_ldx(nfp_prog, meta, 1); 2796 } 2797 2798 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2799 { 2800 return mem_ldx(nfp_prog, meta, 2); 2801 } 2802 2803 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2804 { 2805 return mem_ldx(nfp_prog, meta, 4); 2806 } 2807 2808 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2809 { 2810 return mem_ldx(nfp_prog, meta, 8); 2811 } 2812 2813 static int 2814 
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2815 unsigned int size) 2816 { 2817 u64 imm = meta->insn.imm; /* sign extend */ 2818 swreg off_reg; 2819 2820 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2821 2822 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2823 imm, size); 2824 } 2825 2826 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2827 unsigned int size) 2828 { 2829 if (meta->ptr.type == PTR_TO_PACKET) 2830 return mem_st_data(nfp_prog, meta, size); 2831 2832 return -EOPNOTSUPP; 2833 } 2834 2835 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2836 { 2837 return mem_st(nfp_prog, meta, 1); 2838 } 2839 2840 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2841 { 2842 return mem_st(nfp_prog, meta, 2); 2843 } 2844 2845 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2846 { 2847 return mem_st(nfp_prog, meta, 4); 2848 } 2849 2850 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2851 { 2852 return mem_st(nfp_prog, meta, 8); 2853 } 2854 2855 static int 2856 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2857 unsigned int size) 2858 { 2859 swreg off_reg; 2860 2861 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2862 2863 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2864 meta->insn.src_reg * 2, size); 2865 } 2866 2867 static int 2868 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2869 unsigned int size, unsigned int ptr_off) 2870 { 2871 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2872 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2873 false, wrp_lmem_store); 2874 } 2875 2876 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2877 { 2878 switch (meta->insn.off) { 2879 case offsetof(struct xdp_md, rx_queue_index): 2880 return nfp_queue_select(nfp_prog, meta); 2881 } 2882 2883 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2884 return -EOPNOTSUPP; 2885 } 2886 2887 static int 2888 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2889 unsigned int size) 2890 { 2891 if (meta->ptr.type == PTR_TO_PACKET) 2892 return mem_stx_data(nfp_prog, meta, size); 2893 2894 if (meta->ptr.type == PTR_TO_STACK) 2895 return mem_stx_stack(nfp_prog, meta, size, 2896 meta->ptr.off + meta->ptr.var_off.value); 2897 2898 return -EOPNOTSUPP; 2899 } 2900 2901 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2902 { 2903 return mem_stx(nfp_prog, meta, 1); 2904 } 2905 2906 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2907 { 2908 return mem_stx(nfp_prog, meta, 2); 2909 } 2910 2911 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2912 { 2913 if (meta->ptr.type == PTR_TO_CTX) 2914 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2915 return mem_stx_xdp(nfp_prog, meta); 2916 return mem_stx(nfp_prog, meta, 4); 2917 } 2918 2919 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2920 { 2921 return mem_stx(nfp_prog, meta, 8); 2922 } 2923 2924 static int 2925 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2926 { 2927 u8 dst_gpr = meta->insn.dst_reg * 2; 2928 u8 src_gpr = meta->insn.src_reg * 2; 2929 unsigned int full_add, out; 2930 swreg addra, addrb, off; 2931 2932 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2933 2934 /* We can fit 16 bits into 
command immediate. If we know the immediate
2935 * is guaranteed to either always or never fit into 16 bits, we only
2936 * generate code to handle that particular case; otherwise we generate
2937 * code for both.
2938 */
2939 out = nfp_prog_current_offset(nfp_prog);
2940 full_add = nfp_prog_current_offset(nfp_prog);
2941
2942 if (meta->insn.off) {
2943 out += 2;
2944 full_add += 2;
2945 }
2946 if (meta->xadd_maybe_16bit) {
2947 out += 3;
2948 full_add += 3;
2949 }
2950 if (meta->xadd_over_16bit)
2951 out += 2 + is64;
2952 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2953 out += 5;
2954 full_add += 5;
2955 }
2956
2957 /* Generate the branch for choosing add_imm vs add */
2958 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2959 swreg max_imm = imm_a(nfp_prog);
2960
2961 wrp_immed(nfp_prog, max_imm, 0xffff);
2962 emit_alu(nfp_prog, reg_none(),
2963 max_imm, ALU_OP_SUB, reg_b(src_gpr));
2964 emit_alu(nfp_prog, reg_none(),
2965 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
2966 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
2967 /* defer for add */
2968 }
2969
2970 /* If the insn has an offset, add it to the address */
2971 if (!meta->insn.off) {
2972 addra = reg_a(dst_gpr);
2973 addrb = reg_b(dst_gpr + 1);
2974 } else {
2975 emit_alu(nfp_prog, imma_a(nfp_prog),
2976 reg_a(dst_gpr), ALU_OP_ADD, off);
2977 emit_alu(nfp_prog, imma_b(nfp_prog),
2978 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
2979 addra = imma_a(nfp_prog);
2980 addrb = imma_b(nfp_prog);
2981 }
2982
2983 /* Generate the add_imm if 16 bits are possible */
2984 if (meta->xadd_maybe_16bit) {
2985 swreg prev_alu = imm_a(nfp_prog);
2986
2987 wrp_immed(nfp_prog, prev_alu,
2988 FIELD_PREP(CMD_OVE_DATA, 2) |
2989 CMD_OVE_LEN |
2990 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
2991 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
2992 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
2993 addra, addrb, 0, CMD_CTX_NO_SWAP);
2994
2995 if (meta->xadd_over_16bit)
2996 emit_br(nfp_prog, BR_UNC, out, 0);
2997 }
2998
2999 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
3000 return -EINVAL;
3001
3002 /* Generate the add if 16 bits are not guaranteed */
3003 if (meta->xadd_over_16bit) {
3004 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
3005 addra, addrb, is64 << 2,
3006 is64 ?
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 3007 3008 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 3009 if (is64) 3010 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 3011 } 3012 3013 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 3014 return -EINVAL; 3015 3016 return 0; 3017 } 3018 3019 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3020 { 3021 return mem_xadd(nfp_prog, meta, false); 3022 } 3023 3024 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3025 { 3026 return mem_xadd(nfp_prog, meta, true); 3027 } 3028 3029 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3030 { 3031 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 3032 3033 return 0; 3034 } 3035 3036 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3037 { 3038 const struct bpf_insn *insn = &meta->insn; 3039 u64 imm = insn->imm; /* sign extend */ 3040 swreg or1, or2, tmp_reg; 3041 3042 or1 = reg_a(insn->dst_reg * 2); 3043 or2 = reg_b(insn->dst_reg * 2 + 1); 3044 3045 if (imm & ~0U) { 3046 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3047 emit_alu(nfp_prog, imm_a(nfp_prog), 3048 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3049 or1 = imm_a(nfp_prog); 3050 } 3051 3052 if (imm >> 32) { 3053 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3054 emit_alu(nfp_prog, imm_b(nfp_prog), 3055 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3056 or2 = imm_b(nfp_prog); 3057 } 3058 3059 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 3060 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3061 3062 return 0; 3063 } 3064 3065 static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3066 { 3067 const struct bpf_insn *insn = &meta->insn; 3068 swreg tmp_reg; 3069 3070 tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog)); 3071 emit_alu(nfp_prog, reg_none(), 3072 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3073 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3074 3075 return 0; 3076 } 3077 3078 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3079 { 3080 const struct bpf_insn *insn = &meta->insn; 3081 u64 imm = insn->imm; /* sign extend */ 3082 u8 dst_gpr = insn->dst_reg * 2; 3083 swreg tmp_reg; 3084 3085 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3086 emit_alu(nfp_prog, imm_b(nfp_prog), 3087 reg_a(dst_gpr), ALU_OP_AND, tmp_reg); 3088 /* Upper word of the mask can only be 0 or ~0 from sign extension, 3089 * so either ignore it or OR the whole thing in. 
3090 */ 3091 if (is_mbpf_jmp64(meta) && imm >> 32) { 3092 emit_alu(nfp_prog, reg_none(), 3093 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); 3094 } 3095 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3096 3097 return 0; 3098 } 3099 3100 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3101 { 3102 const struct bpf_insn *insn = &meta->insn; 3103 u64 imm = insn->imm; /* sign extend */ 3104 bool is_jmp32 = is_mbpf_jmp32(meta); 3105 swreg tmp_reg; 3106 3107 if (!imm) { 3108 if (is_jmp32) 3109 emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE, 3110 reg_b(insn->dst_reg * 2)); 3111 else 3112 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 3113 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 3114 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3115 return 0; 3116 } 3117 3118 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3119 emit_alu(nfp_prog, reg_none(), 3120 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3121 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3122 3123 if (is_jmp32) 3124 return 0; 3125 3126 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3127 emit_alu(nfp_prog, reg_none(), 3128 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3129 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3130 3131 return 0; 3132 } 3133 3134 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3135 { 3136 const struct bpf_insn *insn = &meta->insn; 3137 3138 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 3139 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 3140 if (is_mbpf_jmp64(meta)) { 3141 emit_alu(nfp_prog, imm_b(nfp_prog), 3142 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, 3143 reg_b(insn->src_reg * 2 + 1)); 3144 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, 3145 imm_b(nfp_prog)); 3146 } 3147 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3148 3149 return 0; 3150 } 3151 3152 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3153 { 3154 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 3155 } 3156 3157 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3158 { 3159 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 3160 } 3161 3162 static int 3163 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3164 { 3165 u32 ret_tgt, stack_depth, offset_br; 3166 swreg tmp_reg; 3167 3168 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); 3169 /* Space for saving the return address is accounted for by the callee, 3170 * so stack_depth can be zero for the main function. 3171 */ 3172 if (stack_depth) { 3173 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3174 stack_imm(nfp_prog)); 3175 emit_alu(nfp_prog, stack_reg(nfp_prog), 3176 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); 3177 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3178 NFP_CSR_ACT_LM_ADDR0); 3179 } 3180 3181 /* Two cases for jumping to the callee: 3182 * 3183 * - If callee uses and needs to save R6~R9 then: 3184 * 1. Put the start offset of the callee into imm_b(). This will 3185 * require a fixup step, as we do not necessarily know this 3186 * address yet. 3187 * 2. Put the return address from the callee to the caller into 3188 * register ret_reg(). 3189 * 3. (After defer slots are consumed) Jump to the subroutine that 3190 * pushes the registers to the stack. 3191 * The subroutine acts as a trampoline, and returns to the address in 3192 * imm_b(), i.e. jumps to the callee. 
3193 *
3194 * - If callee does not need to save R6~R9 then just load return
3195 * address to the caller in ret_reg(), and jump to the callee
3196 * directly.
3197 *
3198 * Using ret_reg() to pass the return address to the callee is a
3199 * convention set here. The callee can then push this address onto its
3200 * stack frame in its prologue. The advantages of passing the return
3201 * address through ret_reg(), instead of pushing it to the stack right
3202 * here, are the following:
3203 * - It looks cleaner.
3204 * - If the called function is called multiple times, we get a lower
3205 * program size.
3206 * - We save the two no-op instructions that would otherwise have to be
3207 * added just before the emit_br() when stack depth is not null.
3208 * - If we ever find a register to hold the return address during the
3209 * whole execution of the callee, we will not have to push the return
3210 * address to the stack for leaf functions.
3211 */
3212 if (!meta->jmp_dst) {
3213 pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
3214 return -ELOOP;
3215 }
3216 if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
3217 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
3218 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
3219 RELO_BR_GO_CALL_PUSH_REGS);
3220 offset_br = nfp_prog_current_offset(nfp_prog);
3221 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
3222 } else {
3223 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
3224 emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
3225 offset_br = nfp_prog_current_offset(nfp_prog);
3226 }
3227 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
3228
3229 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
3230 return -EINVAL;
3231
3232 if (stack_depth) {
3233 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3234 stack_imm(nfp_prog));
3235 emit_alu(nfp_prog, stack_reg(nfp_prog),
3236 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
3237 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3238 NFP_CSR_ACT_LM_ADDR0);
3239 wrp_nops(nfp_prog, 3);
3240 }
3241
3242 meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
3243 meta->num_insns_after_br -= offset_br;
3244
3245 return 0;
3246 }
3247
3248 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3249 {
3250 switch (meta->insn.imm) {
3251 case BPF_FUNC_xdp_adjust_head:
3252 return adjust_head(nfp_prog, meta);
3253 case BPF_FUNC_xdp_adjust_tail:
3254 return adjust_tail(nfp_prog, meta);
3255 case BPF_FUNC_map_lookup_elem:
3256 case BPF_FUNC_map_update_elem:
3257 case BPF_FUNC_map_delete_elem:
3258 return map_call_stack_common(nfp_prog, meta);
3259 case BPF_FUNC_get_prandom_u32:
3260 return nfp_get_prandom_u32(nfp_prog, meta);
3261 case BPF_FUNC_perf_event_output:
3262 return nfp_perf_event_output(nfp_prog, meta);
3263 default:
3264 WARN_ONCE(1, "verifier allowed unsupported function\n");
3265 return -EOPNOTSUPP;
3266 }
3267 }
3268
3269 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3270 {
3271 if (is_mbpf_pseudo_call(meta))
3272 return bpf_to_bpf_call(nfp_prog, meta);
3273 else
3274 return helper_call(nfp_prog, meta);
3275 }
3276
3277 static bool nfp_is_main_function(struct nfp_insn_meta *meta)
3278 {
3279 return meta->subprog_idx == 0;
3280 }
3281
3282 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3283 {
3284 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
3285
3286 return 0;
3287 }
3288
3289 static int
3290 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct
nfp_insn_meta *meta)
3291 {
3292 if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
3293 /* Pop R6~R9 from the stack via the related subroutine.
3294 * We loaded the return address to the caller into ret_reg().
3295 * This means that the subroutine does not come back here; we
3296 * make it jump back to the subprogram caller directly!
3297 */
3298 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
3299 RELO_BR_GO_CALL_POP_REGS);
3300 /* Pop return address from the stack. */
3301 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3302 } else {
3303 /* Pop return address from the stack. */
3304 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3305 /* Jump back to caller if no callee-saved registers were used
3306 * by the subprogram.
3307 */
3308 emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
3309 }
3310
3311 return 0;
3312 }
3313
3314 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3315 {
3316 if (nfp_is_main_function(meta))
3317 return goto_out(nfp_prog, meta);
3318 else
3319 return nfp_subprog_epilogue(nfp_prog, meta);
3320 }
3321
3322 static const instr_cb_t instr_cb[256] = {
3323 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
3324 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
3325 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
3326 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
3327 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
3328 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
3329 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
3330 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
3331 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
3332 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
3333 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
3334 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
3335 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
3336 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
3337 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
3338 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
3339 [BPF_ALU64 | BPF_NEG] = neg_reg64,
3340 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
3341 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
3342 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64,
3343 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
3344 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64,
3345 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64,
3346 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
3347 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
3348 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
3349 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
3350 [BPF_ALU | BPF_AND | BPF_X] = and_reg,
3351 [BPF_ALU | BPF_AND | BPF_K] = and_imm,
3352 [BPF_ALU | BPF_OR | BPF_X] = or_reg,
3353 [BPF_ALU | BPF_OR | BPF_K] = or_imm,
3354 [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
3355 [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
3356 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
3357 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
3358 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
3359 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
3360 [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
3361 [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
3362 [BPF_ALU | BPF_NEG] = neg_reg,
3363 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
3364 [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
3365 [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
3366 [BPF_ALU | BPF_END | BPF_X] = end_reg32,
3367 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
3368 [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
3369 [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
3370 [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
3371 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
3372 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
3373 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
3374 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
3375 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
3376
[BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
3377 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
3378 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
3379 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
3380 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
3381 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
3382 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
3383 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
3384 [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
3385 [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
3386 [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
3387 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
3388 [BPF_JMP | BPF_JA | BPF_K] = jump,
3389 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
3390 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
3391 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
3392 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
3393 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
3394 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
3395 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
3396 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
3397 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
3398 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
3399 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
3400 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
3401 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
3402 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
3403 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
3404 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
3405 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
3406 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
3407 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
3408 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
3409 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
3410 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
3411 [BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm,
3412 [BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm,
3413 [BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm,
3414 [BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm,
3415 [BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm,
3416 [BPF_JMP32 | BPF_JSGT | BPF_K] = cmp_imm,
3417 [BPF_JMP32 | BPF_JSGE | BPF_K] = cmp_imm,
3418 [BPF_JMP32 | BPF_JSLT | BPF_K] = cmp_imm,
3419 [BPF_JMP32 | BPF_JSLE | BPF_K] = cmp_imm,
3420 [BPF_JMP32 | BPF_JSET | BPF_K] = jset_imm,
3421 [BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm,
3422 [BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg,
3423 [BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg,
3424 [BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg,
3425 [BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg,
3426 [BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg,
3427 [BPF_JMP32 | BPF_JSGT | BPF_X] = cmp_reg,
3428 [BPF_JMP32 | BPF_JSGE | BPF_X] = cmp_reg,
3429 [BPF_JMP32 | BPF_JSLT | BPF_X] = cmp_reg,
3430 [BPF_JMP32 | BPF_JSLE | BPF_X] = cmp_reg,
3431 [BPF_JMP32 | BPF_JSET | BPF_X] = jset_reg,
3432 [BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg,
3433 [BPF_JMP | BPF_CALL] = call,
3434 [BPF_JMP | BPF_EXIT] = jmp_exit,
3435 };
3436
3437 /* --- Assembler logic --- */
3438 static int
3439 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
3440 struct nfp_insn_meta *jmp_dst, u32 br_idx)
3441 {
3442 if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
3443 pr_err("BUG: failed to fix up callee register saving\n");
3444 return -EINVAL;
3445 }
3446
3447 immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
3448
3449 return 0;
3450 }
3451
3452 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
3453 {
3454 struct nfp_insn_meta *meta, *jmp_dst;
3455 u32 idx, br_idx;
3456 int err;
3457
3458 list_for_each_entry(meta, &nfp_prog->insns, l) {
3459 if (meta->flags & FLAG_INSN_SKIP_MASK)
3460 continue;
3461 if (!is_mbpf_jmp(meta))
3462 continue;
3463 if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
3464 !nfp_is_main_function(meta))
3465 continue;
3466 if (is_mbpf_helper_call(meta))
3467 continue;
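/* Find the NFP instruction expected to carry the branch: the last
 * instruction of this BPF insn's expansion, i.e. the one just before
 * the next BPF insn's first NFP instruction (or the last NFP
 * instruction of the program overall).
 */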
3468 3469 if (list_is_last(&meta->l, &nfp_prog->insns)) 3470 br_idx = nfp_prog->last_bpf_off; 3471 else 3472 br_idx = list_next_entry(meta, l)->off - 1; 3473 3474 /* For BPF-to-BPF function call, a stack adjustment sequence is 3475 * generated after the return instruction. Therefore, we must 3476 * withdraw the length of this sequence to have br_idx pointing 3477 * to where the "branch" NFP instruction is expected to be. 3478 */ 3479 if (is_mbpf_pseudo_call(meta)) 3480 br_idx -= meta->num_insns_after_br; 3481 3482 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 3483 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 3484 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 3485 return -ELOOP; 3486 } 3487 3488 if (meta->insn.code == (BPF_JMP | BPF_EXIT)) 3489 continue; 3490 3491 /* Leave special branches for later */ 3492 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3493 RELO_BR_REL && !is_mbpf_pseudo_call(meta)) 3494 continue; 3495 3496 if (!meta->jmp_dst) { 3497 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 3498 return -ELOOP; 3499 } 3500 3501 jmp_dst = meta->jmp_dst; 3502 3503 if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) { 3504 pr_err("Branch landing on removed instruction!!\n"); 3505 return -ELOOP; 3506 } 3507 3508 if (is_mbpf_pseudo_call(meta) && 3509 nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) { 3510 err = nfp_fixup_immed_relo(nfp_prog, meta, 3511 jmp_dst, br_idx); 3512 if (err) 3513 return err; 3514 } 3515 3516 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3517 RELO_BR_REL) 3518 continue; 3519 3520 for (idx = meta->off; idx <= br_idx; idx++) { 3521 if (!nfp_is_br(nfp_prog->prog[idx])) 3522 continue; 3523 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 3524 } 3525 } 3526 3527 return 0; 3528 } 3529 3530 static void nfp_intro(struct nfp_prog *nfp_prog) 3531 { 3532 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 3533 emit_alu(nfp_prog, plen_reg(nfp_prog), 3534 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 3535 } 3536 3537 static void 3538 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3539 { 3540 /* Save return address into the stack. */ 3541 wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog)); 3542 } 3543 3544 static void 3545 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3546 { 3547 unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth; 3548 3549 nfp_prog->stack_frame_depth = round_up(depth, 4); 3550 nfp_subprog_prologue(nfp_prog, meta); 3551 } 3552 3553 bool nfp_is_subprog_start(struct nfp_insn_meta *meta) 3554 { 3555 return meta->flags & FLAG_INSN_IS_SUBPROG_START; 3556 } 3557 3558 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 3559 { 3560 /* TC direct-action mode: 3561 * 0,1 ok NOT SUPPORTED[1] 3562 * 2 drop 0x22 -> drop, count as stat1 3563 * 4,5 nuke 0x02 -> drop 3564 * 7 redir 0x44 -> redir, count as stat2 3565 * * unspec 0x11 -> pass, count as stat0 3566 * 3567 * [1] We can't support OK and RECLASSIFY because we can't tell TC 3568 * the exact decision made. We are forced to support UNSPEC 3569 * to handle aborts so that's the only one we handle for passing 3570 * packets up the stack. 
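 *
 * The hex pairs above appear to encode the action in the low nibble
 * (0x1 pass, 0x2 drop, 0x4 redirect) and a one-hot statistic index
 * in the high nibble; this reading is inferred from the table alone.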
3571 */ 3572 /* Target for aborts */ 3573 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3574 3575 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3576 3577 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3578 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 3579 3580 /* Target for normal exits */ 3581 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3582 3583 /* if R0 > 7 jump to abort */ 3584 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 3585 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3586 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3587 3588 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 3589 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 3590 3591 emit_shf(nfp_prog, reg_a(1), 3592 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 3593 3594 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3595 emit_shf(nfp_prog, reg_a(2), 3596 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3597 3598 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3599 emit_shf(nfp_prog, reg_b(2), 3600 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 3601 3602 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3603 3604 emit_shf(nfp_prog, reg_b(2), 3605 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 3606 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3607 } 3608 3609 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 3610 { 3611 /* XDP return codes: 3612 * 0 aborted 0x82 -> drop, count as stat3 3613 * 1 drop 0x22 -> drop, count as stat1 3614 * 2 pass 0x11 -> pass, count as stat0 3615 * 3 tx 0x44 -> redir, count as stat2 3616 * * unknown 0x82 -> drop, count as stat3 3617 */ 3618 /* Target for aborts */ 3619 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3620 3621 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3622 3623 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3624 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 3625 3626 /* Target for normal exits */ 3627 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3628 3629 /* if R0 > 3 jump to abort */ 3630 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 3631 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3632 3633 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 3634 3635 emit_shf(nfp_prog, reg_a(1), 3636 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 3637 3638 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3639 emit_shf(nfp_prog, reg_b(2), 3640 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3641 3642 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3643 3644 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3645 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3646 } 3647 3648 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog) 3649 { 3650 unsigned int idx; 3651 3652 for (idx = 1; idx < nfp_prog->subprog_cnt; idx++) 3653 if (nfp_prog->subprog[idx].needs_reg_push) 3654 return true; 3655 3656 return false; 3657 } 3658 3659 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog) 3660 { 3661 u8 reg; 3662 3663 /* Subroutine: Save all callee saved registers (R6 ~ R9). 3664 * imm_b() holds the return address. 
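 * The resulting local-memory layout is: slot 0 holds the return
 * address pushed by the callee prologue, slots 1..8 hold the low and
 * high words of R6~R9 in order.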
static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
{
	unsigned int idx;

	for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
		if (nfp_prog->subprog[idx].needs_reg_push)
			return true;

	return false;
}

static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
{
	u8 reg;

	/* Subroutine: save all callee-saved registers (R6 ~ R9).
	 * imm_b() holds the return address.
	 */
	nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
		u8 adj = (reg - BPF_REG_0) * 2;
		u8 idx = (reg - BPF_REG_6) * 2;

		/* The first slot in the stack frame is used to push the return
		 * address in bpf_to_bpf_call(), start just after.
		 */
		wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));

		if (reg == BPF_REG_8)
			/* Prepare to jump back, last 3 insns use defer slots */
			emit_rtn(nfp_prog, imm_b(nfp_prog), 3);

		wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
	}
}

static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
{
	u8 reg;

	/* Subroutine: restore all callee-saved registers (R6 ~ R9).
	 * ret_reg() holds the return address.
	 */
	nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
		u8 adj = (reg - BPF_REG_0) * 2;
		u8 idx = (reg - BPF_REG_6) * 2;

		/* The first slot in the stack frame holds the return address,
		 * start popping just after that.
		 */
		wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));

		if (reg == BPF_REG_8)
			/* Prepare to jump back, last 3 insns use defer slots */
			emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);

		wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
	}
}
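/* For illustration, the order emitted by the push subroutine above (a sketch
 * derived from the loop and the defer count of 3, not a dump of real
 * microcode; b(12..19) follow from adj = (reg - BPF_REG_0) * 2):
 *
 *   mov lm[1], b(12)    ; R6 lo
 *   mov lm[2], b(13)    ; R6 hi
 *   mov lm[3], b(14)    ; R7 lo
 *   mov lm[4], b(15)    ; R7 hi
 *   mov lm[5], b(16)    ; R8 lo
 *   rtn imm_b, defer 3  ; jump back after three more insns
 *   mov lm[6], b(17)    ; R8 hi  (defer slot 1)
 *   mov lm[7], b(18)    ; R9 lo  (defer slot 2)
 *   mov lm[8], b(19)    ; R9 hi  (defer slot 3)
 *
 * The return is issued while R8 is still being saved so the last three moves
 * ride in its defer slots; the pop subroutine mirrors this layout.
 */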
static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		nfp_outro_tc_da(nfp_prog);
		break;
	case BPF_PROG_TYPE_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	default:
		WARN_ON(1);
	}

	if (!nfp_prog_needs_callee_reg_save(nfp_prog))
		return;

	nfp_push_callee_registers(nfp_prog);
	nfp_pop_callee_registers(nfp_prog);
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	unsigned int depth;
	int err;

	depth = nfp_prog->subprog[0].stack_depth;
	nfp_prog->stack_frame_depth = round_up(depth, 4);

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (nfp_is_subprog_start(meta)) {
			nfp_start_subprog(nfp_prog, meta);
			if (nfp_prog->error)
				return nfp_prog->error;
		}

		if (meta->flags & FLAG_INSN_SKIP_MASK) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;
		if (nfp_prog->error)
			return nfp_prog->error;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;

		/* Return as soon as something doesn't match */
		if (!(meta->flags & FLAG_INSN_SKIP_MASK))
			return;
	}
}

/* abs(insn.imm) will fit better into the unrestricted reg immediate -
 * convert add/sub of a negative number into a sub/add of a positive one.
 */
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta))
			continue;
		if (BPF_SRC(insn.code) != BPF_K)
			continue;
		if (insn.imm >= 0)
			continue;

		if (is_mbpf_jmp(meta)) {
			switch (BPF_OP(insn.code)) {
			case BPF_JGE:
			case BPF_JSGE:
			case BPF_JLT:
			case BPF_JSLT:
				meta->jump_neg_op = true;
				break;
			default:
				continue;
			}
		} else {
			if (BPF_OP(insn.code) == BPF_ADD)
				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
			else if (BPF_OP(insn.code) == BPF_SUB)
				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
			else
				continue;

			meta->insn.code = insn.code | BPF_K;
		}

		meta->insn.imm = -insn.imm;
	}
}
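/* For example (a sketch of the rewrite above, shown as BPF asm):
 *
 *   r0 += -4    becomes    r0 -= 4
 *   r0 -= -4    becomes    r0 += 4
 *
 * For the listed conditional jumps only the immediate is negated and
 * jump_neg_op is set, so that code generation later compensates for the
 * flipped sign of the operand.
 */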
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		/* Only a matched pair of full 32-bit shifts (0x20) is
		 * redundant.
		 */
		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
		meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
	}
}

/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the store
 * should be the same, and the load and store should also operate at the same
 * width.  If either addr_src or addr_dest is the stack pointer, we don't do
 * the CPP optimization as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET &&
	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn of this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}
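/* For example, the following pair (BPF asm, hypothetical registers) would
 * qualify, assuming r1 is a packet pointer and r2 points into a map value:
 *
 *   r0 = *(u32 *)(r2 + 0)
 *   *(u32 *)(r1 + 0) = r0
 *
 * The same load paired with a store through the stack pointer would not,
 * since the stack is modelled by registers on the NFP.
 */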
/* Currently, we only support chaining load/store pairs if:
 *
 *  - Their address base registers are the same.
 *  - Their address offsets are in the same order.
 *  - They operate at the same memory width.
 *  - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return true if a cross memory access happens, i.e. the store area overlaps
 * the load area such that a later load might read a value written by a
 * previous store.  In that case we can't treat the sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET, check the ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets.  Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}
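/* A small worked example of the ascending cross check above, with made-up
 * offsets: if the head load reads bytes [0, 4) and the head store writes
 * bytes [4, 8) of the same packet, a later load at offset 4 satisfies
 * ld_off > head_ld_off and ld_off >= head_st_off, so it may observe the
 * earlier store and the chain cannot be turned into a single copy.
 */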
/* This pass tries to identify the following instruction sequences:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * Such a sequence is typically generated by the compiler when lowering
 * memcpy; the NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset the record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crosses memory areas with the
		 *   previous pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->flags |=
					FLAG_INSN_SKIP_PREC_DEPENDENT;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then it
			 * could serve as the new head of the next chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
			meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}
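/* As an example of the bookkeeping above (made-up offsets, 4-byte width):
 * three chained pairs copying bytes 0, 4 and 8 leave the head load with
 * ldst_gather_len == 12; the second and third pairs are flagged
 * FLAG_INSN_SKIP_PREC_DEPENDENT as they are walked, and when the chain ends
 * the head store is flagged too, with its insn recorded in paired_st.  A
 * descending chain additionally rebases the head offsets and negates
 * ldst_gather_len to encode the direction.
 */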
static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}

		/* Check the ID to make sure two reads share the same variable
		 * offset against PTR_TO_PACKET, and check OFF to make sure
		 * they also share the same constant offset.
		 *
		 * OFFs don't really need to be the same, because they are the
		 * constant offsets against PTR_TO_PACKET, so for different
		 * OFFs we could canonicalize them to offsets against the
		 * original packet pointer.  We don't support this.
		 */
		if (meta->ptr.id == range_ptr_id &&
		    meta->ptr.off == range_ptr_off) {
			s16 new_start = range_start;
			s16 end, off = insn->off;
			s16 new_end = range_end;
			bool changed = false;

			if (off < range_start) {
				new_start = off;
				changed = true;
			}

			end = off + BPF_LDST_BYTES(insn);
			if (end > range_end) {
				new_end = end;
				changed = true;
			}

			if (!changed)
				continue;

			if (new_end - new_start <= 64) {
				/* Install new range. */
				range_start = new_start;
				range_end = new_end;
				continue;
			}
		}

end_current_then_start_new:
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
start_new:
		range_node = meta;
		range_node->pkt_cache.do_init = true;
		range_ptr_id = range_node->ptr.id;
		range_ptr_off = range_node->ptr.off;
		range_start = insn->off;
		range_end = insn->off + BPF_LDST_BYTES(insn);
	}

	if (range_node) {
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
	}

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
			if (meta->pkt_cache.do_init) {
				range_start = meta->pkt_cache.range_start;
				range_end = meta->pkt_cache.range_end;
			} else {
				meta->pkt_cache.range_start = range_start;
				meta->pkt_cache.range_end = range_end;
			}
		}
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_neg_add_sub(nfp_prog);
	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->flags & FLAG_INSN_SKIP_MASK ||
		    meta2->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
			continue;

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

	return 0;
}
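/* For illustration, the rewrite performed by nfp_bpf_replace_map_ptrs() on a
 * hypothetical map with target id 3:
 *
 *   before:  BPF_LD_IMM64 r1, <host pointer of struct bpf_map>
 *   after:   BPF_LD_IMM64 r1, 3    (high 32 bits cleared)
 *
 * so the value the firmware sees is the NFP map tid (or the kernel map id
 * for offload-neutral maps), never a host virtual address.
 */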
static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct nfp_insn_meta *dst_meta;
		u64 code = meta->insn.code;
		unsigned int dst_idx;
		bool pseudo_call;

		if (!is_mbpf_jmp(meta))
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (is_mbpf_helper_call(meta))
			continue;

		/* If the opcode is BPF_CALL at this point, this can only be a
		 * BPF-to-BPF call (a.k.a. pseudo call).
		 */
		pseudo_call = BPF_OP(code) == BPF_CALL;

		if (pseudo_call)
			dst_idx = meta->n + 1 + meta->insn.imm;
		else
			dst_idx = meta->n + 1 + meta->insn.off;

		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);

		if (pseudo_call)
			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		meta->jmp_dst = dst_meta;
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;
		u16 off;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_GO_CALL_PUSH_REGS:
			if (!nfp_prog->tgt_call_push_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_push_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_GO_CALL_POP_REGS:
			if (!nfp_prog->tgt_call_pop_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			case BPF_FUNC_perf_event_output:
				val = nfp_prog->bpf->helpers.perf_event_output;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}