// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* For each "multiple" entries macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l); \
	     &(nfp_prog)->insns != &pos->l && \
	     &(nfp_prog)->insns != &next->l; \
	     pos = nfp_meta_next(pos), \
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l), \
	     next2 = list_next_entry(next, l); \
	     &(nfp_prog)->insns != &pos->l && \
	     &(nfp_prog)->insns != &next->l && \
	     &(nfp_prog)->insns != &next2->l; \
	     pos = nfp_meta_next(pos), \
	     next = nfp_meta_next(pos), \
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the translation
	 * will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn = FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}
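
/* Added note: emit_br_relo() below emits a branch and records @relo in the
 * instruction word so the target can be fixed up at relocation time; per the
 * check in the function, an unconditional branch may use at most 2 defer
 * slots.
 */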
static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X, we need right rotate X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_BR_ALU_BASE |
		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
		      reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
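
/* Added note: multiplication on the NFP is built from a sequence of mul-step
 * instructions emitted via emit_mul() below; see wrp_mul_u16() and
 * wrp_mul_u32() further down for the full 16x16 and 32x32 sequences.
 */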
static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the step is MUL_LAST or MUL_LAST_2, the left source is
		 * used as the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra, so if the param is an immed we encode it as reg_none()
	 * and copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are not changed.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}

/* The NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes, then
		 * another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned part
		 * of the length, then another direct_ref write8 to write the
		 * remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before
	 * this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
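
/* Added note: data_ld_host_order() below is the host (little endian) byte
 * order counterpart of data_ld() - sub-word loads are masked into the
 * destination with ld_field, full words are moved straight from the transfer
 * registers.
 */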
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
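
/* Added note: the lmem_step callbacks below copy at most 4 bytes between a
 * GPR and stack (LMEM) per invocation.  @first/@last mark the first and last
 * slice of the access, @new_gpr is set when the slice starts a new GPR, @lm3
 * selects LM index 3 instead of index 0, and @needs_inc requests the
 * post-increment LM addressing mode.
 */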
static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already looked at that LMEM word
		 * and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations are going to need
		 * RMW, the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const ||
	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->flags |= FLAG_INSN_SKIP_NOOP;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}
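
/* Added note: wrp_test_reg() ALU-tests dst against src and branches on the
 * resulting condition codes; the ALU result itself is discarded (reg_none()
 * destination).  For 64-bit jumps the high words are tested as well.
 */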
static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	if (is_mbpf_jmp64(meta))
		wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
				 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4] = { BR_BLO, true },
	[BPF_JGE >> 4] = { BR_BHS, false },
	[BPF_JLT >> 4] = { BR_BLO, false },
	[BPF_JLE >> 4] = { BR_BHS, true },
	[BPF_JSGT >> 4] = { BR_BLT, true },
	[BPF_JSGE >> 4] = { BR_BGE, false },
	[BPF_JSLT >> 4] = { BR_BLT, false },
	[BPF_JSLE >> 4] = { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}

static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	if (is_mbpf_jmp64(meta)) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		if (!code->swap)
			emit_alu(nfp_prog, reg_none(),
				 reg_a(reg + 1), carry_op, tmp_reg);
		else
			emit_alu(nfp_prog, reg_none(),
				 tmp_reg, carry_op, reg_a(reg + 1));
	}

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	if (is_mbpf_jmp64(meta))
		emit_alu(nfp_prog, reg_none(),
			 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
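
/* Added note: wrp_mul_u32() performs a full 32x32 multiply - a start step,
 * four 32x32 steps, then MUL_LAST to read back the low half and, if
 * requested, MUL_LAST_2 for the high half (which is zeroed otherwise).
 */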
static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
	if (gen_high_half)
		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
			 reg_none());
	else
		wrp_immed(nfp_prog, dst_hi, 0);
}

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{
	swreg multiplier, multiplicand, dst_hi, dst_lo;
	const struct bpf_insn *insn = &meta->insn;
	u32 lopnd_max, ropnd_max;
	u8 dst_reg;

	dst_reg = insn->dst_reg;
	multiplicand = reg_a(dst_reg * 2);
	dst_hi = reg_both(dst_reg * 2 + 1);
	dst_lo = reg_both(dst_reg * 2);
	lopnd_max = meta->umax_dst;
	if (ropnd_from_reg) {
		multiplier = reg_b(insn->src_reg * 2);
		ropnd_max = meta->umax_src;
	} else {
		u32 imm = insn->imm;

		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
		ropnd_max = imm;
	}
	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
			    gen_high_half);
	else
		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);

	return 0;
}

static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{
	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	swreg magic;

	if (imm > U32_MAX) {
		wrp_immed(nfp_prog, dst_both, 0);
		return 0;
	}

	/* NOTE: because we are using "reciprocal_value_adv" which doesn't
	 * support "divisor > (1u << 31)", we need to JIT a separate NFP
	 * sequence to handle such a case, which is actually equal to the
	 * result of the unsigned comparison "dst >= imm" and can be calculated
	 * using the following NFP sequence:
	 *
	 *  alu[--, dst, -, imm]
	 *  immed[imm, 0]
	 *  alu[dst, imm, +carry, 0]
	 */
	if (imm > 1U << 31) {
		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));

		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
			 reg_imm(0));
		return 0;
	}

	rvalue = reciprocal_value_adv(imm, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(imm & 1)) {
		pre_shift = fls(imm & -imm) - 1;
		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}
	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
	if (imm == 1U << exp) {
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, exp);
	} else if (rvalue.is_wide_m) {
		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
			    magic, true);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, 1);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, rvalue.sh - 1);
	} else {
		if (pre_shift)
			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
				 dst_b, SHF_SC_R_SHF, pre_shift);
		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
			 dst_b, SHF_SC_R_SHF, rvalue.sh);
	}

	return 0;
}

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
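
/* Added note: adjust_tail() validates the length delta and updates both the
 * packet length register and the length in the packet vector; r0 is set to 0
 * on success, or -EINVAL (-22) when the new length would underflow or drop
 * below ETH_HLEN.
 */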
static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 ret_einval, end;
	swreg plen, delta;

	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));

	plen = imm_a(nfp_prog);
	delta = reg_a(2 * 2);

	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
	end = nfp_prog_current_offset(nfp_prog) + 11;

	/* Calculate resulting length */
	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
	/* delta == 0 is not allowed by the kernel, add must overflow to make
	 * length smaller.
	 */
	emit_br(nfp_prog, BR_BCC, ret_einval, 0);

	/* if (new_len < 14) then -EINVAL */
	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_ADD, delta);

	emit_br(nfp_prog, BR_UNC, end, 2);
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_frame_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here, we will jump over next instruction if queue
	 * value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_frame_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
			 ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
* 2 + 1)); 1876 1877 return 0; 1878 } 1879 1880 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1881 { 1882 const struct bpf_insn *insn = &meta->insn; 1883 u64 imm = insn->imm; /* sign extend */ 1884 1885 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1886 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1887 1888 return 0; 1889 } 1890 1891 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1892 { 1893 const struct bpf_insn *insn = &meta->insn; 1894 1895 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1896 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1897 reg_b(insn->src_reg * 2)); 1898 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1899 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1900 reg_b(insn->src_reg * 2 + 1)); 1901 1902 return 0; 1903 } 1904 1905 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1906 { 1907 const struct bpf_insn *insn = &meta->insn; 1908 u64 imm = insn->imm; /* sign extend */ 1909 1910 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1911 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1912 1913 return 0; 1914 } 1915 1916 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1917 { 1918 return wrp_mul(nfp_prog, meta, true, true); 1919 } 1920 1921 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1922 { 1923 return wrp_mul(nfp_prog, meta, true, false); 1924 } 1925 1926 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1927 { 1928 const struct bpf_insn *insn = &meta->insn; 1929 1930 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); 1931 } 1932 1933 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1934 { 1935 /* NOTE: verifier hook has rejected cases for which verifier doesn't 1936 * know whether the source operand is constant or not. 1937 */ 1938 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); 1939 } 1940 1941 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1942 { 1943 const struct bpf_insn *insn = &meta->insn; 1944 1945 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1946 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1947 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1948 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1949 1950 return 0; 1951 } 1952 1953 /* Pseudo code: 1954 * if shift_amt >= 32 1955 * dst_high = dst_low << shift_amt[4:0] 1956 * dst_low = 0; 1957 * else 1958 * dst_high = (dst_high, dst_low) >> (32 - shift_amt) 1959 * dst_low = dst_low << shift_amt 1960 * 1961 * The indirect shift will use the same logic at runtime. 
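 *
 * Worked example (illustrative): for dst = 0x00000001_80000000 and
 * shift_amt = 4 the "else" branch applies:
 *   dst_high = 0x00000001_80000000 >> (32 - 4) = 0x00000018
 *   dst_low  = 0x80000000 << 4                 = 0x00000000
 * giving 0x00000018_00000000, as expected for a 64-bit left shift by 4.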
1962 */ 1963 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1964 { 1965 if (!shift_amt) 1966 return 0; 1967 1968 if (shift_amt < 32) { 1969 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), 1970 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, 1971 32 - shift_amt); 1972 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1973 reg_b(dst), SHF_SC_L_SHF, shift_amt); 1974 } else if (shift_amt == 32) { 1975 wrp_reg_mov(nfp_prog, dst + 1, dst); 1976 wrp_immed(nfp_prog, reg_both(dst), 0); 1977 } else if (shift_amt > 32) { 1978 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1979 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); 1980 wrp_immed(nfp_prog, reg_both(dst), 0); 1981 } 1982 1983 return 0; 1984 } 1985 1986 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1987 { 1988 const struct bpf_insn *insn = &meta->insn; 1989 u8 dst = insn->dst_reg * 2; 1990 1991 return __shl_imm64(nfp_prog, dst, insn->imm); 1992 } 1993 1994 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1995 { 1996 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, 1997 reg_b(src)); 1998 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); 1999 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, 2000 reg_b(dst), SHF_SC_R_DSHF); 2001 } 2002 2003 /* NOTE: for indirect left shift, HIGH part should be calculated first. */ 2004 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2005 { 2006 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2007 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2008 reg_b(dst), SHF_SC_L_SHF); 2009 } 2010 2011 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2012 { 2013 shl_reg64_lt32_high(nfp_prog, dst, src); 2014 shl_reg64_lt32_low(nfp_prog, dst, src); 2015 } 2016 2017 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2018 { 2019 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2020 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2021 reg_b(dst), SHF_SC_L_SHF); 2022 wrp_immed(nfp_prog, reg_both(dst), 0); 2023 } 2024 2025 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2026 { 2027 const struct bpf_insn *insn = &meta->insn; 2028 u64 umin, umax; 2029 u8 dst, src; 2030 2031 dst = insn->dst_reg * 2; 2032 umin = meta->umin_src; 2033 umax = meta->umax_src; 2034 if (umin == umax) 2035 return __shl_imm64(nfp_prog, dst, umin); 2036 2037 src = insn->src_reg * 2; 2038 if (umax < 32) { 2039 shl_reg64_lt32(nfp_prog, dst, src); 2040 } else if (umin >= 32) { 2041 shl_reg64_ge32(nfp_prog, dst, src); 2042 } else { 2043 /* Generate different instruction sequences depending on runtime 2044 * value of shift amount. 2045 */ 2046 u16 label_ge32, label_end; 2047 2048 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; 2049 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2050 2051 shl_reg64_lt32_high(nfp_prog, dst, src); 2052 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2053 emit_br(nfp_prog, BR_UNC, label_end, 2); 2054 /* shl_reg64_lt32_low packed in delay slot. 
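 * The unconditional branch above carries two defer slots and
 * shl_reg64_lt32_low() emits exactly two instructions (the ALU op that
 * sets up the indirect shift amount and the indirect shift itself), so
 * the low word is computed in those slots before the jump to label_end
 * takes effect.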
*/ 2055 shl_reg64_lt32_low(nfp_prog, dst, src); 2056 2057 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2058 return -EINVAL; 2059 shl_reg64_ge32(nfp_prog, dst, src); 2060 2061 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2062 return -EINVAL; 2063 } 2064 2065 return 0; 2066 } 2067 2068 /* Pseudo code: 2069 * if shift_amt >= 32 2070 * dst_high = 0; 2071 * dst_low = dst_high >> shift_amt[4:0] 2072 * else 2073 * dst_high = dst_high >> shift_amt 2074 * dst_low = (dst_high, dst_low) >> shift_amt 2075 * 2076 * The indirect shift will use the same logic at runtime. 2077 */ 2078 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2079 { 2080 if (!shift_amt) 2081 return 0; 2082 2083 if (shift_amt < 32) { 2084 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2085 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2086 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2087 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2088 } else if (shift_amt == 32) { 2089 wrp_reg_mov(nfp_prog, dst, dst + 1); 2090 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2091 } else if (shift_amt > 32) { 2092 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2093 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2094 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2095 } 2096 2097 return 0; 2098 } 2099 2100 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2101 { 2102 const struct bpf_insn *insn = &meta->insn; 2103 u8 dst = insn->dst_reg * 2; 2104 2105 return __shr_imm64(nfp_prog, dst, insn->imm); 2106 } 2107 2108 /* NOTE: for indirect right shift, LOW part should be calculated first. */ 2109 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2110 { 2111 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2112 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2113 reg_b(dst + 1), SHF_SC_R_SHF); 2114 } 2115 2116 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2117 { 2118 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2119 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2120 reg_b(dst), SHF_SC_R_DSHF); 2121 } 2122 2123 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2124 { 2125 shr_reg64_lt32_low(nfp_prog, dst, src); 2126 shr_reg64_lt32_high(nfp_prog, dst, src); 2127 } 2128 2129 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2130 { 2131 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2132 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2133 reg_b(dst + 1), SHF_SC_R_SHF); 2134 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2135 } 2136 2137 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2138 { 2139 const struct bpf_insn *insn = &meta->insn; 2140 u64 umin, umax; 2141 u8 dst, src; 2142 2143 dst = insn->dst_reg * 2; 2144 umin = meta->umin_src; 2145 umax = meta->umax_src; 2146 if (umin == umax) 2147 return __shr_imm64(nfp_prog, dst, umin); 2148 2149 src = insn->src_reg * 2; 2150 if (umax < 32) { 2151 shr_reg64_lt32(nfp_prog, dst, src); 2152 } else if (umin >= 32) { 2153 shr_reg64_ge32(nfp_prog, dst, src); 2154 } else { 2155 /* Generate different instruction sequences depending on runtime 2156 * value of shift amount. 
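 * Bit 5 of the shift amount (the 32s bit) selects the sequence: the
 * br_bset below jumps to the >= 32 code when it is set, otherwise the
 * < 32 sequence runs and then branches over the >= 32 code.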
2157 */ 2158 u16 label_ge32, label_end; 2159 2160 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2161 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2162 shr_reg64_lt32_low(nfp_prog, dst, src); 2163 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2164 emit_br(nfp_prog, BR_UNC, label_end, 2); 2165 /* shr_reg64_lt32_high packed in delay slot. */ 2166 shr_reg64_lt32_high(nfp_prog, dst, src); 2167 2168 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2169 return -EINVAL; 2170 shr_reg64_ge32(nfp_prog, dst, src); 2171 2172 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2173 return -EINVAL; 2174 } 2175 2176 return 0; 2177 } 2178 2179 /* Code logic is the same as __shr_imm64 except ashr requires signedness bit 2180 * told through PREV_ALU result. 2181 */ 2182 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2183 { 2184 if (!shift_amt) 2185 return 0; 2186 2187 if (shift_amt < 32) { 2188 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2189 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2190 /* Set signedness bit. */ 2191 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2192 reg_imm(0)); 2193 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2194 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2195 } else if (shift_amt == 32) { 2196 /* NOTE: this also helps setting signedness bit. */ 2197 wrp_reg_mov(nfp_prog, dst, dst + 1); 2198 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2199 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2200 } else if (shift_amt > 32) { 2201 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2202 reg_imm(0)); 2203 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2204 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2205 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2206 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2207 } 2208 2209 return 0; 2210 } 2211 2212 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2213 { 2214 const struct bpf_insn *insn = &meta->insn; 2215 u8 dst = insn->dst_reg * 2; 2216 2217 return __ashr_imm64(nfp_prog, dst, insn->imm); 2218 } 2219 2220 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2221 { 2222 /* NOTE: the first insn will set both indirect shift amount (source A) 2223 * and signedness bit (MSB of result). 2224 */ 2225 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2226 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2227 reg_b(dst + 1), SHF_SC_R_SHF); 2228 } 2229 2230 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2231 { 2232 /* NOTE: it is the same as logic shift because we don't need to shift in 2233 * signedness bit when the shift amount is less than 32. 2234 */ 2235 return shr_reg64_lt32_low(nfp_prog, dst, src); 2236 } 2237 2238 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2239 { 2240 ashr_reg64_lt32_low(nfp_prog, dst, src); 2241 ashr_reg64_lt32_high(nfp_prog, dst, src); 2242 } 2243 2244 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2245 { 2246 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2247 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2248 reg_b(dst + 1), SHF_SC_R_SHF); 2249 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2250 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2251 } 2252 2253 /* Like ashr_imm64, but need to use indirect shift. 
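 * Worked example (illustrative): 0x80000000_00000000 shifted right
 * arithmetically by 32 must become 0xffffffff_80000000, whereas the
 * logical shift in shr_reg64() would yield 0x00000000_80000000; the
 * ASHR operations used below replicate the sign bit into the high word.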
*/ 2254 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2255 { 2256 const struct bpf_insn *insn = &meta->insn; 2257 u64 umin, umax; 2258 u8 dst, src; 2259 2260 dst = insn->dst_reg * 2; 2261 umin = meta->umin_src; 2262 umax = meta->umax_src; 2263 if (umin == umax) 2264 return __ashr_imm64(nfp_prog, dst, umin); 2265 2266 src = insn->src_reg * 2; 2267 if (umax < 32) { 2268 ashr_reg64_lt32(nfp_prog, dst, src); 2269 } else if (umin >= 32) { 2270 ashr_reg64_ge32(nfp_prog, dst, src); 2271 } else { 2272 u16 label_ge32, label_end; 2273 2274 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2275 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2276 ashr_reg64_lt32_low(nfp_prog, dst, src); 2277 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2278 emit_br(nfp_prog, BR_UNC, label_end, 2); 2279 /* ashr_reg64_lt32_high packed in delay slot. */ 2280 ashr_reg64_lt32_high(nfp_prog, dst, src); 2281 2282 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2283 return -EINVAL; 2284 ashr_reg64_ge32(nfp_prog, dst, src); 2285 2286 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2287 return -EINVAL; 2288 } 2289 2290 return 0; 2291 } 2292 2293 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2294 { 2295 const struct bpf_insn *insn = &meta->insn; 2296 2297 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 2298 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2299 2300 return 0; 2301 } 2302 2303 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2304 { 2305 const struct bpf_insn *insn = &meta->insn; 2306 2307 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 2308 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2309 2310 return 0; 2311 } 2312 2313 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2314 { 2315 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 2316 } 2317 2318 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2319 { 2320 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR); 2321 } 2322 2323 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2324 { 2325 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 2326 } 2327 2328 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2329 { 2330 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND); 2331 } 2332 2333 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2334 { 2335 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 2336 } 2337 2338 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2339 { 2340 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR); 2341 } 2342 2343 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2344 { 2345 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 2346 } 2347 2348 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2349 { 2350 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD); 2351 } 2352 2353 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2354 { 2355 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 2356 } 2357 2358 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2359 { 2360 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB); 2361 } 2362 2363 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2364 { 2365 return wrp_mul(nfp_prog, meta, false, true); 2366 } 2367 2368 static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2369 { 2370 
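	/* Reading of the flags from the call sites in this file (not
	 * taken from wrp_mul() itself): the first appears to select a
	 * 64-bit result, the second a register multiplier, so
	 * false/false is a 32-bit multiply by the immediate operand.
	 */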
return wrp_mul(nfp_prog, meta, false, false); 2371 } 2372 2373 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2374 { 2375 return div_reg64(nfp_prog, meta); 2376 } 2377 2378 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2379 { 2380 return div_imm64(nfp_prog, meta); 2381 } 2382 2383 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2384 { 2385 u8 dst = meta->insn.dst_reg * 2; 2386 2387 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2388 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2389 2390 return 0; 2391 } 2392 2393 static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2394 { 2395 if (shift_amt) { 2396 /* Set signedness bit (MSB of result). */ 2397 emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, 2398 reg_imm(0)); 2399 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2400 reg_b(dst), SHF_SC_R_SHF, shift_amt); 2401 } 2402 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2403 2404 return 0; 2405 } 2406 2407 static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2408 { 2409 const struct bpf_insn *insn = &meta->insn; 2410 u64 umin, umax; 2411 u8 dst, src; 2412 2413 dst = insn->dst_reg * 2; 2414 umin = meta->umin_src; 2415 umax = meta->umax_src; 2416 if (umin == umax) 2417 return __ashr_imm(nfp_prog, dst, umin); 2418 2419 src = insn->src_reg * 2; 2420 /* NOTE: the first insn will set both indirect shift amount (source A) 2421 * and signedness bit (MSB of result). 2422 */ 2423 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst)); 2424 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2425 reg_b(dst), SHF_SC_R_SHF); 2426 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2427 2428 return 0; 2429 } 2430 2431 static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2432 { 2433 const struct bpf_insn *insn = &meta->insn; 2434 u8 dst = insn->dst_reg * 2; 2435 2436 return __ashr_imm(nfp_prog, dst, insn->imm); 2437 } 2438 2439 static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2440 { 2441 if (shift_amt) 2442 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2443 reg_b(dst), SHF_SC_R_SHF, shift_amt); 2444 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2445 return 0; 2446 } 2447 2448 static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2449 { 2450 const struct bpf_insn *insn = &meta->insn; 2451 u8 dst = insn->dst_reg * 2; 2452 2453 return __shr_imm(nfp_prog, dst, insn->imm); 2454 } 2455 2456 static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2457 { 2458 const struct bpf_insn *insn = &meta->insn; 2459 u64 umin, umax; 2460 u8 dst, src; 2461 2462 dst = insn->dst_reg * 2; 2463 umin = meta->umin_src; 2464 umax = meta->umax_src; 2465 if (umin == umax) 2466 return __shr_imm(nfp_prog, dst, umin); 2467 2468 src = insn->src_reg * 2; 2469 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2470 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2471 reg_b(dst), SHF_SC_R_SHF); 2472 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2473 return 0; 2474 } 2475 2476 static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2477 { 2478 if (shift_amt) 2479 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2480 reg_b(dst), SHF_SC_L_SHF, shift_amt); 2481 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2482 return 0; 2483 } 2484 2485 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2486 { 2487 
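	/* The verifier is expected to have rejected out-of-range shift
	 * amounts for 32-bit ALU ops, so insn->imm below is already a
	 * valid (< 32) shift count.
	 */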
const struct bpf_insn *insn = &meta->insn; 2488 u8 dst = insn->dst_reg * 2; 2489 2490 return __shl_imm(nfp_prog, dst, insn->imm); 2491 } 2492 2493 static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2494 { 2495 const struct bpf_insn *insn = &meta->insn; 2496 u64 umin, umax; 2497 u8 dst, src; 2498 2499 dst = insn->dst_reg * 2; 2500 umin = meta->umin_src; 2501 umax = meta->umax_src; 2502 if (umin == umax) 2503 return __shl_imm(nfp_prog, dst, umin); 2504 2505 src = insn->src_reg * 2; 2506 shl_reg64_lt32_low(nfp_prog, dst, src); 2507 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2508 return 0; 2509 } 2510 2511 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2512 { 2513 const struct bpf_insn *insn = &meta->insn; 2514 u8 gpr = insn->dst_reg * 2; 2515 2516 switch (insn->imm) { 2517 case 16: 2518 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2519 SHF_SC_R_ROT, 8); 2520 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2521 SHF_SC_R_SHF, 16); 2522 2523 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2524 break; 2525 case 32: 2526 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2527 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2528 break; 2529 case 64: 2530 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2531 2532 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2533 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2534 break; 2535 } 2536 2537 return 0; 2538 } 2539 2540 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2541 { 2542 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2543 u32 imm_lo, imm_hi; 2544 u8 dst; 2545 2546 dst = prev->insn.dst_reg * 2; 2547 imm_lo = prev->insn.imm; 2548 imm_hi = meta->insn.imm; 2549 2550 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2551 2552 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2553 if (imm_hi == imm_lo) 2554 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2555 else 2556 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2557 2558 return 0; 2559 } 2560 2561 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2562 { 2563 meta->double_cb = imm_ld8_part2; 2564 return 0; 2565 } 2566 2567 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2568 { 2569 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2570 } 2571 2572 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2573 { 2574 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2575 } 2576 2577 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2578 { 2579 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2580 } 2581 2582 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2583 { 2584 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2585 meta->insn.src_reg * 2, 1); 2586 } 2587 2588 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2589 { 2590 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2591 meta->insn.src_reg * 2, 2); 2592 } 2593 2594 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2595 { 2596 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2597 meta->insn.src_reg * 2, 4); 2598 } 2599 2600 static int 2601 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2602 unsigned int size, unsigned int ptr_off) 2603 { 2604 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2605 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2606 true, wrp_lmem_load); 2607 } 2608 2609 static int mem_ldx_skb(struct 
nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2610 u8 size) 2611 { 2612 swreg dst = reg_both(meta->insn.dst_reg * 2); 2613 2614 switch (meta->insn.off) { 2615 case offsetof(struct __sk_buff, len): 2616 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2617 return -EOPNOTSUPP; 2618 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2619 break; 2620 case offsetof(struct __sk_buff, data): 2621 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2622 return -EOPNOTSUPP; 2623 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2624 break; 2625 case offsetof(struct __sk_buff, data_end): 2626 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2627 return -EOPNOTSUPP; 2628 emit_alu(nfp_prog, dst, 2629 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2630 break; 2631 default: 2632 return -EOPNOTSUPP; 2633 } 2634 2635 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2636 2637 return 0; 2638 } 2639 2640 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2641 u8 size) 2642 { 2643 swreg dst = reg_both(meta->insn.dst_reg * 2); 2644 2645 switch (meta->insn.off) { 2646 case offsetof(struct xdp_md, data): 2647 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2648 return -EOPNOTSUPP; 2649 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2650 break; 2651 case offsetof(struct xdp_md, data_end): 2652 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2653 return -EOPNOTSUPP; 2654 emit_alu(nfp_prog, dst, 2655 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2656 break; 2657 default: 2658 return -EOPNOTSUPP; 2659 } 2660 2661 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2662 2663 return 0; 2664 } 2665 2666 static int 2667 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2668 unsigned int size) 2669 { 2670 swreg tmp_reg; 2671 2672 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2673 2674 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2675 tmp_reg, meta->insn.dst_reg * 2, size); 2676 } 2677 2678 static int 2679 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2680 unsigned int size) 2681 { 2682 swreg tmp_reg; 2683 2684 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2685 2686 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2687 tmp_reg, meta->insn.dst_reg * 2, size); 2688 } 2689 2690 static void 2691 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2692 struct nfp_insn_meta *meta) 2693 { 2694 s16 range_start = meta->pkt_cache.range_start; 2695 s16 range_end = meta->pkt_cache.range_end; 2696 swreg src_base, off; 2697 u8 xfer_num, len; 2698 bool indir; 2699 2700 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2701 src_base = reg_a(meta->insn.src_reg * 2); 2702 len = range_end - range_start; 2703 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2704 2705 indir = len > 8 * REG_WIDTH; 2706 /* Setup PREV_ALU for indirect mode. */ 2707 if (indir) 2708 wrp_immed(nfp_prog, reg_none(), 2709 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2710 2711 /* Cache memory into transfer-in registers. 
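 * Illustrative sizing, assuming 4-byte transfer registers
 * (REG_WIDTH == 4): a 40-byte cached range needs xfer_num = 10
 * registers, more than the eight covered by the direct length encoding
 * (the len > 8 * REG_WIDTH test above), so the indirect form with the
 * PREV_ALU length override set up above is used.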
*/ 2712 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2713 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2714 } 2715 2716 static int 2717 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2718 struct nfp_insn_meta *meta, 2719 unsigned int size) 2720 { 2721 s16 range_start = meta->pkt_cache.range_start; 2722 s16 insn_off = meta->insn.off - range_start; 2723 swreg dst_lo, dst_hi, src_lo, src_mid; 2724 u8 dst_gpr = meta->insn.dst_reg * 2; 2725 u8 len_lo = size, len_mid = 0; 2726 u8 idx = insn_off / REG_WIDTH; 2727 u8 off = insn_off % REG_WIDTH; 2728 2729 dst_hi = reg_both(dst_gpr + 1); 2730 dst_lo = reg_both(dst_gpr); 2731 src_lo = reg_xfer(idx); 2732 2733 /* The read length could involve as many as three registers. */ 2734 if (size > REG_WIDTH - off) { 2735 /* Calculate the part in the second register. */ 2736 len_lo = REG_WIDTH - off; 2737 len_mid = size - len_lo; 2738 2739 /* Calculate the part in the third register. */ 2740 if (size > 2 * REG_WIDTH - off) 2741 len_mid = REG_WIDTH; 2742 } 2743 2744 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2745 2746 if (!len_mid) { 2747 wrp_immed(nfp_prog, dst_hi, 0); 2748 return 0; 2749 } 2750 2751 src_mid = reg_xfer(idx + 1); 2752 2753 if (size <= REG_WIDTH) { 2754 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2755 wrp_immed(nfp_prog, dst_hi, 0); 2756 } else { 2757 swreg src_hi = reg_xfer(idx + 2); 2758 2759 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2760 REG_WIDTH - len_lo, len_lo); 2761 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2762 REG_WIDTH - len_lo); 2763 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2764 len_lo); 2765 } 2766 2767 return 0; 2768 } 2769 2770 static int 2771 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2772 struct nfp_insn_meta *meta, 2773 unsigned int size) 2774 { 2775 swreg dst_lo, dst_hi, src_lo; 2776 u8 dst_gpr, idx; 2777 2778 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2779 dst_gpr = meta->insn.dst_reg * 2; 2780 dst_hi = reg_both(dst_gpr + 1); 2781 dst_lo = reg_both(dst_gpr); 2782 src_lo = reg_xfer(idx); 2783 2784 if (size < REG_WIDTH) { 2785 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2786 wrp_immed(nfp_prog, dst_hi, 0); 2787 } else if (size == REG_WIDTH) { 2788 wrp_mov(nfp_prog, dst_lo, src_lo); 2789 wrp_immed(nfp_prog, dst_hi, 0); 2790 } else { 2791 swreg src_hi = reg_xfer(idx + 1); 2792 2793 wrp_mov(nfp_prog, dst_lo, src_lo); 2794 wrp_mov(nfp_prog, dst_hi, src_hi); 2795 } 2796 2797 return 0; 2798 } 2799 2800 static int 2801 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2802 struct nfp_insn_meta *meta, unsigned int size) 2803 { 2804 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2805 2806 if (IS_ALIGNED(off, REG_WIDTH)) 2807 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2808 2809 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2810 } 2811 2812 static int 2813 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2814 unsigned int size) 2815 { 2816 if (meta->ldst_gather_len) 2817 return nfp_cpp_memcpy(nfp_prog, meta); 2818 2819 if (meta->ptr.type == PTR_TO_CTX) { 2820 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2821 return mem_ldx_xdp(nfp_prog, meta, size); 2822 else 2823 return mem_ldx_skb(nfp_prog, meta, size); 2824 } 2825 2826 if (meta->ptr.type == PTR_TO_PACKET) { 2827 if (meta->pkt_cache.range_end) { 2828 if (meta->pkt_cache.do_init) 2829 mem_ldx_data_init_pktcache(nfp_prog, meta); 2830 2831 return 
mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2832 } else { 2833 return mem_ldx_data(nfp_prog, meta, size); 2834 } 2835 } 2836 2837 if (meta->ptr.type == PTR_TO_STACK) 2838 return mem_ldx_stack(nfp_prog, meta, size, 2839 meta->ptr.off + meta->ptr.var_off.value); 2840 2841 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2842 return mem_ldx_emem(nfp_prog, meta, size); 2843 2844 return -EOPNOTSUPP; 2845 } 2846 2847 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2848 { 2849 return mem_ldx(nfp_prog, meta, 1); 2850 } 2851 2852 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2853 { 2854 return mem_ldx(nfp_prog, meta, 2); 2855 } 2856 2857 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2858 { 2859 return mem_ldx(nfp_prog, meta, 4); 2860 } 2861 2862 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2863 { 2864 return mem_ldx(nfp_prog, meta, 8); 2865 } 2866 2867 static int 2868 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2869 unsigned int size) 2870 { 2871 u64 imm = meta->insn.imm; /* sign extend */ 2872 swreg off_reg; 2873 2874 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2875 2876 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2877 imm, size); 2878 } 2879 2880 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2881 unsigned int size) 2882 { 2883 if (meta->ptr.type == PTR_TO_PACKET) 2884 return mem_st_data(nfp_prog, meta, size); 2885 2886 return -EOPNOTSUPP; 2887 } 2888 2889 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2890 { 2891 return mem_st(nfp_prog, meta, 1); 2892 } 2893 2894 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2895 { 2896 return mem_st(nfp_prog, meta, 2); 2897 } 2898 2899 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2900 { 2901 return mem_st(nfp_prog, meta, 4); 2902 } 2903 2904 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2905 { 2906 return mem_st(nfp_prog, meta, 8); 2907 } 2908 2909 static int 2910 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2911 unsigned int size) 2912 { 2913 swreg off_reg; 2914 2915 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2916 2917 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2918 meta->insn.src_reg * 2, size); 2919 } 2920 2921 static int 2922 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2923 unsigned int size, unsigned int ptr_off) 2924 { 2925 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2926 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2927 false, wrp_lmem_store); 2928 } 2929 2930 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2931 { 2932 switch (meta->insn.off) { 2933 case offsetof(struct xdp_md, rx_queue_index): 2934 return nfp_queue_select(nfp_prog, meta); 2935 } 2936 2937 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2938 return -EOPNOTSUPP; 2939 } 2940 2941 static int 2942 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2943 unsigned int size) 2944 { 2945 if (meta->ptr.type == PTR_TO_PACKET) 2946 return mem_stx_data(nfp_prog, meta, size); 2947 2948 if (meta->ptr.type == PTR_TO_STACK) 2949 return mem_stx_stack(nfp_prog, meta, size, 2950 meta->ptr.off + meta->ptr.var_off.value); 2951 2952 return -EOPNOTSUPP; 2953 } 2954 2955 static int mem_stx1(struct nfp_prog *nfp_prog, struct 
nfp_insn_meta *meta) 2956 { 2957 return mem_stx(nfp_prog, meta, 1); 2958 } 2959 2960 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2961 { 2962 return mem_stx(nfp_prog, meta, 2); 2963 } 2964 2965 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2966 { 2967 if (meta->ptr.type == PTR_TO_CTX) 2968 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2969 return mem_stx_xdp(nfp_prog, meta); 2970 return mem_stx(nfp_prog, meta, 4); 2971 } 2972 2973 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2974 { 2975 return mem_stx(nfp_prog, meta, 8); 2976 } 2977 2978 static int 2979 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2980 { 2981 u8 dst_gpr = meta->insn.dst_reg * 2; 2982 u8 src_gpr = meta->insn.src_reg * 2; 2983 unsigned int full_add, out; 2984 swreg addra, addrb, off; 2985 2986 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2987 2988 /* We can fit 16 bits into command immediate, if we know the immediate 2989 * is guaranteed to either always or never fit into 16 bit we only 2990 * generate code to handle that particular case, otherwise generate 2991 * code for both. 2992 */ 2993 out = nfp_prog_current_offset(nfp_prog); 2994 full_add = nfp_prog_current_offset(nfp_prog); 2995 2996 if (meta->insn.off) { 2997 out += 2; 2998 full_add += 2; 2999 } 3000 if (meta->xadd_maybe_16bit) { 3001 out += 3; 3002 full_add += 3; 3003 } 3004 if (meta->xadd_over_16bit) 3005 out += 2 + is64; 3006 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 3007 out += 5; 3008 full_add += 5; 3009 } 3010 3011 /* Generate the branch for choosing add_imm vs add */ 3012 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 3013 swreg max_imm = imm_a(nfp_prog); 3014 3015 wrp_immed(nfp_prog, max_imm, 0xffff); 3016 emit_alu(nfp_prog, reg_none(), 3017 max_imm, ALU_OP_SUB, reg_b(src_gpr)); 3018 emit_alu(nfp_prog, reg_none(), 3019 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); 3020 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); 3021 /* defer for add */ 3022 } 3023 3024 /* If insn has an offset add to the address */ 3025 if (!meta->insn.off) { 3026 addra = reg_a(dst_gpr); 3027 addrb = reg_b(dst_gpr + 1); 3028 } else { 3029 emit_alu(nfp_prog, imma_a(nfp_prog), 3030 reg_a(dst_gpr), ALU_OP_ADD, off); 3031 emit_alu(nfp_prog, imma_b(nfp_prog), 3032 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); 3033 addra = imma_a(nfp_prog); 3034 addrb = imma_b(nfp_prog); 3035 } 3036 3037 /* Generate the add_imm if 16 bits are possible */ 3038 if (meta->xadd_maybe_16bit) { 3039 swreg prev_alu = imm_a(nfp_prog); 3040 3041 wrp_immed(nfp_prog, prev_alu, 3042 FIELD_PREP(CMD_OVE_DATA, 2) | 3043 CMD_OVE_LEN | 3044 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); 3045 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); 3046 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, 3047 addra, addrb, 0, CMD_CTX_NO_SWAP); 3048 3049 if (meta->xadd_over_16bit) 3050 emit_br(nfp_prog, BR_UNC, out, 0); 3051 } 3052 3053 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) 3054 return -EINVAL; 3055 3056 /* Generate the add if 16 bits are not guaranteed */ 3057 if (meta->xadd_over_16bit) { 3058 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, 3059 addra, addrb, is64 << 2, 3060 is64 ? 
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 3061 3062 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 3063 if (is64) 3064 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 3065 } 3066 3067 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 3068 return -EINVAL; 3069 3070 return 0; 3071 } 3072 3073 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3074 { 3075 return mem_xadd(nfp_prog, meta, false); 3076 } 3077 3078 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3079 { 3080 return mem_xadd(nfp_prog, meta, true); 3081 } 3082 3083 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3084 { 3085 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 3086 3087 return 0; 3088 } 3089 3090 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3091 { 3092 const struct bpf_insn *insn = &meta->insn; 3093 u64 imm = insn->imm; /* sign extend */ 3094 swreg or1, or2, tmp_reg; 3095 3096 or1 = reg_a(insn->dst_reg * 2); 3097 or2 = reg_b(insn->dst_reg * 2 + 1); 3098 3099 if (imm & ~0U) { 3100 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3101 emit_alu(nfp_prog, imm_a(nfp_prog), 3102 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3103 or1 = imm_a(nfp_prog); 3104 } 3105 3106 if (imm >> 32) { 3107 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3108 emit_alu(nfp_prog, imm_b(nfp_prog), 3109 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3110 or2 = imm_b(nfp_prog); 3111 } 3112 3113 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 3114 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3115 3116 return 0; 3117 } 3118 3119 static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3120 { 3121 const struct bpf_insn *insn = &meta->insn; 3122 swreg tmp_reg; 3123 3124 tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog)); 3125 emit_alu(nfp_prog, reg_none(), 3126 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3127 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3128 3129 return 0; 3130 } 3131 3132 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3133 { 3134 const struct bpf_insn *insn = &meta->insn; 3135 u64 imm = insn->imm; /* sign extend */ 3136 u8 dst_gpr = insn->dst_reg * 2; 3137 swreg tmp_reg; 3138 3139 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3140 emit_alu(nfp_prog, imm_b(nfp_prog), 3141 reg_a(dst_gpr), ALU_OP_AND, tmp_reg); 3142 /* Upper word of the mask can only be 0 or ~0 from sign extension, 3143 * so either ignore it or OR the whole thing in. 
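 * For example, imm = 0x80000000 sign-extends to 0xffffffff_80000000,
 * so the high word is all ones and the extra OR below is emitted,
 * while imm = 0x7fffffff extends to 0x00000000_7fffffff and the high
 * word can simply be ignored.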
3144 */ 3145 if (is_mbpf_jmp64(meta) && imm >> 32) { 3146 emit_alu(nfp_prog, reg_none(), 3147 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); 3148 } 3149 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3150 3151 return 0; 3152 } 3153 3154 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3155 { 3156 const struct bpf_insn *insn = &meta->insn; 3157 u64 imm = insn->imm; /* sign extend */ 3158 bool is_jmp32 = is_mbpf_jmp32(meta); 3159 swreg tmp_reg; 3160 3161 if (!imm) { 3162 if (is_jmp32) 3163 emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE, 3164 reg_b(insn->dst_reg * 2)); 3165 else 3166 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 3167 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 3168 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3169 return 0; 3170 } 3171 3172 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3173 emit_alu(nfp_prog, reg_none(), 3174 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3175 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3176 3177 if (is_jmp32) 3178 return 0; 3179 3180 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3181 emit_alu(nfp_prog, reg_none(), 3182 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3183 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3184 3185 return 0; 3186 } 3187 3188 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3189 { 3190 const struct bpf_insn *insn = &meta->insn; 3191 3192 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 3193 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 3194 if (is_mbpf_jmp64(meta)) { 3195 emit_alu(nfp_prog, imm_b(nfp_prog), 3196 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, 3197 reg_b(insn->src_reg * 2 + 1)); 3198 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, 3199 imm_b(nfp_prog)); 3200 } 3201 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3202 3203 return 0; 3204 } 3205 3206 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3207 { 3208 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 3209 } 3210 3211 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3212 { 3213 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 3214 } 3215 3216 static int 3217 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3218 { 3219 u32 ret_tgt, stack_depth, offset_br; 3220 swreg tmp_reg; 3221 3222 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); 3223 /* Space for saving the return address is accounted for by the callee, 3224 * so stack_depth can be zero for the main function. 3225 */ 3226 if (stack_depth) { 3227 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3228 stack_imm(nfp_prog)); 3229 emit_alu(nfp_prog, stack_reg(nfp_prog), 3230 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); 3231 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3232 NFP_CSR_ACT_LM_ADDR0); 3233 } 3234 3235 /* Two cases for jumping to the callee: 3236 * 3237 * - If callee uses and needs to save R6~R9 then: 3238 * 1. Put the start offset of the callee into imm_b(). This will 3239 * require a fixup step, as we do not necessarily know this 3240 * address yet. 3241 * 2. Put the return address from the callee to the caller into 3242 * register ret_reg(). 3243 * 3. (After defer slots are consumed) Jump to the subroutine that 3244 * pushes the registers to the stack. 3245 * The subroutine acts as a trampoline, and returns to the address in 3246 * imm_b(), i.e. jumps to the callee. 
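 *   This is also why ret_tgt is computed as the current offset + 3
 *   below: the branch plus its two defer slots, which hold the imm_b()
 *   and ret_reg() loads, occupy three instruction words.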
3247 * 3248 * - If callee does not need to save R6~R9 then just load return 3249 * address to the caller in ret_reg(), and jump to the callee 3250 * directly. 3251 * 3252 * Using ret_reg() to pass the return address to the callee is set here 3253 * as a convention. The callee can then push this address onto its 3254 * stack frame in its prologue. The advantages of passing the return 3255 * address through ret_reg(), instead of pushing it to the stack right 3256 * here, are the following: 3257 * - It looks cleaner. 3258 * - If the called function is called multiple time, we get a lower 3259 * program size. 3260 * - We save two no-op instructions that should be added just before 3261 * the emit_br() when stack depth is not null otherwise. 3262 * - If we ever find a register to hold the return address during whole 3263 * execution of the callee, we will not have to push the return 3264 * address to the stack for leaf functions. 3265 */ 3266 if (!meta->jmp_dst) { 3267 pr_err("BUG: BPF-to-BPF call has no destination recorded\n"); 3268 return -ELOOP; 3269 } 3270 if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) { 3271 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; 3272 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, 3273 RELO_BR_GO_CALL_PUSH_REGS); 3274 offset_br = nfp_prog_current_offset(nfp_prog); 3275 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL); 3276 } else { 3277 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; 3278 emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1); 3279 offset_br = nfp_prog_current_offset(nfp_prog); 3280 } 3281 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL); 3282 3283 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 3284 return -EINVAL; 3285 3286 if (stack_depth) { 3287 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3288 stack_imm(nfp_prog)); 3289 emit_alu(nfp_prog, stack_reg(nfp_prog), 3290 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg); 3291 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3292 NFP_CSR_ACT_LM_ADDR0); 3293 wrp_nops(nfp_prog, 3); 3294 } 3295 3296 meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog); 3297 meta->num_insns_after_br -= offset_br; 3298 3299 return 0; 3300 } 3301 3302 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3303 { 3304 switch (meta->insn.imm) { 3305 case BPF_FUNC_xdp_adjust_head: 3306 return adjust_head(nfp_prog, meta); 3307 case BPF_FUNC_xdp_adjust_tail: 3308 return adjust_tail(nfp_prog, meta); 3309 case BPF_FUNC_map_lookup_elem: 3310 case BPF_FUNC_map_update_elem: 3311 case BPF_FUNC_map_delete_elem: 3312 return map_call_stack_common(nfp_prog, meta); 3313 case BPF_FUNC_get_prandom_u32: 3314 return nfp_get_prandom_u32(nfp_prog, meta); 3315 case BPF_FUNC_perf_event_output: 3316 return nfp_perf_event_output(nfp_prog, meta); 3317 default: 3318 WARN_ONCE(1, "verifier allowed unsupported function\n"); 3319 return -EOPNOTSUPP; 3320 } 3321 } 3322 3323 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3324 { 3325 if (is_mbpf_pseudo_call(meta)) 3326 return bpf_to_bpf_call(nfp_prog, meta); 3327 else 3328 return helper_call(nfp_prog, meta); 3329 } 3330 3331 static bool nfp_is_main_function(struct nfp_insn_meta *meta) 3332 { 3333 return meta->subprog_idx == 0; 3334 } 3335 3336 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3337 { 3338 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 3339 3340 return 0; 3341 } 3342 3343 static int 3344 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct 
nfp_insn_meta *meta) 3345 { 3346 if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) { 3347 /* Pop R6~R9 to the stack via related subroutine. 3348 * We loaded the return address to the caller into ret_reg(). 3349 * This means that the subroutine does not come back here, we 3350 * make it jump back to the subprogram caller directly! 3351 */ 3352 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1, 3353 RELO_BR_GO_CALL_POP_REGS); 3354 /* Pop return address from the stack. */ 3355 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3356 } else { 3357 /* Pop return address from the stack. */ 3358 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3359 /* Jump back to caller if no callee-saved registers were used 3360 * by the subprogram. 3361 */ 3362 emit_rtn(nfp_prog, ret_reg(nfp_prog), 0); 3363 } 3364 3365 return 0; 3366 } 3367 3368 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3369 { 3370 if (nfp_is_main_function(meta)) 3371 return goto_out(nfp_prog, meta); 3372 else 3373 return nfp_subprog_epilogue(nfp_prog, meta); 3374 } 3375 3376 static const instr_cb_t instr_cb[256] = { 3377 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 3378 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 3379 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 3380 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 3381 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 3382 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 3383 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 3384 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 3385 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 3386 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 3387 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 3388 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 3389 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, 3390 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, 3391 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, 3392 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, 3393 [BPF_ALU64 | BPF_NEG] = neg_reg64, 3394 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, 3395 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 3396 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, 3397 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 3398 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, 3399 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, 3400 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 3401 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 3402 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 3403 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 3404 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 3405 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 3406 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 3407 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 3408 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 3409 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 3410 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 3411 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 3412 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, 3413 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, 3414 [BPF_ALU | BPF_DIV | BPF_X] = div_reg, 3415 [BPF_ALU | BPF_DIV | BPF_K] = div_imm, 3416 [BPF_ALU | BPF_NEG] = neg_reg, 3417 [BPF_ALU | BPF_LSH | BPF_X] = shl_reg, 3418 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 3419 [BPF_ALU | BPF_RSH | BPF_X] = shr_reg, 3420 [BPF_ALU | BPF_RSH | BPF_K] = shr_imm, 3421 [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg, 3422 [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm, 3423 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 3424 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 3425 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 3426 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 3427 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 3428 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 3429 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 3430 [BPF_LD | 
BPF_IND | BPF_W] = data_ind_ld4, 3431 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 3432 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 3433 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 3434 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 3435 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 3436 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 3437 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 3438 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 3439 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 3440 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, 3441 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 3442 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 3443 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 3444 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 3445 [BPF_JMP | BPF_JA | BPF_K] = jump, 3446 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 3447 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, 3448 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, 3449 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, 3450 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, 3451 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, 3452 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, 3453 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, 3454 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, 3455 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 3456 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 3457 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 3458 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, 3459 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, 3460 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, 3461 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, 3462 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, 3463 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, 3464 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, 3465 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, 3466 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 3467 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 3468 [BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm, 3469 [BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm, 3470 [BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm, 3471 [BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm, 3472 [BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm, 3473 [BPF_JMP32 | BPF_JSGT | BPF_K] =cmp_imm, 3474 [BPF_JMP32 | BPF_JSGE | BPF_K] =cmp_imm, 3475 [BPF_JMP32 | BPF_JSLT | BPF_K] =cmp_imm, 3476 [BPF_JMP32 | BPF_JSLE | BPF_K] =cmp_imm, 3477 [BPF_JMP32 | BPF_JSET | BPF_K] =jset_imm, 3478 [BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm, 3479 [BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg, 3480 [BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg, 3481 [BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg, 3482 [BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg, 3483 [BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg, 3484 [BPF_JMP32 | BPF_JSGT | BPF_X] =cmp_reg, 3485 [BPF_JMP32 | BPF_JSGE | BPF_X] =cmp_reg, 3486 [BPF_JMP32 | BPF_JSLT | BPF_X] =cmp_reg, 3487 [BPF_JMP32 | BPF_JSLE | BPF_X] =cmp_reg, 3488 [BPF_JMP32 | BPF_JSET | BPF_X] =jset_reg, 3489 [BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg, 3490 [BPF_JMP | BPF_CALL] = call, 3491 [BPF_JMP | BPF_EXIT] = jmp_exit, 3492 }; 3493 3494 /* --- Assembler logic --- */ 3495 static int 3496 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 3497 struct nfp_insn_meta *jmp_dst, u32 br_idx) 3498 { 3499 if (immed_get_value(nfp_prog->prog[br_idx + 1])) { 3500 pr_err("BUG: failed to fix up callee register saving\n"); 3501 return -EINVAL; 3502 } 3503 3504 immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off); 3505 3506 return 0; 3507 } 3508 3509 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 3510 { 3511 struct nfp_insn_meta *meta, *jmp_dst; 3512 u32 idx, br_idx; 3513 int err; 3514 3515 list_for_each_entry(meta, &nfp_prog->insns, l) { 3516 if (meta->flags & FLAG_INSN_SKIP_MASK) 3517 continue; 3518 if (!is_mbpf_jmp(meta)) 3519 continue; 3520 if (meta->insn.code 
== (BPF_JMP | BPF_EXIT) && 3521 !nfp_is_main_function(meta)) 3522 continue; 3523 if (is_mbpf_helper_call(meta)) 3524 continue; 3525 3526 if (list_is_last(&meta->l, &nfp_prog->insns)) 3527 br_idx = nfp_prog->last_bpf_off; 3528 else 3529 br_idx = list_next_entry(meta, l)->off - 1; 3530 3531 /* For BPF-to-BPF function call, a stack adjustment sequence is 3532 * generated after the return instruction. Therefore, we must 3533 * withdraw the length of this sequence to have br_idx pointing 3534 * to where the "branch" NFP instruction is expected to be. 3535 */ 3536 if (is_mbpf_pseudo_call(meta)) 3537 br_idx -= meta->num_insns_after_br; 3538 3539 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 3540 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 3541 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 3542 return -ELOOP; 3543 } 3544 3545 if (meta->insn.code == (BPF_JMP | BPF_EXIT)) 3546 continue; 3547 3548 /* Leave special branches for later */ 3549 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3550 RELO_BR_REL && !is_mbpf_pseudo_call(meta)) 3551 continue; 3552 3553 if (!meta->jmp_dst) { 3554 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 3555 return -ELOOP; 3556 } 3557 3558 jmp_dst = meta->jmp_dst; 3559 3560 if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) { 3561 pr_err("Branch landing on removed instruction!!\n"); 3562 return -ELOOP; 3563 } 3564 3565 if (is_mbpf_pseudo_call(meta) && 3566 nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) { 3567 err = nfp_fixup_immed_relo(nfp_prog, meta, 3568 jmp_dst, br_idx); 3569 if (err) 3570 return err; 3571 } 3572 3573 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3574 RELO_BR_REL) 3575 continue; 3576 3577 for (idx = meta->off; idx <= br_idx; idx++) { 3578 if (!nfp_is_br(nfp_prog->prog[idx])) 3579 continue; 3580 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 3581 } 3582 } 3583 3584 return 0; 3585 } 3586 3587 static void nfp_intro(struct nfp_prog *nfp_prog) 3588 { 3589 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 3590 emit_alu(nfp_prog, plen_reg(nfp_prog), 3591 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 3592 } 3593 3594 static void 3595 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3596 { 3597 /* Save return address into the stack. */ 3598 wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog)); 3599 } 3600 3601 static void 3602 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3603 { 3604 unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth; 3605 3606 nfp_prog->stack_frame_depth = round_up(depth, 4); 3607 nfp_subprog_prologue(nfp_prog, meta); 3608 } 3609 3610 bool nfp_is_subprog_start(struct nfp_insn_meta *meta) 3611 { 3612 return meta->flags & FLAG_INSN_IS_SUBPROG_START; 3613 } 3614 3615 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 3616 { 3617 /* TC direct-action mode: 3618 * 0,1 ok NOT SUPPORTED[1] 3619 * 2 drop 0x22 -> drop, count as stat1 3620 * 4,5 nuke 0x02 -> drop 3621 * 7 redir 0x44 -> redir, count as stat2 3622 * * unspec 0x11 -> pass, count as stat0 3623 * 3624 * [1] We can't support OK and RECLASSIFY because we can't tell TC 3625 * the exact decision made. We are forced to support UNSPEC 3626 * to handle aborts so that's the only one we handle for passing 3627 * packets up the stack. 
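 * The 0x41221211 and 0x41001211 immediates loaded below act as nibble
 * lookup tables indexed by R0 (R0 scaled by four is used as the shift
 * amount); e.g. R0 == 2 picks nibble 2 from each word (0x2 and 0x2),
 * which combine into the 0x22 drop/stat1 code listed above.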
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0  aborted  0x82 -> drop,  count as stat3
	 *   1     drop  0x22 -> drop,  count as stat1
	 *   2     pass  0x11 -> pass,  count as stat0
	 *   3       tx  0x44 -> redir, count as stat2
	 *   *  unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

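/* Worked example for the XDP outro above (illustrative): XDP_DROP returns
 * R0 == 1.  The code shifts R0 left by 3 to form a bit offset (1 << 3 == 8),
 * uses that value as the indirect shift amount (supplied by the ALU_OP_OR
 * with 0 just before the shift) to pick byte 1 out of the constant
 * 0x44112282 ((0x44112282 >> 8) & 0xff == 0x22), and merges 0x22 into the
 * NFP_BPF_ABI_FLAGS result word, i.e. drop, counted as stat1, matching the
 * table at the top of nfp_outro_xdp().
 */
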
3722 */ 3723 nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog); 3724 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3725 u8 adj = (reg - BPF_REG_0) * 2; 3726 u8 idx = (reg - BPF_REG_6) * 2; 3727 3728 /* The first slot in the stack frame is used to push the return 3729 * address in bpf_to_bpf_call(), start just after. 3730 */ 3731 wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj)); 3732 3733 if (reg == BPF_REG_8) 3734 /* Prepare to jump back, last 3 insns use defer slots */ 3735 emit_rtn(nfp_prog, imm_b(nfp_prog), 3); 3736 3737 wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1)); 3738 } 3739 } 3740 3741 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog) 3742 { 3743 u8 reg; 3744 3745 /* Subroutine: Restore all callee saved registers (R6 ~ R9). 3746 * ret_reg() holds the return address. 3747 */ 3748 nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog); 3749 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3750 u8 adj = (reg - BPF_REG_0) * 2; 3751 u8 idx = (reg - BPF_REG_6) * 2; 3752 3753 /* The first slot in the stack frame holds the return address, 3754 * start popping just after that. 3755 */ 3756 wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx)); 3757 3758 if (reg == BPF_REG_8) 3759 /* Prepare to jump back, last 3 insns use defer slots */ 3760 emit_rtn(nfp_prog, ret_reg(nfp_prog), 3); 3761 3762 wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1)); 3763 } 3764 } 3765 3766 static void nfp_outro(struct nfp_prog *nfp_prog) 3767 { 3768 switch (nfp_prog->type) { 3769 case BPF_PROG_TYPE_SCHED_CLS: 3770 nfp_outro_tc_da(nfp_prog); 3771 break; 3772 case BPF_PROG_TYPE_XDP: 3773 nfp_outro_xdp(nfp_prog); 3774 break; 3775 default: 3776 WARN_ON(1); 3777 } 3778 3779 if (!nfp_prog_needs_callee_reg_save(nfp_prog)) 3780 return; 3781 3782 nfp_push_callee_registers(nfp_prog); 3783 nfp_pop_callee_registers(nfp_prog); 3784 } 3785 3786 static int nfp_translate(struct nfp_prog *nfp_prog) 3787 { 3788 struct nfp_insn_meta *meta; 3789 unsigned int depth; 3790 int err; 3791 3792 depth = nfp_prog->subprog[0].stack_depth; 3793 nfp_prog->stack_frame_depth = round_up(depth, 4); 3794 3795 nfp_intro(nfp_prog); 3796 if (nfp_prog->error) 3797 return nfp_prog->error; 3798 3799 list_for_each_entry(meta, &nfp_prog->insns, l) { 3800 instr_cb_t cb = instr_cb[meta->insn.code]; 3801 3802 meta->off = nfp_prog_current_offset(nfp_prog); 3803 3804 if (nfp_is_subprog_start(meta)) { 3805 nfp_start_subprog(nfp_prog, meta); 3806 if (nfp_prog->error) 3807 return nfp_prog->error; 3808 } 3809 3810 if (meta->flags & FLAG_INSN_SKIP_MASK) { 3811 nfp_prog->n_translated++; 3812 continue; 3813 } 3814 3815 if (nfp_meta_has_prev(nfp_prog, meta) && 3816 nfp_meta_prev(meta)->double_cb) 3817 cb = nfp_meta_prev(meta)->double_cb; 3818 if (!cb) 3819 return -ENOENT; 3820 err = cb(nfp_prog, meta); 3821 if (err) 3822 return err; 3823 if (nfp_prog->error) 3824 return nfp_prog->error; 3825 3826 nfp_prog->n_translated++; 3827 } 3828 3829 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 3830 3831 nfp_outro(nfp_prog); 3832 if (nfp_prog->error) 3833 return nfp_prog->error; 3834 3835 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 3836 if (nfp_prog->error) 3837 return nfp_prog->error; 3838 3839 return nfp_fixup_branches(nfp_prog); 3840 } 3841 3842 /* --- Optimizations --- */ 3843 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 3844 { 3845 struct nfp_insn_meta *meta; 3846 3847 list_for_each_entry(meta, &nfp_prog->insns, l) { 3848 struct bpf_insn insn = meta->insn; 3849 3850 /* Programs 
		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;

		/* Return as soon as something doesn't match */
		if (!(meta->flags & FLAG_INSN_SKIP_MASK))
			return;
	}
}

/* abs(insn.imm) will fit better into unrestricted reg immediate -
 * convert add/sub of a negative number into a sub/add of a positive one.
 */
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta))
			continue;
		if (BPF_SRC(insn.code) != BPF_K)
			continue;
		if (insn.imm >= 0)
			continue;

		if (is_mbpf_jmp(meta)) {
			switch (BPF_OP(insn.code)) {
			case BPF_JGE:
			case BPF_JSGE:
			case BPF_JLT:
			case BPF_JSLT:
				meta->jump_neg_op = true;
				break;
			default:
				continue;
			}
		} else {
			if (BPF_OP(insn.code) == BPF_ADD)
				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
			else if (BPF_OP(insn.code) == BPF_SUB)
				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
			else
				continue;

			meta->insn.code = insn.code | BPF_K;
		}

		meta->insn.imm = -insn.imm;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

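		/* The only shift amount matched below is 0x20, i.e. 32 bits.
		 * A 32-bit BPF_ABS/BPF_IND load followed by a shift-by-32
		 * pair is the usual zero-extension idiom in programs
		 * converted from cBPF, for example:
		 *
		 *	r0 = *(u32 *)skb[off]
		 *	r0 <<= 32
		 *	r0 >>= 32
		 *
		 * As with nfp_bpf_opt_ld_mask() above, the NFP load sequence
		 * is expected to make the extra shifts unnecessary, so the
		 * pair can be skipped.
		 */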
		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
		meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
	}
}

/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the store
 * should be the same, and the load and store should also be of the same
 * width.  If either addr_src or addr_dest is the stack pointer, we don't do
 * the CPP optimization as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET &&
	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 *  - Their address base registers are the same.
 *  - Their address offsets are in the same order.
 *  - They operate at the same memory width.
 *  - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

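/* Worked example (illustrative, assuming both pointers are packet pointers):
 * the pairs below chain because each load and store advances its offset by
 * the access width (4 bytes here) while keeping the same base and transfer
 * registers:
 *
 *	r3 = *(u32 *)(r1 + 0)
 *	*(u32 *)(r2 + 0) = r3
 *	r3 = *(u32 *)(r1 + 4)
 *	*(u32 *)(r2 + 4) = r3
 *
 * A pair with a different width, a different base register, or an offset
 * that does not continue the sequence starts a new chain instead.
 */
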
/* Return TRUE if a cross memory access happens.  A cross memory access means
 * the store area overlaps with the load area, so a later load might read a
 * value written by a previous store; in that case we can't treat the
 * sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets.  Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}

/* This pass tries to identify the following instruction sequences.
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when lowering
 * memcpy.  The NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crossed with the previous
		 *   pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->flags |=
					FLAG_INSN_SKIP_PREC_DEPENDENT;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then this
			 * could serve as the new head of the next chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
			meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}

static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}

		/* Check ID to make sure two reads share the same
		 * variable offset against PTR_TO_PACKET, and check OFF
		 * to make sure they also share the same constant
		 * offset.
		 *
		 * OFFs don't really need to be the same, because they
		 * are the constant offsets against PTR_TO_PACKET, so
		 * for different OFFs, we could canonicalize them to
		 * offsets against the original packet pointer.  We
		 * don't support this.
		 */
		if (meta->ptr.id == range_ptr_id &&
		    meta->ptr.off == range_ptr_off) {
			s16 new_start = range_start;
			s16 end, off = insn->off;
			s16 new_end = range_end;
			bool changed = false;

			if (off < range_start) {
				new_start = off;
				changed = true;
			}

			end = off + BPF_LDST_BYTES(insn);
			if (end > range_end) {
				new_end = end;
				changed = true;
			}

			if (!changed)
				continue;

			if (new_end - new_start <= 64) {
				/* Install new range.
				 */
				range_start = new_start;
				range_end = new_end;
				continue;
			}
		}

end_current_then_start_new:
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
start_new:
		range_node = meta;
		range_node->pkt_cache.do_init = true;
		range_ptr_id = range_node->ptr.id;
		range_ptr_off = range_node->ptr.off;
		range_start = insn->off;
		range_end = insn->off + BPF_LDST_BYTES(insn);
	}

	if (range_node) {
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
	}

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
			if (meta->pkt_cache.do_init) {
				range_start = meta->pkt_cache.range_start;
				range_end = meta->pkt_cache.range_end;
			} else {
				meta->pkt_cache.range_start = range_start;
				meta->pkt_cache.range_end = range_end;
			}
		}
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_neg_add_sub(nfp_prog);
	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->flags & FLAG_INSN_SKIP_MASK ||
		    meta2->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
			continue;

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

	return 0;
}

static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}

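/* Jump destinations below are computed as meta->n + 1 + off (or + imm for
 * pseudo calls) because eBPF branch targets are relative to the instruction
 * following the jump.  For example, if instruction #5 is "goto +3", its
 * destination is instruction 5 + 1 + 3 = 9.
 */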
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct nfp_insn_meta *dst_meta;
		u64 code = meta->insn.code;
		unsigned int dst_idx;
		bool pseudo_call;

		if (!is_mbpf_jmp(meta))
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (is_mbpf_helper_call(meta))
			continue;

		/* If opcode is BPF_CALL at this point, this can only be a
		 * BPF-to-BPF call (a.k.a. pseudo call).
		 */
		pseudo_call = BPF_OP(code) == BPF_CALL;

		if (pseudo_call)
			dst_idx = meta->n + 1 + meta->insn.imm;
		else
			dst_idx = meta->n + 1 + meta->insn.off;

		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);

		if (pseudo_call)
			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		meta->jmp_dst = dst_meta;
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;
		u16 off;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_GO_CALL_PUSH_REGS:
			if (!nfp_prog->tgt_call_push_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_push_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_GO_CALL_POP_REGS:
			if (!nfp_prog->tgt_call_pop_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			case BPF_FUNC_perf_event_output:
				val = nfp_prog->bpf->helpers.perf_event_output;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}
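
/* Rough end-to-end flow implemented by this file (illustrative sketch only;
 * the real callers live elsewhere in the driver and pass in actual nfp_prog
 * and nfp_bpf_vnic state):
 *
 *	nfp_bpf_jit_prepare(nfp_prog);		  - record jump/call targets
 *	err = nfp_bpf_jit(nfp_prog);		  - optimize, translate, trim
 *	image = nfp_bpf_relo_for_vnic(nfp_prog, bv);
 *						  - resolve RELO_* entries and
 *						    compute ECC for the ustore
 */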