// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The "walk multiple entries" macros below provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the translation
	 * will fail anyway, so just return OK.
71 */ 72 if (nfp_prog->error) 73 return true; 74 return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off); 75 } 76 77 /* --- Emitters --- */ 78 static void 79 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, 80 u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx, 81 bool indir) 82 { 83 u64 insn; 84 85 insn = FIELD_PREP(OP_CMD_A_SRC, areg) | 86 FIELD_PREP(OP_CMD_CTX, ctx) | 87 FIELD_PREP(OP_CMD_B_SRC, breg) | 88 FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) | 89 FIELD_PREP(OP_CMD_XFER, xfer) | 90 FIELD_PREP(OP_CMD_CNT, size) | 91 FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) | 92 FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) | 93 FIELD_PREP(OP_CMD_INDIR, indir) | 94 FIELD_PREP(OP_CMD_MODE, mode); 95 96 nfp_prog_push(nfp_prog, insn); 97 } 98 99 static void 100 emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 101 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir) 102 { 103 struct nfp_insn_re_regs reg; 104 int err; 105 106 err = swreg_to_restricted(reg_none(), lreg, rreg, ®, false); 107 if (err) { 108 nfp_prog->error = err; 109 return; 110 } 111 if (reg.swap) { 112 pr_err("cmd can't swap arguments\n"); 113 nfp_prog->error = -EFAULT; 114 return; 115 } 116 if (reg.dst_lmextn || reg.src_lmextn) { 117 pr_err("cmd can't use LMextn\n"); 118 nfp_prog->error = -EFAULT; 119 return; 120 } 121 122 __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx, 123 indir); 124 } 125 126 static void 127 emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 128 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) 129 { 130 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false); 131 } 132 133 static void 134 emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 135 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) 136 { 137 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true); 138 } 139 140 static void 141 __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip, 142 enum br_ctx_signal_state css, u16 addr, u8 defer) 143 { 144 u16 addr_lo, addr_hi; 145 u64 insn; 146 147 addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)); 148 addr_hi = addr != addr_lo; 149 150 insn = OP_BR_BASE | 151 FIELD_PREP(OP_BR_MASK, mask) | 152 FIELD_PREP(OP_BR_EV_PIP, ev_pip) | 153 FIELD_PREP(OP_BR_CSS, css) | 154 FIELD_PREP(OP_BR_DEFBR, defer) | 155 FIELD_PREP(OP_BR_ADDR_LO, addr_lo) | 156 FIELD_PREP(OP_BR_ADDR_HI, addr_hi); 157 158 nfp_prog_push(nfp_prog, insn); 159 } 160 161 static void 162 emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer, 163 enum nfp_relo_type relo) 164 { 165 if (mask == BR_UNC && defer > 2) { 166 pr_err("BUG: branch defer out of bounds %d\n", defer); 167 nfp_prog->error = -EFAULT; 168 return; 169 } 170 171 __emit_br(nfp_prog, mask, 172 mask != BR_UNC ? 
BR_EV_PIP_COND : BR_EV_PIP_UNCOND, 173 BR_CSS_NONE, addr, defer); 174 175 nfp_prog->prog[nfp_prog->prog_len - 1] |= 176 FIELD_PREP(OP_RELO_TYPE, relo); 177 } 178 179 static void 180 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) 181 { 182 emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL); 183 } 184 185 static void 186 __emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer, 187 bool set, bool src_lmextn) 188 { 189 u16 addr_lo, addr_hi; 190 u64 insn; 191 192 addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO)); 193 addr_hi = addr != addr_lo; 194 195 insn = OP_BR_BIT_BASE | 196 FIELD_PREP(OP_BR_BIT_A_SRC, areg) | 197 FIELD_PREP(OP_BR_BIT_B_SRC, breg) | 198 FIELD_PREP(OP_BR_BIT_BV, set) | 199 FIELD_PREP(OP_BR_BIT_DEFBR, defer) | 200 FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) | 201 FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) | 202 FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn); 203 204 nfp_prog_push(nfp_prog, insn); 205 } 206 207 static void 208 emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, 209 u8 defer, bool set, enum nfp_relo_type relo) 210 { 211 struct nfp_insn_re_regs reg; 212 int err; 213 214 /* NOTE: The bit to test is specified as an rotation amount, such that 215 * the bit to test will be placed on the MSB of the result when 216 * doing a rotate right. For bit X, we need right rotate X + 1. 217 */ 218 bit += 1; 219 220 err = swreg_to_restricted(reg_none(), src, reg_imm(bit), ®, false); 221 if (err) { 222 nfp_prog->error = err; 223 return; 224 } 225 226 __emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set, 227 reg.src_lmextn); 228 229 nfp_prog->prog[nfp_prog->prog_len - 1] |= 230 FIELD_PREP(OP_RELO_TYPE, relo); 231 } 232 233 static void 234 emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer) 235 { 236 emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL); 237 } 238 239 static void 240 __emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, 241 u8 defer, bool dst_lmextn, bool src_lmextn) 242 { 243 u64 insn; 244 245 insn = OP_BR_ALU_BASE | 246 FIELD_PREP(OP_BR_ALU_A_SRC, areg) | 247 FIELD_PREP(OP_BR_ALU_B_SRC, breg) | 248 FIELD_PREP(OP_BR_ALU_DEFBR, defer) | 249 FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) | 250 FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) | 251 FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn); 252 253 nfp_prog_push(nfp_prog, insn); 254 } 255 256 static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer) 257 { 258 struct nfp_insn_ur_regs reg; 259 int err; 260 261 err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), ®); 262 if (err) { 263 nfp_prog->error = err; 264 return; 265 } 266 267 __emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn, 268 reg.src_lmextn); 269 } 270 271 static void 272 __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, 273 enum immed_width width, bool invert, 274 enum immed_shift shift, bool wr_both, 275 bool dst_lmextn, bool src_lmextn) 276 { 277 u64 insn; 278 279 insn = OP_IMMED_BASE | 280 FIELD_PREP(OP_IMMED_A_SRC, areg) | 281 FIELD_PREP(OP_IMMED_B_SRC, breg) | 282 FIELD_PREP(OP_IMMED_IMM, imm_hi) | 283 FIELD_PREP(OP_IMMED_WIDTH, width) | 284 FIELD_PREP(OP_IMMED_INV, invert) | 285 FIELD_PREP(OP_IMMED_SHIFT, shift) | 286 FIELD_PREP(OP_IMMED_WR_AB, wr_both) | 287 FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) | 288 FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn); 289 290 nfp_prog_push(nfp_prog, insn); 291 } 292 293 static void 294 emit_immed(struct nfp_prog 
*nfp_prog, swreg dst, u16 imm, 295 enum immed_width width, bool invert, enum immed_shift shift) 296 { 297 struct nfp_insn_ur_regs reg; 298 int err; 299 300 if (swreg_type(dst) == NN_REG_IMM) { 301 nfp_prog->error = -EFAULT; 302 return; 303 } 304 305 err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), ®); 306 if (err) { 307 nfp_prog->error = err; 308 return; 309 } 310 311 /* Use reg.dst when destination is No-Dest. */ 312 __emit_immed(nfp_prog, 313 swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg, 314 reg.breg, imm >> 8, width, invert, shift, 315 reg.wr_both, reg.dst_lmextn, reg.src_lmextn); 316 } 317 318 static void 319 __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, 320 enum shf_sc sc, u8 shift, 321 u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both, 322 bool dst_lmextn, bool src_lmextn) 323 { 324 u64 insn; 325 326 if (!FIELD_FIT(OP_SHF_SHIFT, shift)) { 327 nfp_prog->error = -EFAULT; 328 return; 329 } 330 331 if (sc == SHF_SC_L_SHF) 332 shift = 32 - shift; 333 334 insn = OP_SHF_BASE | 335 FIELD_PREP(OP_SHF_A_SRC, areg) | 336 FIELD_PREP(OP_SHF_SC, sc) | 337 FIELD_PREP(OP_SHF_B_SRC, breg) | 338 FIELD_PREP(OP_SHF_I8, i8) | 339 FIELD_PREP(OP_SHF_SW, sw) | 340 FIELD_PREP(OP_SHF_DST, dst) | 341 FIELD_PREP(OP_SHF_SHIFT, shift) | 342 FIELD_PREP(OP_SHF_OP, op) | 343 FIELD_PREP(OP_SHF_DST_AB, dst_ab) | 344 FIELD_PREP(OP_SHF_WR_AB, wr_both) | 345 FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) | 346 FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn); 347 348 nfp_prog_push(nfp_prog, insn); 349 } 350 351 static void 352 emit_shf(struct nfp_prog *nfp_prog, swreg dst, 353 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift) 354 { 355 struct nfp_insn_re_regs reg; 356 int err; 357 358 err = swreg_to_restricted(dst, lreg, rreg, ®, true); 359 if (err) { 360 nfp_prog->error = err; 361 return; 362 } 363 364 __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift, 365 reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both, 366 reg.dst_lmextn, reg.src_lmextn); 367 } 368 369 static void 370 emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst, 371 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc) 372 { 373 if (sc == SHF_SC_R_ROT) { 374 pr_err("indirect shift is not allowed on rotation\n"); 375 nfp_prog->error = -EFAULT; 376 return; 377 } 378 379 emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0); 380 } 381 382 static void 383 __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, 384 u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both, 385 bool dst_lmextn, bool src_lmextn) 386 { 387 u64 insn; 388 389 insn = OP_ALU_BASE | 390 FIELD_PREP(OP_ALU_A_SRC, areg) | 391 FIELD_PREP(OP_ALU_B_SRC, breg) | 392 FIELD_PREP(OP_ALU_DST, dst) | 393 FIELD_PREP(OP_ALU_SW, swap) | 394 FIELD_PREP(OP_ALU_OP, op) | 395 FIELD_PREP(OP_ALU_DST_AB, dst_ab) | 396 FIELD_PREP(OP_ALU_WR_AB, wr_both) | 397 FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) | 398 FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn); 399 400 nfp_prog_push(nfp_prog, insn); 401 } 402 403 static void 404 emit_alu(struct nfp_prog *nfp_prog, swreg dst, 405 swreg lreg, enum alu_op op, swreg rreg) 406 { 407 struct nfp_insn_ur_regs reg; 408 int err; 409 410 err = swreg_to_unrestricted(dst, lreg, rreg, ®); 411 if (err) { 412 nfp_prog->error = err; 413 return; 414 } 415 416 __emit_alu(nfp_prog, reg.dst, reg.dst_ab, 417 reg.areg, op, reg.breg, reg.swap, reg.wr_both, 418 reg.dst_lmextn, reg.src_lmextn); 419 } 420 421 static void 422 __emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg, 423 enum mul_type type, 
	   enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the step is MUL_LAST or MUL_LAST_2, the left source
		 * is used as the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the
ignored 548 * operand, but we can't encode 2 immeds in one instr with our normal 549 * swreg infra so if param is an immed, we encode as reg_none() and 550 * copy the immed to both operands. 551 */ 552 if (swreg_type(src) == NN_REG_IMM) { 553 err = swreg_to_unrestricted(reg_none(), src, reg_none(), ®); 554 reg.breg = reg.areg; 555 } else { 556 err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), ®); 557 } 558 if (err) { 559 nfp_prog->error = err; 560 return; 561 } 562 563 __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr, 564 false, reg.src_lmextn); 565 } 566 567 /* CSR value is read in following immed[gpr, 0] */ 568 static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr) 569 { 570 __emit_lcsr(nfp_prog, 0, 0, false, addr, false, false); 571 } 572 573 static void emit_nop(struct nfp_prog *nfp_prog) 574 { 575 __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0); 576 } 577 578 /* --- Wrappers --- */ 579 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift) 580 { 581 if (!(imm & 0xffff0000)) { 582 *val = imm; 583 *shift = IMMED_SHIFT_0B; 584 } else if (!(imm & 0xff0000ff)) { 585 *val = imm >> 8; 586 *shift = IMMED_SHIFT_1B; 587 } else if (!(imm & 0x0000ffff)) { 588 *val = imm >> 16; 589 *shift = IMMED_SHIFT_2B; 590 } else { 591 return false; 592 } 593 594 return true; 595 } 596 597 static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm) 598 { 599 enum immed_shift shift; 600 u16 val; 601 602 if (pack_immed(imm, &val, &shift)) { 603 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift); 604 } else if (pack_immed(~imm, &val, &shift)) { 605 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift); 606 } else { 607 emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL, 608 false, IMMED_SHIFT_0B); 609 emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD, 610 false, IMMED_SHIFT_2B); 611 } 612 } 613 614 static void 615 wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm, 616 enum nfp_relo_type relo) 617 { 618 if (imm > 0xffff) { 619 pr_err("relocation of a large immediate!\n"); 620 nfp_prog->error = -EFAULT; 621 return; 622 } 623 emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 624 625 nfp_prog->prog[nfp_prog->prog_len - 1] |= 626 FIELD_PREP(OP_RELO_TYPE, relo); 627 } 628 629 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted) 630 * If the @imm is small enough encode it directly in operand and return 631 * otherwise load @imm to a spare register and return its encoding. 632 */ 633 static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) 634 { 635 if (FIELD_FIT(UR_REG_IMM_MAX, imm)) 636 return reg_imm(imm); 637 638 wrp_immed(nfp_prog, tmp_reg, imm); 639 return tmp_reg; 640 } 641 642 /* re_load_imm_any() - encode immediate or use tmp register (restricted) 643 * If the @imm is small enough encode it directly in operand and return 644 * otherwise load @imm to a spare register and return its encoding. 
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst starting from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}

/* The NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
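	/* Store strategy used below: depending on the copy length and its
	 * 4-byte alignment the data is pushed with one or two write8/write32
	 * commands; the indirect_ref variants need the burst length
	 * overridden first via the PREV_ALU immed (CMD_OVE_LEN), just like
	 * the read above.
	 */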
	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes, then
		 * another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned
		 * part of the length, then another direct_ref write8 to write
		 * the remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before
	 * this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ?
3 : (len & 3) - 1); 826 break; 827 case BPF_H: 828 wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2), 829 reg_xfer(xfer_num), 2, (len & 3) ^ 2); 830 break; 831 case BPF_W: 832 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), 833 reg_xfer(0)); 834 break; 835 case BPF_DW: 836 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), 837 reg_xfer(xfer_num)); 838 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 839 reg_xfer(xfer_num + 1)); 840 break; 841 } 842 843 if (BPF_SIZE(meta->insn.code) != BPF_DW) 844 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 845 846 return 0; 847 } 848 849 static int 850 data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) 851 { 852 unsigned int i; 853 u16 shift, sz; 854 855 /* We load the value from the address indicated in @offset and then 856 * shift out the data we don't need. Note: this is big endian! 857 */ 858 sz = max(size, 4); 859 shift = size < 4 ? 4 - size : 0; 860 861 emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, 862 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP); 863 864 i = 0; 865 if (shift) 866 emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE, 867 reg_xfer(0), SHF_SC_R_SHF, shift * 8); 868 else 869 for (; i * 4 < size; i++) 870 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); 871 872 if (i < 2) 873 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); 874 875 return 0; 876 } 877 878 static int 879 data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, 880 swreg lreg, swreg rreg, int size, enum cmd_mode mode) 881 { 882 unsigned int i; 883 u8 mask, sz; 884 885 /* We load the value from the address indicated in rreg + lreg and then 886 * mask out the data we don't need. Note: this is little endian! 887 */ 888 sz = max(size, 4); 889 mask = size < 4 ? 
GENMASK(size - 1, 0) : 0; 890 891 emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0, 892 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP); 893 894 i = 0; 895 if (mask) 896 emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask, 897 reg_xfer(0), SHF_SC_NONE, 0, true); 898 else 899 for (; i * 4 < size; i++) 900 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); 901 902 if (i < 2) 903 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); 904 905 return 0; 906 } 907 908 static int 909 data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 910 u8 dst_gpr, u8 size) 911 { 912 return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset, 913 size, CMD_MODE_32b); 914 } 915 916 static int 917 data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 918 u8 dst_gpr, u8 size) 919 { 920 swreg rega, regb; 921 922 addr40_offset(nfp_prog, src_gpr, offset, ®a, ®b); 923 924 return data_ld_host_order(nfp_prog, dst_gpr, rega, regb, 925 size, CMD_MODE_40b_BA); 926 } 927 928 static int 929 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) 930 { 931 swreg tmp_reg; 932 933 /* Calculate the true offset (src_reg + imm) */ 934 tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); 935 emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg); 936 937 /* Check packet length (size guaranteed to fit b/c it's u8) */ 938 emit_alu(nfp_prog, imm_a(nfp_prog), 939 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); 940 emit_alu(nfp_prog, reg_none(), 941 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); 942 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); 943 944 /* Load data */ 945 return data_ld(nfp_prog, imm_b(nfp_prog), 0, size); 946 } 947 948 static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) 949 { 950 swreg tmp_reg; 951 952 /* Check packet length */ 953 tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog)); 954 emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); 955 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); 956 957 /* Load data */ 958 tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); 959 return data_ld(nfp_prog, tmp_reg, 0, size); 960 } 961 962 static int 963 data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, 964 u8 src_gpr, u8 size) 965 { 966 unsigned int i; 967 968 for (i = 0; i * 4 < size; i++) 969 wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i)); 970 971 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 972 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); 973 974 return 0; 975 } 976 977 static int 978 data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, 979 u64 imm, u8 size) 980 { 981 wrp_immed(nfp_prog, reg_xfer(0), imm); 982 if (size == 8) 983 wrp_immed(nfp_prog, reg_xfer(1), imm >> 32); 984 985 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 986 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); 987 988 return 0; 989 } 990 991 typedef int 992 (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off, 993 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 994 bool needs_inc); 995 996 static int 997 wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, 998 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 999 bool needs_inc) 1000 { 1001 bool should_inc = needs_inc && new_gpr && !last; 1002 u32 idx, src_byte; 1003 enum shf_sc sc; 1004 swreg reg; 1005 int shf; 1006 u8 mask; 1007 1008 if (WARN_ON_ONCE(dst_byte + size 
> 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large, do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM
		 * word into a new GPR.  IOW we've already looked at that LMEM
		 * word and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
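/* Example of the slicing driven by mem_op_stack() below: a 4 byte load from
 * stack offset 6 becomes two wrp_lmem_load() steps - bytes 2-3 of LMEM word 1
 * are shifted right into bytes 0-1 of the destination GPR (mask 0x3), then
 * bytes 0-1 of LMEM word 2 are shifted left into bytes 2-3 (mask 0xc).
 */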
static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large, do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations are going to need
		 * RMW, the middle locations will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const ||
	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ?
2 : 3); 1193 } 1194 1195 if (clr_gpr && size < 8) 1196 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 1197 1198 while (size) { 1199 u32 slice_end; 1200 u8 slice_size; 1201 1202 slice_size = min(size, 4 - gpr_byte); 1203 slice_end = min(off + slice_size, round_up(off + 1, 4)); 1204 slice_size = slice_end - off; 1205 1206 last = slice_size == size; 1207 1208 if (needs_inc) 1209 off %= 4; 1210 1211 ret = step(nfp_prog, gpr, gpr_byte, off, slice_size, 1212 first, gpr != prev_gpr, last, lm3, needs_inc); 1213 if (ret) 1214 return ret; 1215 1216 prev_gpr = gpr; 1217 first = false; 1218 1219 gpr_byte += slice_size; 1220 if (gpr_byte >= 4) { 1221 gpr_byte -= 4; 1222 gpr++; 1223 } 1224 1225 size -= slice_size; 1226 off += slice_size; 1227 } 1228 1229 return 0; 1230 } 1231 1232 static void 1233 wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) 1234 { 1235 swreg tmp_reg; 1236 1237 if (alu_op == ALU_OP_AND) { 1238 if (!imm) 1239 wrp_immed(nfp_prog, reg_both(dst), 0); 1240 if (!imm || !~imm) 1241 return; 1242 } 1243 if (alu_op == ALU_OP_OR) { 1244 if (!~imm) 1245 wrp_immed(nfp_prog, reg_both(dst), ~0U); 1246 if (!imm || !~imm) 1247 return; 1248 } 1249 if (alu_op == ALU_OP_XOR) { 1250 if (!~imm) 1251 emit_alu(nfp_prog, reg_both(dst), reg_none(), 1252 ALU_OP_NOT, reg_b(dst)); 1253 if (!imm || !~imm) 1254 return; 1255 } 1256 1257 tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); 1258 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg); 1259 } 1260 1261 static int 1262 wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1263 enum alu_op alu_op, bool skip) 1264 { 1265 const struct bpf_insn *insn = &meta->insn; 1266 u64 imm = insn->imm; /* sign extend */ 1267 1268 if (skip) { 1269 meta->skip = true; 1270 return 0; 1271 } 1272 1273 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U); 1274 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32); 1275 1276 return 0; 1277 } 1278 1279 static int 1280 wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1281 enum alu_op alu_op) 1282 { 1283 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2; 1284 1285 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src)); 1286 emit_alu(nfp_prog, reg_both(dst + 1), 1287 reg_a(dst + 1), alu_op, reg_b(src + 1)); 1288 1289 return 0; 1290 } 1291 1292 static int 1293 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1294 enum alu_op alu_op, bool skip) 1295 { 1296 const struct bpf_insn *insn = &meta->insn; 1297 1298 if (skip) { 1299 meta->skip = true; 1300 return 0; 1301 } 1302 1303 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); 1304 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1305 1306 return 0; 1307 } 1308 1309 static int 1310 wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1311 enum alu_op alu_op) 1312 { 1313 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2; 1314 1315 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src)); 1316 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 1317 1318 return 0; 1319 } 1320 1321 static void 1322 wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src, 1323 enum br_mask br_mask, u16 off) 1324 { 1325 emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src)); 1326 emit_br(nfp_prog, br_mask, off, 0); 1327 } 1328 1329 static int 1330 wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1331 enum alu_op alu_op, enum br_mask br_mask) 1332 { 1333 const 
struct bpf_insn *insn = &meta->insn; 1334 1335 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op, 1336 insn->src_reg * 2, br_mask, insn->off); 1337 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, 1338 insn->src_reg * 2 + 1, br_mask, insn->off); 1339 1340 return 0; 1341 } 1342 1343 static const struct jmp_code_map { 1344 enum br_mask br_mask; 1345 bool swap; 1346 } jmp_code_map[] = { 1347 [BPF_JGT >> 4] = { BR_BLO, true }, 1348 [BPF_JGE >> 4] = { BR_BHS, false }, 1349 [BPF_JLT >> 4] = { BR_BLO, false }, 1350 [BPF_JLE >> 4] = { BR_BHS, true }, 1351 [BPF_JSGT >> 4] = { BR_BLT, true }, 1352 [BPF_JSGE >> 4] = { BR_BGE, false }, 1353 [BPF_JSLT >> 4] = { BR_BLT, false }, 1354 [BPF_JSLE >> 4] = { BR_BGE, true }, 1355 }; 1356 1357 static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta) 1358 { 1359 unsigned int op; 1360 1361 op = BPF_OP(meta->insn.code) >> 4; 1362 /* br_mask of 0 is BR_BEQ which we don't use in jump code table */ 1363 if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) || 1364 !jmp_code_map[op].br_mask, 1365 "no code found for jump instruction")) 1366 return NULL; 1367 1368 return &jmp_code_map[op]; 1369 } 1370 1371 static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1372 { 1373 const struct bpf_insn *insn = &meta->insn; 1374 u64 imm = insn->imm; /* sign extend */ 1375 const struct jmp_code_map *code; 1376 enum alu_op alu_op, carry_op; 1377 u8 reg = insn->dst_reg * 2; 1378 swreg tmp_reg; 1379 1380 code = nfp_jmp_code_get(meta); 1381 if (!code) 1382 return -EINVAL; 1383 1384 alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB; 1385 carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C; 1386 1387 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 1388 if (!code->swap) 1389 emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg); 1390 else 1391 emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg)); 1392 1393 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 1394 if (!code->swap) 1395 emit_alu(nfp_prog, reg_none(), 1396 reg_a(reg + 1), carry_op, tmp_reg); 1397 else 1398 emit_alu(nfp_prog, reg_none(), 1399 tmp_reg, carry_op, reg_a(reg + 1)); 1400 1401 emit_br(nfp_prog, code->br_mask, insn->off, 0); 1402 1403 return 0; 1404 } 1405 1406 static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1407 { 1408 const struct bpf_insn *insn = &meta->insn; 1409 const struct jmp_code_map *code; 1410 u8 areg, breg; 1411 1412 code = nfp_jmp_code_get(meta); 1413 if (!code) 1414 return -EINVAL; 1415 1416 areg = insn->dst_reg * 2; 1417 breg = insn->src_reg * 2; 1418 1419 if (code->swap) { 1420 areg ^= breg; 1421 breg ^= areg; 1422 areg ^= breg; 1423 } 1424 1425 emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg)); 1426 emit_alu(nfp_prog, reg_none(), 1427 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); 1428 emit_br(nfp_prog, code->br_mask, insn->off, 0); 1429 1430 return 0; 1431 } 1432 1433 static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) 1434 { 1435 emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in, 1436 SHF_SC_R_ROT, 8); 1437 emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out), 1438 SHF_SC_R_ROT, 16); 1439 } 1440 1441 static void 1442 wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, 1443 swreg rreg, bool gen_high_half) 1444 { 1445 emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); 1446 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg); 1447 emit_mul(nfp_prog, lreg, 
MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg); 1448 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg); 1449 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg); 1450 emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none()); 1451 if (gen_high_half) 1452 emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2, 1453 reg_none()); 1454 else 1455 wrp_immed(nfp_prog, dst_hi, 0); 1456 } 1457 1458 static void 1459 wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, 1460 swreg rreg) 1461 { 1462 emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); 1463 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg); 1464 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg); 1465 emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none()); 1466 } 1467 1468 static int 1469 wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1470 bool gen_high_half, bool ropnd_from_reg) 1471 { 1472 swreg multiplier, multiplicand, dst_hi, dst_lo; 1473 const struct bpf_insn *insn = &meta->insn; 1474 u32 lopnd_max, ropnd_max; 1475 u8 dst_reg; 1476 1477 dst_reg = insn->dst_reg; 1478 multiplicand = reg_a(dst_reg * 2); 1479 dst_hi = reg_both(dst_reg * 2 + 1); 1480 dst_lo = reg_both(dst_reg * 2); 1481 lopnd_max = meta->umax_dst; 1482 if (ropnd_from_reg) { 1483 multiplier = reg_b(insn->src_reg * 2); 1484 ropnd_max = meta->umax_src; 1485 } else { 1486 u32 imm = insn->imm; 1487 1488 multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); 1489 ropnd_max = imm; 1490 } 1491 if (lopnd_max > U16_MAX || ropnd_max > U16_MAX) 1492 wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier, 1493 gen_high_half); 1494 else 1495 wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier); 1496 1497 return 0; 1498 } 1499 1500 static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm) 1501 { 1502 swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst); 1503 struct reciprocal_value_adv rvalue; 1504 u8 pre_shift, exp; 1505 swreg magic; 1506 1507 if (imm > U32_MAX) { 1508 wrp_immed(nfp_prog, dst_both, 0); 1509 return 0; 1510 } 1511 1512 /* NOTE: because we are using "reciprocal_value_adv" which doesn't 1513 * support "divisor > (1u << 31)", we need to JIT separate NFP sequence 1514 * to handle such case which actually equals to the result of unsigned 1515 * comparison "dst >= imm" which could be calculated using the following 1516 * NFP sequence: 1517 * 1518 * alu[--, dst, -, imm] 1519 * immed[imm, 0] 1520 * alu[dst, imm, +carry, 0] 1521 * 1522 */ 1523 if (imm > 1U << 31) { 1524 swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); 1525 1526 emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b); 1527 wrp_immed(nfp_prog, imm_a(nfp_prog), 0); 1528 emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C, 1529 reg_imm(0)); 1530 return 0; 1531 } 1532 1533 rvalue = reciprocal_value_adv(imm, 32); 1534 exp = rvalue.exp; 1535 if (rvalue.is_wide_m && !(imm & 1)) { 1536 pre_shift = fls(imm & -imm) - 1; 1537 rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift); 1538 } else { 1539 pre_shift = 0; 1540 } 1541 magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog)); 1542 if (imm == 1U << exp) { 1543 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, 1544 SHF_SC_R_SHF, exp); 1545 } else if (rvalue.is_wide_m) { 1546 wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a, 1547 magic, true); 1548 emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB, 1549 imm_b(nfp_prog)); 1550 
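		/* Wide-m case: dst = ((dst - hi) >> 1 + hi) >> (sh - 1), where
		 * hi is the upper 32 bits of dst * m computed into imm_b()
		 * above; this matches the reciprocal_value_adv() algorithm.
		 */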
emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, 1551 SHF_SC_R_SHF, 1); 1552 emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD, 1553 imm_b(nfp_prog)); 1554 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, 1555 SHF_SC_R_SHF, rvalue.sh - 1); 1556 } else { 1557 if (pre_shift) 1558 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, 1559 dst_b, SHF_SC_R_SHF, pre_shift); 1560 wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true); 1561 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, 1562 dst_b, SHF_SC_R_SHF, rvalue.sh); 1563 } 1564 1565 return 0; 1566 } 1567 1568 static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1569 { 1570 swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); 1571 struct nfp_bpf_cap_adjust_head *adjust_head; 1572 u32 ret_einval, end; 1573 1574 adjust_head = &nfp_prog->bpf->adjust_head; 1575 1576 /* Optimized version - 5 vs 14 cycles */ 1577 if (nfp_prog->adjust_head_location != UINT_MAX) { 1578 if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n)) 1579 return -EINVAL; 1580 1581 emit_alu(nfp_prog, pptr_reg(nfp_prog), 1582 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog)); 1583 emit_alu(nfp_prog, plen_reg(nfp_prog), 1584 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1585 emit_alu(nfp_prog, pv_len(nfp_prog), 1586 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1587 1588 wrp_immed(nfp_prog, reg_both(0), 0); 1589 wrp_immed(nfp_prog, reg_both(1), 0); 1590 1591 /* TODO: when adjust head is guaranteed to succeed we can 1592 * also eliminate the following if (r0 == 0) branch. 1593 */ 1594 1595 return 0; 1596 } 1597 1598 ret_einval = nfp_prog_current_offset(nfp_prog) + 14; 1599 end = ret_einval + 2; 1600 1601 /* We need to use a temp because offset is just a part of the pkt ptr */ 1602 emit_alu(nfp_prog, tmp, 1603 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog)); 1604 1605 /* Validate result will fit within FW datapath constraints */ 1606 emit_alu(nfp_prog, reg_none(), 1607 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min)); 1608 emit_br(nfp_prog, BR_BLO, ret_einval, 0); 1609 emit_alu(nfp_prog, reg_none(), 1610 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp); 1611 emit_br(nfp_prog, BR_BLO, ret_einval, 0); 1612 1613 /* Validate the length is at least ETH_HLEN */ 1614 emit_alu(nfp_prog, tmp_len, 1615 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1616 emit_alu(nfp_prog, reg_none(), 1617 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN)); 1618 emit_br(nfp_prog, BR_BMI, ret_einval, 0); 1619 1620 /* Load the ret code */ 1621 wrp_immed(nfp_prog, reg_both(0), 0); 1622 wrp_immed(nfp_prog, reg_both(1), 0); 1623 1624 /* Modify the packet metadata */ 1625 emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0); 1626 1627 /* Skip over the -EINVAL ret code (defer 2) */ 1628 emit_br(nfp_prog, BR_UNC, end, 2); 1629 1630 emit_alu(nfp_prog, plen_reg(nfp_prog), 1631 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1632 emit_alu(nfp_prog, pv_len(nfp_prog), 1633 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1634 1635 /* return -EINVAL target */ 1636 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval)) 1637 return -EINVAL; 1638 1639 wrp_immed(nfp_prog, reg_both(0), -22); 1640 wrp_immed(nfp_prog, reg_both(1), ~0); 1641 1642 if (!nfp_prog_confirm_current_offset(nfp_prog, end)) 1643 return -EINVAL; 1644 1645 return 0; 1646 } 1647 1648 static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1649 { 1650 u32 ret_einval, end; 1651 swreg plen, delta; 1652 1653 BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN)); 
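	/* ret_einval/end below are fixed instruction counts for the code
	 * emitted by this function; nfp_prog_confirm_current_offset() at the
	 * end verifies they still match what was actually emitted.
	 */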
1654 1655 plen = imm_a(nfp_prog); 1656 delta = reg_a(2 * 2); 1657 1658 ret_einval = nfp_prog_current_offset(nfp_prog) + 9; 1659 end = nfp_prog_current_offset(nfp_prog) + 11; 1660 1661 /* Calculate resulting length */ 1662 emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta); 1663 /* delta == 0 is not allowed by the kernel, add must overflow to make 1664 * length smaller. 1665 */ 1666 emit_br(nfp_prog, BR_BCC, ret_einval, 0); 1667 1668 /* if (new_len < 14) then -EINVAL */ 1669 emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN)); 1670 emit_br(nfp_prog, BR_BMI, ret_einval, 0); 1671 1672 emit_alu(nfp_prog, plen_reg(nfp_prog), 1673 plen_reg(nfp_prog), ALU_OP_ADD, delta); 1674 emit_alu(nfp_prog, pv_len(nfp_prog), 1675 pv_len(nfp_prog), ALU_OP_ADD, delta); 1676 1677 emit_br(nfp_prog, BR_UNC, end, 2); 1678 wrp_immed(nfp_prog, reg_both(0), 0); 1679 wrp_immed(nfp_prog, reg_both(1), 0); 1680 1681 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval)) 1682 return -EINVAL; 1683 1684 wrp_immed(nfp_prog, reg_both(0), -22); 1685 wrp_immed(nfp_prog, reg_both(1), ~0); 1686 1687 if (!nfp_prog_confirm_current_offset(nfp_prog, end)) 1688 return -EINVAL; 1689 1690 return 0; 1691 } 1692 1693 static int 1694 map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1695 { 1696 bool load_lm_ptr; 1697 u32 ret_tgt; 1698 s64 lm_off; 1699 1700 /* We only have to reload LM0 if the key is not at start of stack */ 1701 lm_off = nfp_prog->stack_frame_depth; 1702 lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off; 1703 load_lm_ptr = meta->arg2.var_off || lm_off; 1704 1705 /* Set LM0 to start of key */ 1706 if (load_lm_ptr) 1707 emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0); 1708 if (meta->func_id == BPF_FUNC_map_update_elem) 1709 emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2); 1710 1711 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 1712 2, RELO_BR_HELPER); 1713 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; 1714 1715 /* Load map ID into A0 */ 1716 wrp_mov(nfp_prog, reg_a(0), reg_a(2)); 1717 1718 /* Load the return address into B0 */ 1719 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); 1720 1721 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 1722 return -EINVAL; 1723 1724 /* Reset the LM0 pointer */ 1725 if (!load_lm_ptr) 1726 return 0; 1727 1728 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); 1729 wrp_nops(nfp_prog, 3); 1730 1731 return 0; 1732 } 1733 1734 static int 1735 nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1736 { 1737 __emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM); 1738 /* CSR value is read in following immed[gpr, 0] */ 1739 emit_immed(nfp_prog, reg_both(0), 0, 1740 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1741 emit_immed(nfp_prog, reg_both(1), 0, 1742 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1743 return 0; 1744 } 1745 1746 static int 1747 nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1748 { 1749 swreg ptr_type; 1750 u32 ret_tgt; 1751 1752 ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog)); 1753 1754 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; 1755 1756 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 1757 2, RELO_BR_HELPER); 1758 1759 /* Load ptr type into A1 */ 1760 wrp_mov(nfp_prog, reg_a(1), ptr_type); 1761 1762 /* Load the return address into B0 */ 1763 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); 1764 1765 if (!nfp_prog_confirm_current_offset(nfp_prog, 
ret_tgt)) 1766 return -EINVAL; 1767 1768 return 0; 1769 } 1770 1771 static int 1772 nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1773 { 1774 u32 jmp_tgt; 1775 1776 jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5; 1777 1778 /* Make sure the queue id fits into FW field */ 1779 emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2), 1780 ALU_OP_AND_NOT_B, reg_imm(0xff)); 1781 emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2); 1782 1783 /* Set the 'queue selected' bit and the queue value */ 1784 emit_shf(nfp_prog, pv_qsel_set(nfp_prog), 1785 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1), 1786 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT); 1787 emit_ld_field(nfp_prog, 1788 pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2), 1789 SHF_SC_NONE, 0); 1790 /* Delay slots end here, we will jump over next instruction if queue 1791 * value fits into the field. 1792 */ 1793 emit_ld_field(nfp_prog, 1794 pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX), 1795 SHF_SC_NONE, 0); 1796 1797 if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt)) 1798 return -EINVAL; 1799 1800 return 0; 1801 } 1802 1803 /* --- Callbacks --- */ 1804 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1805 { 1806 const struct bpf_insn *insn = &meta->insn; 1807 u8 dst = insn->dst_reg * 2; 1808 u8 src = insn->src_reg * 2; 1809 1810 if (insn->src_reg == BPF_REG_10) { 1811 swreg stack_depth_reg; 1812 1813 stack_depth_reg = ur_load_imm_any(nfp_prog, 1814 nfp_prog->stack_frame_depth, 1815 stack_imm(nfp_prog)); 1816 emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog), 1817 ALU_OP_ADD, stack_depth_reg); 1818 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1819 } else { 1820 wrp_reg_mov(nfp_prog, dst, src); 1821 wrp_reg_mov(nfp_prog, dst + 1, src + 1); 1822 } 1823 1824 return 0; 1825 } 1826 1827 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1828 { 1829 u64 imm = meta->insn.imm; /* sign extend */ 1830 1831 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U); 1832 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32); 1833 1834 return 0; 1835 } 1836 1837 static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1838 { 1839 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR); 1840 } 1841 1842 static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1843 { 1844 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm); 1845 } 1846 1847 static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1848 { 1849 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND); 1850 } 1851 1852 static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1853 { 1854 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 1855 } 1856 1857 static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1858 { 1859 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR); 1860 } 1861 1862 static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1863 { 1864 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 1865 } 1866 1867 static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1868 { 1869 const struct bpf_insn *insn = &meta->insn; 1870 1871 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1872 reg_a(insn->dst_reg * 2), ALU_OP_ADD, 1873 reg_b(insn->src_reg * 2)); 1874 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1875 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C, 1876 reg_b(insn->src_reg * 2 + 1)); 1877 1878 return 0; 1879 } 1880 
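/* 64-bit add/sub is lowered as a 32-bit ALU op on the low halves followed by
 * the carry/borrow variant (ADD_C/SUB_C) on the high halves, for both register
 * and immediate operands.
 */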
1881 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1882 { 1883 const struct bpf_insn *insn = &meta->insn; 1884 u64 imm = insn->imm; /* sign extend */ 1885 1886 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1887 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1888 1889 return 0; 1890 } 1891 1892 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1893 { 1894 const struct bpf_insn *insn = &meta->insn; 1895 1896 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1897 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1898 reg_b(insn->src_reg * 2)); 1899 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1900 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1901 reg_b(insn->src_reg * 2 + 1)); 1902 1903 return 0; 1904 } 1905 1906 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1907 { 1908 const struct bpf_insn *insn = &meta->insn; 1909 u64 imm = insn->imm; /* sign extend */ 1910 1911 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1912 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1913 1914 return 0; 1915 } 1916 1917 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1918 { 1919 return wrp_mul(nfp_prog, meta, true, true); 1920 } 1921 1922 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1923 { 1924 return wrp_mul(nfp_prog, meta, true, false); 1925 } 1926 1927 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1928 { 1929 const struct bpf_insn *insn = &meta->insn; 1930 1931 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); 1932 } 1933 1934 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1935 { 1936 /* NOTE: verifier hook has rejected cases for which verifier doesn't 1937 * know whether the source operand is constant or not. 1938 */ 1939 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); 1940 } 1941 1942 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1943 { 1944 const struct bpf_insn *insn = &meta->insn; 1945 1946 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1947 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1948 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1949 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1950 1951 return 0; 1952 } 1953 1954 /* Pseudo code: 1955 * if shift_amt >= 32 1956 * dst_high = dst_low << shift_amt[4:0] 1957 * dst_low = 0; 1958 * else 1959 * dst_high = (dst_high, dst_low) >> (32 - shift_amt) 1960 * dst_low = dst_low << shift_amt 1961 * 1962 * The indirect shift will use the same logic at runtime. 
1963 */ 1964 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1965 { 1966 if (shift_amt < 32) { 1967 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), 1968 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, 1969 32 - shift_amt); 1970 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1971 reg_b(dst), SHF_SC_L_SHF, shift_amt); 1972 } else if (shift_amt == 32) { 1973 wrp_reg_mov(nfp_prog, dst + 1, dst); 1974 wrp_immed(nfp_prog, reg_both(dst), 0); 1975 } else if (shift_amt > 32) { 1976 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1977 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); 1978 wrp_immed(nfp_prog, reg_both(dst), 0); 1979 } 1980 1981 return 0; 1982 } 1983 1984 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1985 { 1986 const struct bpf_insn *insn = &meta->insn; 1987 u8 dst = insn->dst_reg * 2; 1988 1989 return __shl_imm64(nfp_prog, dst, insn->imm); 1990 } 1991 1992 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1993 { 1994 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, 1995 reg_b(src)); 1996 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); 1997 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, 1998 reg_b(dst), SHF_SC_R_DSHF); 1999 } 2000 2001 /* NOTE: for indirect left shift, HIGH part should be calculated first. */ 2002 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2003 { 2004 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2005 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2006 reg_b(dst), SHF_SC_L_SHF); 2007 } 2008 2009 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2010 { 2011 shl_reg64_lt32_high(nfp_prog, dst, src); 2012 shl_reg64_lt32_low(nfp_prog, dst, src); 2013 } 2014 2015 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2016 { 2017 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2018 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2019 reg_b(dst), SHF_SC_L_SHF); 2020 wrp_immed(nfp_prog, reg_both(dst), 0); 2021 } 2022 2023 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2024 { 2025 const struct bpf_insn *insn = &meta->insn; 2026 u64 umin, umax; 2027 u8 dst, src; 2028 2029 dst = insn->dst_reg * 2; 2030 umin = meta->umin_src; 2031 umax = meta->umax_src; 2032 if (umin == umax) 2033 return __shl_imm64(nfp_prog, dst, umin); 2034 2035 src = insn->src_reg * 2; 2036 if (umax < 32) { 2037 shl_reg64_lt32(nfp_prog, dst, src); 2038 } else if (umin >= 32) { 2039 shl_reg64_ge32(nfp_prog, dst, src); 2040 } else { 2041 /* Generate different instruction sequences depending on runtime 2042 * value of shift amount. 2043 */ 2044 u16 label_ge32, label_end; 2045 2046 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; 2047 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2048 2049 shl_reg64_lt32_high(nfp_prog, dst, src); 2050 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2051 emit_br(nfp_prog, BR_UNC, label_end, 2); 2052 /* shl_reg64_lt32_low packed in delay slot. 
*/ 2053 shl_reg64_lt32_low(nfp_prog, dst, src); 2054 2055 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2056 return -EINVAL; 2057 shl_reg64_ge32(nfp_prog, dst, src); 2058 2059 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2060 return -EINVAL; 2061 } 2062 2063 return 0; 2064 } 2065 2066 /* Pseudo code: 2067 * if shift_amt >= 32 2068 * dst_high = 0; 2069 * dst_low = dst_high >> shift_amt[4:0] 2070 * else 2071 * dst_high = dst_high >> shift_amt 2072 * dst_low = (dst_high, dst_low) >> shift_amt 2073 * 2074 * The indirect shift will use the same logic at runtime. 2075 */ 2076 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2077 { 2078 if (shift_amt < 32) { 2079 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2080 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2081 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2082 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2083 } else if (shift_amt == 32) { 2084 wrp_reg_mov(nfp_prog, dst, dst + 1); 2085 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2086 } else if (shift_amt > 32) { 2087 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2088 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2089 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2090 } 2091 2092 return 0; 2093 } 2094 2095 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2096 { 2097 const struct bpf_insn *insn = &meta->insn; 2098 u8 dst = insn->dst_reg * 2; 2099 2100 return __shr_imm64(nfp_prog, dst, insn->imm); 2101 } 2102 2103 /* NOTE: for indirect right shift, LOW part should be calculated first. */ 2104 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2105 { 2106 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2107 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2108 reg_b(dst + 1), SHF_SC_R_SHF); 2109 } 2110 2111 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2112 { 2113 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2114 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2115 reg_b(dst), SHF_SC_R_DSHF); 2116 } 2117 2118 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2119 { 2120 shr_reg64_lt32_low(nfp_prog, dst, src); 2121 shr_reg64_lt32_high(nfp_prog, dst, src); 2122 } 2123 2124 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2125 { 2126 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2127 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2128 reg_b(dst + 1), SHF_SC_R_SHF); 2129 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2130 } 2131 2132 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2133 { 2134 const struct bpf_insn *insn = &meta->insn; 2135 u64 umin, umax; 2136 u8 dst, src; 2137 2138 dst = insn->dst_reg * 2; 2139 umin = meta->umin_src; 2140 umax = meta->umax_src; 2141 if (umin == umax) 2142 return __shr_imm64(nfp_prog, dst, umin); 2143 2144 src = insn->src_reg * 2; 2145 if (umax < 32) { 2146 shr_reg64_lt32(nfp_prog, dst, src); 2147 } else if (umin >= 32) { 2148 shr_reg64_ge32(nfp_prog, dst, src); 2149 } else { 2150 /* Generate different instruction sequences depending on runtime 2151 * value of shift amount. 
2152 */ 2153 u16 label_ge32, label_end; 2154 2155 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2156 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2157 shr_reg64_lt32_low(nfp_prog, dst, src); 2158 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2159 emit_br(nfp_prog, BR_UNC, label_end, 2); 2160 /* shr_reg64_lt32_high packed in delay slot. */ 2161 shr_reg64_lt32_high(nfp_prog, dst, src); 2162 2163 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2164 return -EINVAL; 2165 shr_reg64_ge32(nfp_prog, dst, src); 2166 2167 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2168 return -EINVAL; 2169 } 2170 2171 return 0; 2172 } 2173 2174 /* Code logic is the same as __shr_imm64 except ashr requires signedness bit 2175 * told through PREV_ALU result. 2176 */ 2177 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2178 { 2179 if (shift_amt < 32) { 2180 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2181 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2182 /* Set signedness bit. */ 2183 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2184 reg_imm(0)); 2185 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2186 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2187 } else if (shift_amt == 32) { 2188 /* NOTE: this also helps setting signedness bit. */ 2189 wrp_reg_mov(nfp_prog, dst, dst + 1); 2190 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2191 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2192 } else if (shift_amt > 32) { 2193 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2194 reg_imm(0)); 2195 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2196 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2197 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2198 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2199 } 2200 2201 return 0; 2202 } 2203 2204 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2205 { 2206 const struct bpf_insn *insn = &meta->insn; 2207 u8 dst = insn->dst_reg * 2; 2208 2209 return __ashr_imm64(nfp_prog, dst, insn->imm); 2210 } 2211 2212 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2213 { 2214 /* NOTE: the first insn will set both indirect shift amount (source A) 2215 * and signedness bit (MSB of result). 2216 */ 2217 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2218 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2219 reg_b(dst + 1), SHF_SC_R_SHF); 2220 } 2221 2222 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2223 { 2224 /* NOTE: it is the same as logic shift because we don't need to shift in 2225 * signedness bit when the shift amount is less than 32. 2226 */ 2227 return shr_reg64_lt32_low(nfp_prog, dst, src); 2228 } 2229 2230 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2231 { 2232 ashr_reg64_lt32_low(nfp_prog, dst, src); 2233 ashr_reg64_lt32_high(nfp_prog, dst, src); 2234 } 2235 2236 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2237 { 2238 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2239 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2240 reg_b(dst + 1), SHF_SC_R_SHF); 2241 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2242 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2243 } 2244 2245 /* Like ashr_imm64, but need to use indirect shift. 
*/ 2246 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2247 { 2248 const struct bpf_insn *insn = &meta->insn; 2249 u64 umin, umax; 2250 u8 dst, src; 2251 2252 dst = insn->dst_reg * 2; 2253 umin = meta->umin_src; 2254 umax = meta->umax_src; 2255 if (umin == umax) 2256 return __ashr_imm64(nfp_prog, dst, umin); 2257 2258 src = insn->src_reg * 2; 2259 if (umax < 32) { 2260 ashr_reg64_lt32(nfp_prog, dst, src); 2261 } else if (umin >= 32) { 2262 ashr_reg64_ge32(nfp_prog, dst, src); 2263 } else { 2264 u16 label_ge32, label_end; 2265 2266 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2267 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2268 ashr_reg64_lt32_low(nfp_prog, dst, src); 2269 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2270 emit_br(nfp_prog, BR_UNC, label_end, 2); 2271 /* ashr_reg64_lt32_high packed in delay slot. */ 2272 ashr_reg64_lt32_high(nfp_prog, dst, src); 2273 2274 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2275 return -EINVAL; 2276 ashr_reg64_ge32(nfp_prog, dst, src); 2277 2278 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2279 return -EINVAL; 2280 } 2281 2282 return 0; 2283 } 2284 2285 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2286 { 2287 const struct bpf_insn *insn = &meta->insn; 2288 2289 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 2290 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2291 2292 return 0; 2293 } 2294 2295 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2296 { 2297 const struct bpf_insn *insn = &meta->insn; 2298 2299 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 2300 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2301 2302 return 0; 2303 } 2304 2305 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2306 { 2307 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 2308 } 2309 2310 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2311 { 2312 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 2313 } 2314 2315 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2316 { 2317 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 2318 } 2319 2320 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2321 { 2322 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 2323 } 2324 2325 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2326 { 2327 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 2328 } 2329 2330 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2331 { 2332 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2333 } 2334 2335 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2336 { 2337 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 2338 } 2339 2340 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2341 { 2342 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2343 } 2344 2345 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2346 { 2347 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 2348 } 2349 2350 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2351 { 2352 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2353 } 2354 2355 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2356 { 2357 return wrp_mul(nfp_prog, meta, false, true); 2358 } 2359 2360 
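/* All four multiply callbacks funnel into wrp_mul(); judging only by the
 * call sites here, the first bool selects whether the full 64-bit product
 * is produced (BPF_ALU64) or just the low 32 bits (BPF_ALU), and the second
 * whether the right-hand operand is a register (BPF_X) or an immediate
 * (BPF_K), e.g. mul_imm() below uses (false, false).
 */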
static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2361 { 2362 return wrp_mul(nfp_prog, meta, false, false); 2363 } 2364 2365 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2366 { 2367 return div_reg64(nfp_prog, meta); 2368 } 2369 2370 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2371 { 2372 return div_imm64(nfp_prog, meta); 2373 } 2374 2375 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2376 { 2377 u8 dst = meta->insn.dst_reg * 2; 2378 2379 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2380 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2381 2382 return 0; 2383 } 2384 2385 static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2386 { 2387 /* Set signedness bit (MSB of result). */ 2388 emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0)); 2389 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst), 2390 SHF_SC_R_SHF, shift_amt); 2391 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2392 2393 return 0; 2394 } 2395 2396 static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2397 { 2398 const struct bpf_insn *insn = &meta->insn; 2399 u64 umin, umax; 2400 u8 dst, src; 2401 2402 dst = insn->dst_reg * 2; 2403 umin = meta->umin_src; 2404 umax = meta->umax_src; 2405 if (umin == umax) 2406 return __ashr_imm(nfp_prog, dst, umin); 2407 2408 src = insn->src_reg * 2; 2409 /* NOTE: the first insn will set both indirect shift amount (source A) 2410 * and signedness bit (MSB of result). 2411 */ 2412 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst)); 2413 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2414 reg_b(dst), SHF_SC_R_SHF); 2415 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2416 2417 return 0; 2418 } 2419 2420 static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2421 { 2422 const struct bpf_insn *insn = &meta->insn; 2423 u8 dst = insn->dst_reg * 2; 2424 2425 return __ashr_imm(nfp_prog, dst, insn->imm); 2426 } 2427 2428 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2429 { 2430 const struct bpf_insn *insn = &meta->insn; 2431 2432 if (!insn->imm) 2433 return 1; /* TODO: zero shift means indirect */ 2434 2435 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 2436 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 2437 SHF_SC_L_SHF, insn->imm); 2438 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2439 2440 return 0; 2441 } 2442 2443 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2444 { 2445 const struct bpf_insn *insn = &meta->insn; 2446 u8 gpr = insn->dst_reg * 2; 2447 2448 switch (insn->imm) { 2449 case 16: 2450 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2451 SHF_SC_R_ROT, 8); 2452 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2453 SHF_SC_R_SHF, 16); 2454 2455 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2456 break; 2457 case 32: 2458 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2459 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2460 break; 2461 case 64: 2462 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2463 2464 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2465 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2466 break; 2467 } 2468 2469 return 0; 2470 } 2471 2472 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2473 { 2474 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2475 u32 imm_lo, imm_hi; 2476 u8 dst; 2477 2478 dst = prev->insn.dst_reg 
* 2; 2479 imm_lo = prev->insn.imm; 2480 imm_hi = meta->insn.imm; 2481 2482 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2483 2484 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2485 if (imm_hi == imm_lo) 2486 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2487 else 2488 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2489 2490 return 0; 2491 } 2492 2493 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2494 { 2495 meta->double_cb = imm_ld8_part2; 2496 return 0; 2497 } 2498 2499 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2500 { 2501 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2502 } 2503 2504 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2505 { 2506 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2507 } 2508 2509 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2510 { 2511 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2512 } 2513 2514 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2515 { 2516 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2517 meta->insn.src_reg * 2, 1); 2518 } 2519 2520 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2521 { 2522 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2523 meta->insn.src_reg * 2, 2); 2524 } 2525 2526 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2527 { 2528 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2529 meta->insn.src_reg * 2, 4); 2530 } 2531 2532 static int 2533 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2534 unsigned int size, unsigned int ptr_off) 2535 { 2536 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2537 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2538 true, wrp_lmem_load); 2539 } 2540 2541 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2542 u8 size) 2543 { 2544 swreg dst = reg_both(meta->insn.dst_reg * 2); 2545 2546 switch (meta->insn.off) { 2547 case offsetof(struct __sk_buff, len): 2548 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2549 return -EOPNOTSUPP; 2550 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2551 break; 2552 case offsetof(struct __sk_buff, data): 2553 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2554 return -EOPNOTSUPP; 2555 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2556 break; 2557 case offsetof(struct __sk_buff, data_end): 2558 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2559 return -EOPNOTSUPP; 2560 emit_alu(nfp_prog, dst, 2561 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2562 break; 2563 default: 2564 return -EOPNOTSUPP; 2565 } 2566 2567 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2568 2569 return 0; 2570 } 2571 2572 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2573 u8 size) 2574 { 2575 swreg dst = reg_both(meta->insn.dst_reg * 2); 2576 2577 switch (meta->insn.off) { 2578 case offsetof(struct xdp_md, data): 2579 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2580 return -EOPNOTSUPP; 2581 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2582 break; 2583 case offsetof(struct xdp_md, data_end): 2584 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2585 return -EOPNOTSUPP; 2586 emit_alu(nfp_prog, dst, 2587 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2588 break; 2589 default: 2590 return -EOPNOTSUPP; 2591 } 2592 2593 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2594 2595 return 0; 
2596 } 2597 2598 static int 2599 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2600 unsigned int size) 2601 { 2602 swreg tmp_reg; 2603 2604 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2605 2606 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2607 tmp_reg, meta->insn.dst_reg * 2, size); 2608 } 2609 2610 static int 2611 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2612 unsigned int size) 2613 { 2614 swreg tmp_reg; 2615 2616 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2617 2618 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2619 tmp_reg, meta->insn.dst_reg * 2, size); 2620 } 2621 2622 static void 2623 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2624 struct nfp_insn_meta *meta) 2625 { 2626 s16 range_start = meta->pkt_cache.range_start; 2627 s16 range_end = meta->pkt_cache.range_end; 2628 swreg src_base, off; 2629 u8 xfer_num, len; 2630 bool indir; 2631 2632 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2633 src_base = reg_a(meta->insn.src_reg * 2); 2634 len = range_end - range_start; 2635 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2636 2637 indir = len > 8 * REG_WIDTH; 2638 /* Setup PREV_ALU for indirect mode. */ 2639 if (indir) 2640 wrp_immed(nfp_prog, reg_none(), 2641 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2642 2643 /* Cache memory into transfer-in registers. */ 2644 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2645 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2646 } 2647 2648 static int 2649 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2650 struct nfp_insn_meta *meta, 2651 unsigned int size) 2652 { 2653 s16 range_start = meta->pkt_cache.range_start; 2654 s16 insn_off = meta->insn.off - range_start; 2655 swreg dst_lo, dst_hi, src_lo, src_mid; 2656 u8 dst_gpr = meta->insn.dst_reg * 2; 2657 u8 len_lo = size, len_mid = 0; 2658 u8 idx = insn_off / REG_WIDTH; 2659 u8 off = insn_off % REG_WIDTH; 2660 2661 dst_hi = reg_both(dst_gpr + 1); 2662 dst_lo = reg_both(dst_gpr); 2663 src_lo = reg_xfer(idx); 2664 2665 /* The read length could involve as many as three registers. */ 2666 if (size > REG_WIDTH - off) { 2667 /* Calculate the part in the second register. */ 2668 len_lo = REG_WIDTH - off; 2669 len_mid = size - len_lo; 2670 2671 /* Calculate the part in the third register. 
*/ 2672 if (size > 2 * REG_WIDTH - off) 2673 len_mid = REG_WIDTH; 2674 } 2675 2676 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2677 2678 if (!len_mid) { 2679 wrp_immed(nfp_prog, dst_hi, 0); 2680 return 0; 2681 } 2682 2683 src_mid = reg_xfer(idx + 1); 2684 2685 if (size <= REG_WIDTH) { 2686 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2687 wrp_immed(nfp_prog, dst_hi, 0); 2688 } else { 2689 swreg src_hi = reg_xfer(idx + 2); 2690 2691 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2692 REG_WIDTH - len_lo, len_lo); 2693 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2694 REG_WIDTH - len_lo); 2695 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2696 len_lo); 2697 } 2698 2699 return 0; 2700 } 2701 2702 static int 2703 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2704 struct nfp_insn_meta *meta, 2705 unsigned int size) 2706 { 2707 swreg dst_lo, dst_hi, src_lo; 2708 u8 dst_gpr, idx; 2709 2710 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2711 dst_gpr = meta->insn.dst_reg * 2; 2712 dst_hi = reg_both(dst_gpr + 1); 2713 dst_lo = reg_both(dst_gpr); 2714 src_lo = reg_xfer(idx); 2715 2716 if (size < REG_WIDTH) { 2717 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2718 wrp_immed(nfp_prog, dst_hi, 0); 2719 } else if (size == REG_WIDTH) { 2720 wrp_mov(nfp_prog, dst_lo, src_lo); 2721 wrp_immed(nfp_prog, dst_hi, 0); 2722 } else { 2723 swreg src_hi = reg_xfer(idx + 1); 2724 2725 wrp_mov(nfp_prog, dst_lo, src_lo); 2726 wrp_mov(nfp_prog, dst_hi, src_hi); 2727 } 2728 2729 return 0; 2730 } 2731 2732 static int 2733 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2734 struct nfp_insn_meta *meta, unsigned int size) 2735 { 2736 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2737 2738 if (IS_ALIGNED(off, REG_WIDTH)) 2739 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2740 2741 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2742 } 2743 2744 static int 2745 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2746 unsigned int size) 2747 { 2748 if (meta->ldst_gather_len) 2749 return nfp_cpp_memcpy(nfp_prog, meta); 2750 2751 if (meta->ptr.type == PTR_TO_CTX) { 2752 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2753 return mem_ldx_xdp(nfp_prog, meta, size); 2754 else 2755 return mem_ldx_skb(nfp_prog, meta, size); 2756 } 2757 2758 if (meta->ptr.type == PTR_TO_PACKET) { 2759 if (meta->pkt_cache.range_end) { 2760 if (meta->pkt_cache.do_init) 2761 mem_ldx_data_init_pktcache(nfp_prog, meta); 2762 2763 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2764 } else { 2765 return mem_ldx_data(nfp_prog, meta, size); 2766 } 2767 } 2768 2769 if (meta->ptr.type == PTR_TO_STACK) 2770 return mem_ldx_stack(nfp_prog, meta, size, 2771 meta->ptr.off + meta->ptr.var_off.value); 2772 2773 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2774 return mem_ldx_emem(nfp_prog, meta, size); 2775 2776 return -EOPNOTSUPP; 2777 } 2778 2779 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2780 { 2781 return mem_ldx(nfp_prog, meta, 1); 2782 } 2783 2784 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2785 { 2786 return mem_ldx(nfp_prog, meta, 2); 2787 } 2788 2789 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2790 { 2791 return mem_ldx(nfp_prog, meta, 4); 2792 } 2793 2794 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2795 { 2796 return mem_ldx(nfp_prog, meta, 8); 2797 } 2798 2799 static int 2800 
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2801 unsigned int size) 2802 { 2803 u64 imm = meta->insn.imm; /* sign extend */ 2804 swreg off_reg; 2805 2806 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2807 2808 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2809 imm, size); 2810 } 2811 2812 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2813 unsigned int size) 2814 { 2815 if (meta->ptr.type == PTR_TO_PACKET) 2816 return mem_st_data(nfp_prog, meta, size); 2817 2818 return -EOPNOTSUPP; 2819 } 2820 2821 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2822 { 2823 return mem_st(nfp_prog, meta, 1); 2824 } 2825 2826 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2827 { 2828 return mem_st(nfp_prog, meta, 2); 2829 } 2830 2831 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2832 { 2833 return mem_st(nfp_prog, meta, 4); 2834 } 2835 2836 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2837 { 2838 return mem_st(nfp_prog, meta, 8); 2839 } 2840 2841 static int 2842 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2843 unsigned int size) 2844 { 2845 swreg off_reg; 2846 2847 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2848 2849 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2850 meta->insn.src_reg * 2, size); 2851 } 2852 2853 static int 2854 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2855 unsigned int size, unsigned int ptr_off) 2856 { 2857 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2858 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2859 false, wrp_lmem_store); 2860 } 2861 2862 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2863 { 2864 switch (meta->insn.off) { 2865 case offsetof(struct xdp_md, rx_queue_index): 2866 return nfp_queue_select(nfp_prog, meta); 2867 } 2868 2869 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2870 return -EOPNOTSUPP; 2871 } 2872 2873 static int 2874 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2875 unsigned int size) 2876 { 2877 if (meta->ptr.type == PTR_TO_PACKET) 2878 return mem_stx_data(nfp_prog, meta, size); 2879 2880 if (meta->ptr.type == PTR_TO_STACK) 2881 return mem_stx_stack(nfp_prog, meta, size, 2882 meta->ptr.off + meta->ptr.var_off.value); 2883 2884 return -EOPNOTSUPP; 2885 } 2886 2887 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2888 { 2889 return mem_stx(nfp_prog, meta, 1); 2890 } 2891 2892 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2893 { 2894 return mem_stx(nfp_prog, meta, 2); 2895 } 2896 2897 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2898 { 2899 if (meta->ptr.type == PTR_TO_CTX) 2900 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2901 return mem_stx_xdp(nfp_prog, meta); 2902 return mem_stx(nfp_prog, meta, 4); 2903 } 2904 2905 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2906 { 2907 return mem_stx(nfp_prog, meta, 8); 2908 } 2909 2910 static int 2911 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2912 { 2913 u8 dst_gpr = meta->insn.dst_reg * 2; 2914 u8 src_gpr = meta->insn.src_reg * 2; 2915 unsigned int full_add, out; 2916 swreg addra, addrb, off; 2917 2918 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2919 2920 /* We can fit 16 bits into 
command immediate, if we know the immediate 2921 * is guaranteed to either always or never fit into 16 bit we only 2922 * generate code to handle that particular case, otherwise generate 2923 * code for both. 2924 */ 2925 out = nfp_prog_current_offset(nfp_prog); 2926 full_add = nfp_prog_current_offset(nfp_prog); 2927 2928 if (meta->insn.off) { 2929 out += 2; 2930 full_add += 2; 2931 } 2932 if (meta->xadd_maybe_16bit) { 2933 out += 3; 2934 full_add += 3; 2935 } 2936 if (meta->xadd_over_16bit) 2937 out += 2 + is64; 2938 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2939 out += 5; 2940 full_add += 5; 2941 } 2942 2943 /* Generate the branch for choosing add_imm vs add */ 2944 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2945 swreg max_imm = imm_a(nfp_prog); 2946 2947 wrp_immed(nfp_prog, max_imm, 0xffff); 2948 emit_alu(nfp_prog, reg_none(), 2949 max_imm, ALU_OP_SUB, reg_b(src_gpr)); 2950 emit_alu(nfp_prog, reg_none(), 2951 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); 2952 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); 2953 /* defer for add */ 2954 } 2955 2956 /* If insn has an offset add to the address */ 2957 if (!meta->insn.off) { 2958 addra = reg_a(dst_gpr); 2959 addrb = reg_b(dst_gpr + 1); 2960 } else { 2961 emit_alu(nfp_prog, imma_a(nfp_prog), 2962 reg_a(dst_gpr), ALU_OP_ADD, off); 2963 emit_alu(nfp_prog, imma_b(nfp_prog), 2964 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); 2965 addra = imma_a(nfp_prog); 2966 addrb = imma_b(nfp_prog); 2967 } 2968 2969 /* Generate the add_imm if 16 bits are possible */ 2970 if (meta->xadd_maybe_16bit) { 2971 swreg prev_alu = imm_a(nfp_prog); 2972 2973 wrp_immed(nfp_prog, prev_alu, 2974 FIELD_PREP(CMD_OVE_DATA, 2) | 2975 CMD_OVE_LEN | 2976 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); 2977 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); 2978 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, 2979 addra, addrb, 0, CMD_CTX_NO_SWAP); 2980 2981 if (meta->xadd_over_16bit) 2982 emit_br(nfp_prog, BR_UNC, out, 0); 2983 } 2984 2985 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) 2986 return -EINVAL; 2987 2988 /* Generate the add if 16 bits are not guaranteed */ 2989 if (meta->xadd_over_16bit) { 2990 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, 2991 addra, addrb, is64 << 2, 2992 is64 ? 
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2993 2994 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2995 if (is64) 2996 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 2997 } 2998 2999 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 3000 return -EINVAL; 3001 3002 return 0; 3003 } 3004 3005 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3006 { 3007 return mem_xadd(nfp_prog, meta, false); 3008 } 3009 3010 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3011 { 3012 return mem_xadd(nfp_prog, meta, true); 3013 } 3014 3015 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3016 { 3017 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 3018 3019 return 0; 3020 } 3021 3022 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3023 { 3024 const struct bpf_insn *insn = &meta->insn; 3025 u64 imm = insn->imm; /* sign extend */ 3026 swreg or1, or2, tmp_reg; 3027 3028 or1 = reg_a(insn->dst_reg * 2); 3029 or2 = reg_b(insn->dst_reg * 2 + 1); 3030 3031 if (imm & ~0U) { 3032 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3033 emit_alu(nfp_prog, imm_a(nfp_prog), 3034 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3035 or1 = imm_a(nfp_prog); 3036 } 3037 3038 if (imm >> 32) { 3039 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3040 emit_alu(nfp_prog, imm_b(nfp_prog), 3041 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3042 or2 = imm_b(nfp_prog); 3043 } 3044 3045 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 3046 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3047 3048 return 0; 3049 } 3050 3051 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3052 { 3053 const struct bpf_insn *insn = &meta->insn; 3054 u64 imm = insn->imm; /* sign extend */ 3055 u8 dst_gpr = insn->dst_reg * 2; 3056 swreg tmp_reg; 3057 3058 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3059 emit_alu(nfp_prog, imm_b(nfp_prog), 3060 reg_a(dst_gpr), ALU_OP_AND, tmp_reg); 3061 /* Upper word of the mask can only be 0 or ~0 from sign extension, 3062 * so either ignore it or OR the whole thing in. 
3063 */ 3064 if (imm >> 32) 3065 emit_alu(nfp_prog, reg_none(), 3066 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); 3067 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3068 3069 return 0; 3070 } 3071 3072 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3073 { 3074 const struct bpf_insn *insn = &meta->insn; 3075 u64 imm = insn->imm; /* sign extend */ 3076 swreg tmp_reg; 3077 3078 if (!imm) { 3079 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 3080 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 3081 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3082 return 0; 3083 } 3084 3085 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3086 emit_alu(nfp_prog, reg_none(), 3087 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3088 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3089 3090 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3091 emit_alu(nfp_prog, reg_none(), 3092 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3093 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3094 3095 return 0; 3096 } 3097 3098 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3099 { 3100 const struct bpf_insn *insn = &meta->insn; 3101 3102 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 3103 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 3104 emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 3105 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 3106 emit_alu(nfp_prog, reg_none(), 3107 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 3108 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3109 3110 return 0; 3111 } 3112 3113 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3114 { 3115 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 3116 } 3117 3118 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3119 { 3120 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 3121 } 3122 3123 static int 3124 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3125 { 3126 u32 ret_tgt, stack_depth, offset_br; 3127 swreg tmp_reg; 3128 3129 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); 3130 /* Space for saving the return address is accounted for by the callee, 3131 * so stack_depth can be zero for the main function. 3132 */ 3133 if (stack_depth) { 3134 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3135 stack_imm(nfp_prog)); 3136 emit_alu(nfp_prog, stack_reg(nfp_prog), 3137 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); 3138 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3139 NFP_CSR_ACT_LM_ADDR0); 3140 } 3141 3142 /* Two cases for jumping to the callee: 3143 * 3144 * - If callee uses and needs to save R6~R9 then: 3145 * 1. Put the start offset of the callee into imm_b(). This will 3146 * require a fixup step, as we do not necessarily know this 3147 * address yet. 3148 * 2. Put the return address from the callee to the caller into 3149 * register ret_reg(). 3150 * 3. (After defer slots are consumed) Jump to the subroutine that 3151 * pushes the registers to the stack. 3152 * The subroutine acts as a trampoline, and returns to the address in 3153 * imm_b(), i.e. jumps to the callee. 3154 * 3155 * - If callee does not need to save R6~R9 then just load return 3156 * address to the caller in ret_reg(), and jump to the callee 3157 * directly. 3158 * 3159 * Using ret_reg() to pass the return address to the callee is set here 3160 * as a convention. The callee can then push this address onto its 3161 * stack frame in its prologue. 
The advantages of passing the return 3162 * address through ret_reg(), instead of pushing it to the stack right 3163 * here, are the following: 3164 * - It looks cleaner. 3165 * - If the called function is called multiple times, we get a lower 3166 * program size. 3167 * - We save the two no-op instructions that would otherwise have to be 3168 * added just before the emit_br() when the stack depth is not null. 3169 * - If we ever find a register to hold the return address during the 3170 * whole execution of the callee, we will not have to push the return 3171 * address to the stack for leaf functions. 3172 */ 3173 if (!meta->jmp_dst) { 3174 pr_err("BUG: BPF-to-BPF call has no destination recorded\n"); 3175 return -ELOOP; 3176 } 3177 if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) { 3178 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; 3179 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, 3180 RELO_BR_GO_CALL_PUSH_REGS); 3181 offset_br = nfp_prog_current_offset(nfp_prog); 3182 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL); 3183 } else { 3184 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; 3185 emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1); 3186 offset_br = nfp_prog_current_offset(nfp_prog); 3187 } 3188 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL); 3189 3190 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 3191 return -EINVAL; 3192 3193 if (stack_depth) { 3194 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3195 stack_imm(nfp_prog)); 3196 emit_alu(nfp_prog, stack_reg(nfp_prog), 3197 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg); 3198 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3199 NFP_CSR_ACT_LM_ADDR0); 3200 wrp_nops(nfp_prog, 3); 3201 } 3202 3203 meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog); 3204 meta->num_insns_after_br -= offset_br; 3205 3206 return 0; 3207 } 3208 3209 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3210 { 3211 switch (meta->insn.imm) { 3212 case BPF_FUNC_xdp_adjust_head: 3213 return adjust_head(nfp_prog, meta); 3214 case BPF_FUNC_xdp_adjust_tail: 3215 return adjust_tail(nfp_prog, meta); 3216 case BPF_FUNC_map_lookup_elem: 3217 case BPF_FUNC_map_update_elem: 3218 case BPF_FUNC_map_delete_elem: 3219 return map_call_stack_common(nfp_prog, meta); 3220 case BPF_FUNC_get_prandom_u32: 3221 return nfp_get_prandom_u32(nfp_prog, meta); 3222 case BPF_FUNC_perf_event_output: 3223 return nfp_perf_event_output(nfp_prog, meta); 3224 default: 3225 WARN_ONCE(1, "verifier allowed unsupported function\n"); 3226 return -EOPNOTSUPP; 3227 } 3228 } 3229 3230 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3231 { 3232 if (is_mbpf_pseudo_call(meta)) 3233 return bpf_to_bpf_call(nfp_prog, meta); 3234 else 3235 return helper_call(nfp_prog, meta); 3236 } 3237 3238 static bool nfp_is_main_function(struct nfp_insn_meta *meta) 3239 { 3240 return meta->subprog_idx == 0; 3241 } 3242 3243 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3244 { 3245 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 3246 3247 return 0; 3248 } 3249 3250 static int 3251 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3252 { 3253 if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) { 3254 /* Pop R6~R9 from the stack via the related subroutine. 3255 * We loaded the return address to the caller into ret_reg(). 3256 * This means that the subroutine does not come back here; we 3257 * make it jump back to the subprogram caller directly! 
3258 */ 3259 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1, 3260 RELO_BR_GO_CALL_POP_REGS); 3261 /* Pop return address from the stack. */ 3262 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3263 } else { 3264 /* Pop return address from the stack. */ 3265 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3266 /* Jump back to caller if no callee-saved registers were used 3267 * by the subprogram. 3268 */ 3269 emit_rtn(nfp_prog, ret_reg(nfp_prog), 0); 3270 } 3271 3272 return 0; 3273 } 3274 3275 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3276 { 3277 if (nfp_is_main_function(meta)) 3278 return goto_out(nfp_prog, meta); 3279 else 3280 return nfp_subprog_epilogue(nfp_prog, meta); 3281 } 3282 3283 static const instr_cb_t instr_cb[256] = { 3284 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 3285 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 3286 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 3287 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 3288 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 3289 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 3290 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 3291 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 3292 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 3293 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 3294 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 3295 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 3296 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, 3297 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, 3298 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, 3299 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, 3300 [BPF_ALU64 | BPF_NEG] = neg_reg64, 3301 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, 3302 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 3303 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, 3304 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 3305 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, 3306 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, 3307 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 3308 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 3309 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 3310 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 3311 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 3312 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 3313 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 3314 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 3315 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 3316 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 3317 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 3318 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 3319 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, 3320 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, 3321 [BPF_ALU | BPF_DIV | BPF_X] = div_reg, 3322 [BPF_ALU | BPF_DIV | BPF_K] = div_imm, 3323 [BPF_ALU | BPF_NEG] = neg_reg, 3324 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 3325 [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg, 3326 [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm, 3327 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 3328 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 3329 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 3330 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 3331 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 3332 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 3333 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 3334 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 3335 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 3336 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 3337 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 3338 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 3339 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 3340 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 3341 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 3342 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 3343 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 3344 [BPF_STX | BPF_XADD | 
BPF_DW] = mem_xadd8, 3345 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 3346 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 3347 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 3348 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 3349 [BPF_JMP | BPF_JA | BPF_K] = jump, 3350 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 3351 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, 3352 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, 3353 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, 3354 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, 3355 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, 3356 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, 3357 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, 3358 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, 3359 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 3360 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 3361 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 3362 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, 3363 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, 3364 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, 3365 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, 3366 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, 3367 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, 3368 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, 3369 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, 3370 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 3371 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 3372 [BPF_JMP | BPF_CALL] = call, 3373 [BPF_JMP | BPF_EXIT] = jmp_exit, 3374 }; 3375 3376 /* --- Assembler logic --- */ 3377 static int 3378 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 3379 struct nfp_insn_meta *jmp_dst, u32 br_idx) 3380 { 3381 if (immed_get_value(nfp_prog->prog[br_idx + 1])) { 3382 pr_err("BUG: failed to fix up callee register saving\n"); 3383 return -EINVAL; 3384 } 3385 3386 immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off); 3387 3388 return 0; 3389 } 3390 3391 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 3392 { 3393 struct nfp_insn_meta *meta, *jmp_dst; 3394 u32 idx, br_idx; 3395 int err; 3396 3397 list_for_each_entry(meta, &nfp_prog->insns, l) { 3398 if (meta->skip) 3399 continue; 3400 if (BPF_CLASS(meta->insn.code) != BPF_JMP) 3401 continue; 3402 if (meta->insn.code == (BPF_JMP | BPF_EXIT) && 3403 !nfp_is_main_function(meta)) 3404 continue; 3405 if (is_mbpf_helper_call(meta)) 3406 continue; 3407 3408 if (list_is_last(&meta->l, &nfp_prog->insns)) 3409 br_idx = nfp_prog->last_bpf_off; 3410 else 3411 br_idx = list_next_entry(meta, l)->off - 1; 3412 3413 /* For BPF-to-BPF function call, a stack adjustment sequence is 3414 * generated after the return instruction. Therefore, we must 3415 * withdraw the length of this sequence to have br_idx pointing 3416 * to where the "branch" NFP instruction is expected to be. 
3417 */ 3418 if (is_mbpf_pseudo_call(meta)) 3419 br_idx -= meta->num_insns_after_br; 3420 3421 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 3422 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 3423 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 3424 return -ELOOP; 3425 } 3426 3427 if (meta->insn.code == (BPF_JMP | BPF_EXIT)) 3428 continue; 3429 3430 /* Leave special branches for later */ 3431 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3432 RELO_BR_REL && !is_mbpf_pseudo_call(meta)) 3433 continue; 3434 3435 if (!meta->jmp_dst) { 3436 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 3437 return -ELOOP; 3438 } 3439 3440 jmp_dst = meta->jmp_dst; 3441 3442 if (jmp_dst->skip) { 3443 pr_err("Branch landing on removed instruction!!\n"); 3444 return -ELOOP; 3445 } 3446 3447 if (is_mbpf_pseudo_call(meta) && 3448 nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) { 3449 err = nfp_fixup_immed_relo(nfp_prog, meta, 3450 jmp_dst, br_idx); 3451 if (err) 3452 return err; 3453 } 3454 3455 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3456 RELO_BR_REL) 3457 continue; 3458 3459 for (idx = meta->off; idx <= br_idx; idx++) { 3460 if (!nfp_is_br(nfp_prog->prog[idx])) 3461 continue; 3462 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 3463 } 3464 } 3465 3466 return 0; 3467 } 3468 3469 static void nfp_intro(struct nfp_prog *nfp_prog) 3470 { 3471 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 3472 emit_alu(nfp_prog, plen_reg(nfp_prog), 3473 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 3474 } 3475 3476 static void 3477 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3478 { 3479 /* Save return address into the stack. */ 3480 wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog)); 3481 } 3482 3483 static void 3484 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3485 { 3486 unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth; 3487 3488 nfp_prog->stack_frame_depth = round_up(depth, 4); 3489 nfp_subprog_prologue(nfp_prog, meta); 3490 } 3491 3492 bool nfp_is_subprog_start(struct nfp_insn_meta *meta) 3493 { 3494 return meta->flags & FLAG_INSN_IS_SUBPROG_START; 3495 } 3496 3497 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 3498 { 3499 /* TC direct-action mode: 3500 * 0,1 ok NOT SUPPORTED[1] 3501 * 2 drop 0x22 -> drop, count as stat1 3502 * 4,5 nuke 0x02 -> drop 3503 * 7 redir 0x44 -> redir, count as stat2 3504 * * unspec 0x11 -> pass, count as stat0 3505 * 3506 * [1] We can't support OK and RECLASSIFY because we can't tell TC 3507 * the exact decision made. We are forced to support UNSPEC 3508 * to handle aborts so that's the only one we handle for passing 3509 * packets up the stack. 
3510 */ 3511 /* Target for aborts */ 3512 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3513 3514 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3515 3516 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3517 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 3518 3519 /* Target for normal exits */ 3520 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3521 3522 /* if R0 > 7 jump to abort */ 3523 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 3524 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3525 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3526 3527 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 3528 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 3529 3530 emit_shf(nfp_prog, reg_a(1), 3531 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 3532 3533 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3534 emit_shf(nfp_prog, reg_a(2), 3535 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3536 3537 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3538 emit_shf(nfp_prog, reg_b(2), 3539 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 3540 3541 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3542 3543 emit_shf(nfp_prog, reg_b(2), 3544 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 3545 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3546 } 3547 3548 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 3549 { 3550 /* XDP return codes: 3551 * 0 aborted 0x82 -> drop, count as stat3 3552 * 1 drop 0x22 -> drop, count as stat1 3553 * 2 pass 0x11 -> pass, count as stat0 3554 * 3 tx 0x44 -> redir, count as stat2 3555 * * unknown 0x82 -> drop, count as stat3 3556 */ 3557 /* Target for aborts */ 3558 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3559 3560 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3561 3562 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3563 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 3564 3565 /* Target for normal exits */ 3566 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3567 3568 /* if R0 > 3 jump to abort */ 3569 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 3570 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3571 3572 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 3573 3574 emit_shf(nfp_prog, reg_a(1), 3575 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 3576 3577 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3578 emit_shf(nfp_prog, reg_b(2), 3579 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3580 3581 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3582 3583 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3584 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3585 } 3586 3587 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog) 3588 { 3589 unsigned int idx; 3590 3591 for (idx = 1; idx < nfp_prog->subprog_cnt; idx++) 3592 if (nfp_prog->subprog[idx].needs_reg_push) 3593 return true; 3594 3595 return false; 3596 } 3597 3598 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog) 3599 { 3600 u8 reg; 3601 3602 /* Subroutine: Save all callee saved registers (R6 ~ R9). 3603 * imm_b() holds the return address. 
3604 */ 3605 nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog); 3606 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3607 u8 adj = (reg - BPF_REG_0) * 2; 3608 u8 idx = (reg - BPF_REG_6) * 2; 3609 3610 /* The first slot in the stack frame is used to push the return 3611 * address in bpf_to_bpf_call(), start just after. 3612 */ 3613 wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj)); 3614 3615 if (reg == BPF_REG_8) 3616 /* Prepare to jump back, last 3 insns use defer slots */ 3617 emit_rtn(nfp_prog, imm_b(nfp_prog), 3); 3618 3619 wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1)); 3620 } 3621 } 3622 3623 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog) 3624 { 3625 u8 reg; 3626 3627 /* Subroutine: Restore all callee saved registers (R6 ~ R9). 3628 * ret_reg() holds the return address. 3629 */ 3630 nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog); 3631 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3632 u8 adj = (reg - BPF_REG_0) * 2; 3633 u8 idx = (reg - BPF_REG_6) * 2; 3634 3635 /* The first slot in the stack frame holds the return address, 3636 * start popping just after that. 3637 */ 3638 wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx)); 3639 3640 if (reg == BPF_REG_8) 3641 /* Prepare to jump back, last 3 insns use defer slots */ 3642 emit_rtn(nfp_prog, ret_reg(nfp_prog), 3); 3643 3644 wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1)); 3645 } 3646 } 3647 3648 static void nfp_outro(struct nfp_prog *nfp_prog) 3649 { 3650 switch (nfp_prog->type) { 3651 case BPF_PROG_TYPE_SCHED_CLS: 3652 nfp_outro_tc_da(nfp_prog); 3653 break; 3654 case BPF_PROG_TYPE_XDP: 3655 nfp_outro_xdp(nfp_prog); 3656 break; 3657 default: 3658 WARN_ON(1); 3659 } 3660 3661 if (!nfp_prog_needs_callee_reg_save(nfp_prog)) 3662 return; 3663 3664 nfp_push_callee_registers(nfp_prog); 3665 nfp_pop_callee_registers(nfp_prog); 3666 } 3667 3668 static int nfp_translate(struct nfp_prog *nfp_prog) 3669 { 3670 struct nfp_insn_meta *meta; 3671 unsigned int depth; 3672 int err; 3673 3674 depth = nfp_prog->subprog[0].stack_depth; 3675 nfp_prog->stack_frame_depth = round_up(depth, 4); 3676 3677 nfp_intro(nfp_prog); 3678 if (nfp_prog->error) 3679 return nfp_prog->error; 3680 3681 list_for_each_entry(meta, &nfp_prog->insns, l) { 3682 instr_cb_t cb = instr_cb[meta->insn.code]; 3683 3684 meta->off = nfp_prog_current_offset(nfp_prog); 3685 3686 if (nfp_is_subprog_start(meta)) { 3687 nfp_start_subprog(nfp_prog, meta); 3688 if (nfp_prog->error) 3689 return nfp_prog->error; 3690 } 3691 3692 if (meta->skip) { 3693 nfp_prog->n_translated++; 3694 continue; 3695 } 3696 3697 if (nfp_meta_has_prev(nfp_prog, meta) && 3698 nfp_meta_prev(meta)->double_cb) 3699 cb = nfp_meta_prev(meta)->double_cb; 3700 if (!cb) 3701 return -ENOENT; 3702 err = cb(nfp_prog, meta); 3703 if (err) 3704 return err; 3705 if (nfp_prog->error) 3706 return nfp_prog->error; 3707 3708 nfp_prog->n_translated++; 3709 } 3710 3711 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 3712 3713 nfp_outro(nfp_prog); 3714 if (nfp_prog->error) 3715 return nfp_prog->error; 3716 3717 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 3718 if (nfp_prog->error) 3719 return nfp_prog->error; 3720 3721 return nfp_fixup_branches(nfp_prog); 3722 } 3723 3724 /* --- Optimizations --- */ 3725 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 3726 { 3727 struct nfp_insn_meta *meta; 3728 3729 list_for_each_entry(meta, &nfp_prog->insns, l) { 3730 struct bpf_insn insn = meta->insn; 3731 3732 /* Programs converted from cBPF 
		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* abs(insn.imm) will fit better into unrestricted reg immediate -
 * convert add/sub of a negative number into a sub/add of a positive one.
 */
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		if (meta->skip)
			continue;

		if (BPF_CLASS(insn.code) != BPF_ALU &&
		    BPF_CLASS(insn.code) != BPF_ALU64 &&
		    BPF_CLASS(insn.code) != BPF_JMP)
			continue;
		if (BPF_SRC(insn.code) != BPF_K)
			continue;
		if (insn.imm >= 0)
			continue;

		if (BPF_CLASS(insn.code) == BPF_JMP) {
			switch (BPF_OP(insn.code)) {
			case BPF_JGE:
			case BPF_JSGE:
			case BPF_JLT:
			case BPF_JSLT:
				meta->jump_neg_op = true;
				break;
			default:
				continue;
			}
		} else {
			if (BPF_OP(insn.code) == BPF_ADD)
				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
			else if (BPF_OP(insn.code) == BPF_SUB)
				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
			else
				continue;

			meta->insn.code = insn.code | BPF_K;
		}

		meta->insn.imm = -insn.imm;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

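		/* At this point the pattern is confirmed: a 32-bit ABS/IND
		 * load followed by a << 32 / >> 32 pair (in either order),
		 * both shifts operating on R0, the implicit destination of
		 * the classic load.  As in nfp_bpf_opt_ld_mask() above, the
		 * pair is only dropped when no branch lands on either shift
		 * instruction.
		 */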
		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the store
 * should be the same, and the load and store should also operate at the same
 * width.  If either addr_src or addr_dest is the stack pointer, we don't do
 * the CPP optimization as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET &&
	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 *  - Their address base registers are the same.
 *  - Their address offsets are in the same order.
 *  - They operate at the same memory width.
 *  - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return TRUE if a cross memory access happens.  A cross memory access means
 * the store area overlaps the load area, so that a later load might read a
 * value written by a previous store; in that case we can't treat the sequence
 * as a memory copy.
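 *
 * For example (illustrative offsets, all 4-byte accesses against the same
 * packet pointer):
 *
 *   ld  R, [pkt + 0]
 *   st  [pkt + 4], R
 *   ld  R, [pkt + 4]     <- reads the bytes just stored above
 *   st  [pkt + 8], R
 *
 * Folding such a sequence into a single transfer would read stale data.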
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets.  Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}

/* This pass tries to identify the following instruction sequences.
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when lowering a
 * memcpy.  The NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crosses with the previous
		 *   pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then this
			 * could serve as the new head of the next chain.
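			 * (Note the descending-order case above: the head
			 * insn offsets are rewritten to the lowest load/store
			 * offsets of the chain and ldst_gather_len is negated
			 * to record the copy direction.)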
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}

static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->skip)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}

		/* Check ID to make sure two reads share the same
		 * variable offset against PTR_TO_PACKET, and check OFF
		 * to make sure they also share the same constant
		 * offset.
		 *
		 * OFFs don't really need to be the same, because they
		 * are the constant offsets against PTR_TO_PACKET, so
		 * for different OFFs, we could canonicalize them to
		 * offsets against the original packet pointer.  We
		 * don't support this.
		 */
		if (meta->ptr.id == range_ptr_id &&
		    meta->ptr.off == range_ptr_off) {
			s16 new_start = range_start;
			s16 end, off = insn->off;
			s16 new_end = range_end;
			bool changed = false;

			if (off < range_start) {
				new_start = off;
				changed = true;
			}

			end = off + BPF_LDST_BYTES(insn);
			if (end > range_end) {
				new_end = end;
				changed = true;
			}

			if (!changed)
				continue;

			if (new_end - new_start <= 64) {
				/* Install new range.
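				 * Ranges wider than 64 bytes are not
				 * extended; in that case we fall through
				 * below, close the current range and start
				 * a new one at this load.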
				 */
				range_start = new_start;
				range_end = new_end;
				continue;
			}
		}

end_current_then_start_new:
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
start_new:
		range_node = meta;
		range_node->pkt_cache.do_init = true;
		range_ptr_id = range_node->ptr.id;
		range_ptr_off = range_node->ptr.off;
		range_start = insn->off;
		range_end = insn->off + BPF_LDST_BYTES(insn);
	}

	if (range_node) {
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
	}

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;

		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
			if (meta->pkt_cache.do_init) {
				range_start = meta->pkt_cache.range_start;
				range_end = meta->pkt_cache.range_end;
			} else {
				meta->pkt_cache.range_start = range_start;
				meta->pkt_cache.range_end = range_end;
			}
		}
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_neg_add_sub(nfp_prog);
	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->skip || meta2->skip)
			continue;

		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
			continue;

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

	return 0;
}

static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
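	/* Record, for each jump and BPF-to-BPF call, which instruction it
	 * targets: the destination is flagged FLAG_INSN_IS_JUMP_DST, pseudo
	 * call targets are additionally flagged FLAG_INSN_IS_SUBPROG_START,
	 * and meta->jmp_dst links the jump to its destination.
	 */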
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct nfp_insn_meta *dst_meta;
		u64 code = meta->insn.code;
		unsigned int dst_idx;
		bool pseudo_call;

		if (BPF_CLASS(code) != BPF_JMP)
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (is_mbpf_helper_call(meta))
			continue;

		/* If opcode is BPF_CALL at this point, this can only be a
		 * BPF-to-BPF call (a.k.a. pseudo call).
		 */
		pseudo_call = BPF_OP(code) == BPF_CALL;

		if (pseudo_call)
			dst_idx = meta->n + 1 + meta->insn.imm;
		else
			dst_idx = meta->n + 1 + meta->insn.off;

		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);

		if (pseudo_call)
			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		meta->jmp_dst = dst_meta;
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;
		u16 off;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_GO_CALL_PUSH_REGS:
			if (!nfp_prog->tgt_call_push_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_push_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_GO_CALL_POP_REGS:
			if (!nfp_prog->tgt_call_pop_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			case BPF_FUNC_perf_event_output:
				val = nfp_prog->bpf->helpers.perf_event_output;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}