// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The "foreach multiple entries" macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the translation
	 * will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right. For bit X, we need right rotate X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}
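
/* For example: emit_br_bset(nfp_prog, src, 5, addr, 0) encodes a right
 * rotate by 6, so bit 5 of @src lands in the MSB of the rotated value and
 * the branch fires when that MSB is set.
 */
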
static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_BR_ALU_BASE |
		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
		      reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}
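
/* For example: the shifter hardware only takes a right-side amount, which
 * is why __emit_shf() stores a left shift by N as 32 - N in the shift
 * field; emit_shf(..., SHF_SC_L_SHF, 8) is encoded with a shift field of 24.
 */
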
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the type is a step type and the step number is LAST
		 * or LAST_2, the left source is used as the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}
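
/* A full multiply is a fixed cadence of emit_mul() calls, e.g. for 32x32
 * (the sequence wrp_mul_u32() emits later in this file):
 *
 *   emit_mul(p, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
 *   emit_mul(p, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
 *   ...                                    MUL_STEP_2 .. MUL_STEP_4 ...
 *   emit_mul(p, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
 *   emit_mul(p, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2, reg_none());
 *
 * where the LAST/LAST_2 steps read back the low/high 32 bits of the result.
 */
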
static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
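
/* Worked example: 0x0000beef packs as val 0xbeef with shift 0B, 0x00beef00
 * as val 0xbeef with shift 1B, and 0xbeef0000 as val 0xbeef with shift 2B.
 * A value like 0x12345678 fits none of these (nor does its inverse), so
 * wrp_immed() falls back to two immed instructions: the low 16 bits with
 * IMMED_WIDTH_ALL, then the high 16 bits with IMMED_WIDTH_WORD shifted by
 * 2 bytes.
 */
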
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
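
/* 40-bit addresses live in a GPR pair: the low 32 bits in src_gpr and the
 * high 8 bits in src_gpr + 1. A non-zero offset therefore takes two
 * instructions; e.g. for base 0x12_3456789a the low-word add of
 * 0x3456789a + offset runs first and ALU_OP_ADD_C then folds its carry
 * into the high word 0x12.
 */
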
/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Set up PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte-aligned
		 * part, then another direct_ref write8 to write the
		 * remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value it held before
	 * this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}
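
/* For example, a gathered copy of len == 26 has xfer_num == 7; 26 is not a
 * multiple of 4 but fits in 32 bytes, so the store side above uses a
 * single indirect_ref write8 with the length override (CMD_OV_LEN) set to
 * len - 1 == 25.
 */
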
static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
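
/* E.g. a 2-byte load still reads a full 4-byte word (sz == 4) and, since
 * the data arrives big endian, shifts it right by (4 - 2) * 8 == 16 bits
 * to right-align the two wanted bytes; the upper word of the 64-bit BPF
 * register is then zeroed via the i < 2 check.
 */
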
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}
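
/* The bounds check above is a subtract-and-branch-on-borrow: e.g. for a
 * 2-byte load at offset 14 it computes plen - 16 and takes the BR_BLO
 * branch to the abort relocation whenever the packet is shorter than 16
 * bytes.
 */
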
static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large, do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already loaded that LMEM word and
		 * therefore it is already in imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
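
/* Example: loading 2 bytes from stack offset 5 into the low end of a GPR
 * gives idx == 1, src_byte == 1, dst_byte == 0, so mask == 0x3 and the
 * ld_field uses SHF_SC_R_SHF by 8 to move byte 1 of the LMEM word down to
 * byte 0 of the destination.
 */
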
static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large, do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations will need RMW, the
		 * middle location is overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}
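
/* mem_op_stack() below walks an access in slices that never cross a GPR
 * byte boundary or an LMEM word boundary; e.g. an 8-byte load at stack
 * offset 2 is emitted as four 2-byte slices (bytes 2-3, 4-5, 6-7, 8-9),
 * each handled by the lmem_step callback.
 */
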
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const ||
	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}
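
/* The shortcuts above catch identity and absorbing constants; e.g.
 * "dst &= 0" becomes a plain immed[dst, 0], "dst &= ~0U" emits nothing,
 * "dst |= ~0U" becomes immed[dst, ~0U], and "dst ^= ~0U" turns into a
 * single ALU_OP_NOT.  Everything else loads the constant and does the op.
 */
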
static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}
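
/* The swap flag rewrites comparisons the hardware lacks; e.g. BPF_JGT
 * (dst > src) is emitted as src < dst: cmp_reg()/cmp_imm() below exchange
 * the operands and branch on BR_BLO, i.e. on the borrow of src - dst.
 */
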
static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), carry_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, carry_op, reg_a(reg + 1));

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}

static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
	if (gen_high_half)
		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
			 reg_none());
	else
		wrp_immed(nfp_prog, dst_hi, 0);
}

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{
	swreg multiplier, multiplicand, dst_hi, dst_lo;
	const struct bpf_insn *insn = &meta->insn;
	u32 lopnd_max, ropnd_max;
	u8 dst_reg;

	dst_reg = insn->dst_reg;
	multiplicand = reg_a(dst_reg * 2);
	dst_hi = reg_both(dst_reg * 2 + 1);
	dst_lo = reg_both(dst_reg * 2);
	lopnd_max = meta->umax_dst;
	if (ropnd_from_reg) {
		multiplier = reg_b(insn->src_reg * 2);
		ropnd_max = meta->umax_src;
	} else {
		u32 imm = insn->imm;

		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
		ropnd_max = imm;
	}
	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
			    gen_high_half);
	else
		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);

	return 0;
}
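
/* wrp_mul() picks the sequence using the verifier's value tracking: when
 * both operands' umax values fit in 16 bits the four-instruction 16x16
 * sequence suffices, otherwise the six/seven-instruction 32x32 sequence is
 * used (MUL_LAST_2 is emitted only when the high half is actually needed).
 */
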
static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{
	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	swreg magic;

	if (imm > U32_MAX) {
		wrp_immed(nfp_prog, dst_both, 0);
		return 0;
	}

	/* NOTE: because we are using "reciprocal_value_adv" which doesn't
	 * support "divisor > (1u << 31)", we need to JIT a separate NFP
	 * sequence to handle such a case, which is exactly the result of the
	 * unsigned comparison "dst >= imm" and can be calculated using the
	 * following NFP sequence:
	 *
	 *  alu[--, dst, -, imm]
	 *  immed[imm, 0]
	 *  alu[dst, imm, +carry, 0]
	 */
	if (imm > 1U << 31) {
		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));

		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
			 reg_imm(0));
		return 0;
	}

	rvalue = reciprocal_value_adv(imm, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(imm & 1)) {
		pre_shift = fls(imm & -imm) - 1;
		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}
	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
	if (imm == 1U << exp) {
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, exp);
	} else if (rvalue.is_wide_m) {
		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
			    magic, true);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, 1);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, rvalue.sh - 1);
	} else {
		if (pre_shift)
			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
				 dst_b, SHF_SC_R_SHF, pre_shift);
		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
			 dst_b, SHF_SC_R_SHF, rvalue.sh);
	}

	return 0;
}
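
/* Two common shapes of the generated code: a power-of-two divisor such as
 * x / 8 collapses to a single right shift by 3 (the imm == 1U << exp case
 * above); any other divisor multiplies by the precomputed "magic"
 * reciprocal from reciprocal_value_adv() and right-shifts the high half of
 * the product, with extra fix-up steps when the magic constant is wider
 * than 32 bits (rvalue.is_wide_m).
 */
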
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 ret_einval, end;
	swreg plen, delta;

	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));

	plen = imm_a(nfp_prog);
	delta = reg_a(2 * 2);

	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
	end = nfp_prog_current_offset(nfp_prog) + 11;

	/* Calculate resulting length */
	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
	/* delta == 0 is not allowed by the kernel, add must overflow to make
	 * length smaller.
	 */
	emit_br(nfp_prog, BR_BCC, ret_einval, 0);

	/* if (new_len < 14) then -EINVAL */
	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_ADD, delta);

	emit_br(nfp_prog, BR_UNC, end, 2);
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
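
/* Branch targets inside a helper are computed up front by counting the
 * instructions about to be emitted; e.g. adjust_tail() above fixes
 * ret_einval at current + 9 and end at current + 11, and
 * nfp_prog_confirm_current_offset() then WARNs if the emitted count ever
 * drifts from that plan.
 */
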
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_frame_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here; we will jump over the next instruction if the
	 * queue value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_frame_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
			 ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}
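
/* Each 64-bit BPF register maps onto a pair of 32-bit NFP GPRs (2n for the
 * low word, 2n + 1 for the high word); e.g. mov_imm64() with imm == -1
 * sign-extends and writes 0xffffffff into both GPRs of the pair.
 */
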
*meta) 1877 { 1878 const struct bpf_insn *insn = &meta->insn; 1879 u64 imm = insn->imm; /* sign extend */ 1880 1881 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1882 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1883 1884 return 0; 1885 } 1886 1887 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1888 { 1889 const struct bpf_insn *insn = &meta->insn; 1890 1891 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1892 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1893 reg_b(insn->src_reg * 2)); 1894 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1895 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1896 reg_b(insn->src_reg * 2 + 1)); 1897 1898 return 0; 1899 } 1900 1901 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1902 { 1903 const struct bpf_insn *insn = &meta->insn; 1904 u64 imm = insn->imm; /* sign extend */ 1905 1906 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1907 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1908 1909 return 0; 1910 } 1911 1912 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1913 { 1914 return wrp_mul(nfp_prog, meta, true, true); 1915 } 1916 1917 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1918 { 1919 return wrp_mul(nfp_prog, meta, true, false); 1920 } 1921 1922 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1923 { 1924 const struct bpf_insn *insn = &meta->insn; 1925 1926 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); 1927 } 1928 1929 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1930 { 1931 /* NOTE: verifier hook has rejected cases for which verifier doesn't 1932 * know whether the source operand is constant or not. 1933 */ 1934 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); 1935 } 1936 1937 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1938 { 1939 const struct bpf_insn *insn = &meta->insn; 1940 1941 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1942 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1943 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1944 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1945 1946 return 0; 1947 } 1948 1949 /* Pseudo code: 1950 * if shift_amt >= 32 1951 * dst_high = dst_low << shift_amt[4:0] 1952 * dst_low = 0; 1953 * else 1954 * dst_high = (dst_high, dst_low) >> (32 - shift_amt) 1955 * dst_low = dst_low << shift_amt 1956 * 1957 * The indirect shift will use the same logic at runtime. 
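 *
 * Illustrative example: for dst = 0x00000001_80000000 and shift_amt = 4,
 * the else branch computes dst_high = (dst_high, dst_low) >> 28 = 0x18
 * and dst_low = 0x80000000 << 4 = 0x00000000, i.e. the 64-bit result
 * 0x18_00000000.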
1958 */ 1959 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1960 { 1961 if (shift_amt < 32) { 1962 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), 1963 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, 1964 32 - shift_amt); 1965 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1966 reg_b(dst), SHF_SC_L_SHF, shift_amt); 1967 } else if (shift_amt == 32) { 1968 wrp_reg_mov(nfp_prog, dst + 1, dst); 1969 wrp_immed(nfp_prog, reg_both(dst), 0); 1970 } else if (shift_amt > 32) { 1971 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1972 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); 1973 wrp_immed(nfp_prog, reg_both(dst), 0); 1974 } 1975 1976 return 0; 1977 } 1978 1979 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1980 { 1981 const struct bpf_insn *insn = &meta->insn; 1982 u8 dst = insn->dst_reg * 2; 1983 1984 return __shl_imm64(nfp_prog, dst, insn->imm); 1985 } 1986 1987 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1988 { 1989 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, 1990 reg_b(src)); 1991 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); 1992 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, 1993 reg_b(dst), SHF_SC_R_DSHF); 1994 } 1995 1996 /* NOTE: for indirect left shift, HIGH part should be calculated first. */ 1997 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1998 { 1999 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2000 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2001 reg_b(dst), SHF_SC_L_SHF); 2002 } 2003 2004 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2005 { 2006 shl_reg64_lt32_high(nfp_prog, dst, src); 2007 shl_reg64_lt32_low(nfp_prog, dst, src); 2008 } 2009 2010 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2011 { 2012 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2013 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2014 reg_b(dst), SHF_SC_L_SHF); 2015 wrp_immed(nfp_prog, reg_both(dst), 0); 2016 } 2017 2018 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2019 { 2020 const struct bpf_insn *insn = &meta->insn; 2021 u64 umin, umax; 2022 u8 dst, src; 2023 2024 dst = insn->dst_reg * 2; 2025 umin = meta->umin_src; 2026 umax = meta->umax_src; 2027 if (umin == umax) 2028 return __shl_imm64(nfp_prog, dst, umin); 2029 2030 src = insn->src_reg * 2; 2031 if (umax < 32) { 2032 shl_reg64_lt32(nfp_prog, dst, src); 2033 } else if (umin >= 32) { 2034 shl_reg64_ge32(nfp_prog, dst, src); 2035 } else { 2036 /* Generate different instruction sequences depending on runtime 2037 * value of shift amount. 2038 */ 2039 u16 label_ge32, label_end; 2040 2041 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; 2042 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2043 2044 shl_reg64_lt32_high(nfp_prog, dst, src); 2045 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2046 emit_br(nfp_prog, BR_UNC, label_end, 2); 2047 /* shl_reg64_lt32_low packed in delay slot. 
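 * The unconditional branch above carries two defer slots, so the two
 * instructions emitted by shl_reg64_lt32_low() still execute before
 * the jump to label_end takes effect.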
*/ 2048 shl_reg64_lt32_low(nfp_prog, dst, src); 2049 2050 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2051 return -EINVAL; 2052 shl_reg64_ge32(nfp_prog, dst, src); 2053 2054 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2055 return -EINVAL; 2056 } 2057 2058 return 0; 2059 } 2060 2061 /* Pseudo code: 2062 * if shift_amt >= 32 2063 * dst_high = 0; 2064 * dst_low = dst_high >> shift_amt[4:0] 2065 * else 2066 * dst_high = dst_high >> shift_amt 2067 * dst_low = (dst_high, dst_low) >> shift_amt 2068 * 2069 * The indirect shift will use the same logic at runtime. 2070 */ 2071 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2072 { 2073 if (shift_amt < 32) { 2074 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2075 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2076 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2077 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2078 } else if (shift_amt == 32) { 2079 wrp_reg_mov(nfp_prog, dst, dst + 1); 2080 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2081 } else if (shift_amt > 32) { 2082 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2083 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2084 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2085 } 2086 2087 return 0; 2088 } 2089 2090 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2091 { 2092 const struct bpf_insn *insn = &meta->insn; 2093 u8 dst = insn->dst_reg * 2; 2094 2095 return __shr_imm64(nfp_prog, dst, insn->imm); 2096 } 2097 2098 /* NOTE: for indirect right shift, LOW part should be calculated first. */ 2099 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2100 { 2101 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2102 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2103 reg_b(dst + 1), SHF_SC_R_SHF); 2104 } 2105 2106 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2107 { 2108 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2109 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2110 reg_b(dst), SHF_SC_R_DSHF); 2111 } 2112 2113 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2114 { 2115 shr_reg64_lt32_low(nfp_prog, dst, src); 2116 shr_reg64_lt32_high(nfp_prog, dst, src); 2117 } 2118 2119 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2120 { 2121 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2122 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2123 reg_b(dst + 1), SHF_SC_R_SHF); 2124 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2125 } 2126 2127 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2128 { 2129 const struct bpf_insn *insn = &meta->insn; 2130 u64 umin, umax; 2131 u8 dst, src; 2132 2133 dst = insn->dst_reg * 2; 2134 umin = meta->umin_src; 2135 umax = meta->umax_src; 2136 if (umin == umax) 2137 return __shr_imm64(nfp_prog, dst, umin); 2138 2139 src = insn->src_reg * 2; 2140 if (umax < 32) { 2141 shr_reg64_lt32(nfp_prog, dst, src); 2142 } else if (umin >= 32) { 2143 shr_reg64_ge32(nfp_prog, dst, src); 2144 } else { 2145 /* Generate different instruction sequences depending on runtime 2146 * value of shift amount. 
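 * Bit 5 of the shift amount register distinguishes the two cases: for
 * shift amounts below 64, br_bset on that bit is equivalent to testing
 * shift_amt >= 32.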
2147 */ 2148 u16 label_ge32, label_end; 2149 2150 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2151 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2152 shr_reg64_lt32_low(nfp_prog, dst, src); 2153 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2154 emit_br(nfp_prog, BR_UNC, label_end, 2); 2155 /* shr_reg64_lt32_high packed in delay slot. */ 2156 shr_reg64_lt32_high(nfp_prog, dst, src); 2157 2158 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2159 return -EINVAL; 2160 shr_reg64_ge32(nfp_prog, dst, src); 2161 2162 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2163 return -EINVAL; 2164 } 2165 2166 return 0; 2167 } 2168 2169 /* Code logic is the same as __shr_imm64 except ashr requires signedness bit 2170 * told through PREV_ALU result. 2171 */ 2172 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2173 { 2174 if (shift_amt < 32) { 2175 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2176 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2177 /* Set signedness bit. */ 2178 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2179 reg_imm(0)); 2180 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2181 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2182 } else if (shift_amt == 32) { 2183 /* NOTE: this also helps setting signedness bit. */ 2184 wrp_reg_mov(nfp_prog, dst, dst + 1); 2185 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2186 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2187 } else if (shift_amt > 32) { 2188 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2189 reg_imm(0)); 2190 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2191 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2192 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2193 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2194 } 2195 2196 return 0; 2197 } 2198 2199 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2200 { 2201 const struct bpf_insn *insn = &meta->insn; 2202 u8 dst = insn->dst_reg * 2; 2203 2204 return __ashr_imm64(nfp_prog, dst, insn->imm); 2205 } 2206 2207 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2208 { 2209 /* NOTE: the first insn will set both indirect shift amount (source A) 2210 * and signedness bit (MSB of result). 2211 */ 2212 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2213 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2214 reg_b(dst + 1), SHF_SC_R_SHF); 2215 } 2216 2217 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2218 { 2219 /* NOTE: it is the same as logic shift because we don't need to shift in 2220 * signedness bit when the shift amount is less than 32. 2221 */ 2222 return shr_reg64_lt32_low(nfp_prog, dst, src); 2223 } 2224 2225 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2226 { 2227 ashr_reg64_lt32_low(nfp_prog, dst, src); 2228 ashr_reg64_lt32_high(nfp_prog, dst, src); 2229 } 2230 2231 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2232 { 2233 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2234 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2235 reg_b(dst + 1), SHF_SC_R_SHF); 2236 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2237 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2238 } 2239 2240 /* Like ashr_imm64, but need to use indirect shift. 
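 * Illustrative example: dst = 0xffffffff_fffffff0 (-16) shifted right
 * arithmetically by 2 must give 0xffffffff_fffffffc (-4), which is why
 * every code path below primes the shifter with the sign bit before
 * using SHF_OP_ASHR.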
*/ 2241 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2242 { 2243 const struct bpf_insn *insn = &meta->insn; 2244 u64 umin, umax; 2245 u8 dst, src; 2246 2247 dst = insn->dst_reg * 2; 2248 umin = meta->umin_src; 2249 umax = meta->umax_src; 2250 if (umin == umax) 2251 return __ashr_imm64(nfp_prog, dst, umin); 2252 2253 src = insn->src_reg * 2; 2254 if (umax < 32) { 2255 ashr_reg64_lt32(nfp_prog, dst, src); 2256 } else if (umin >= 32) { 2257 ashr_reg64_ge32(nfp_prog, dst, src); 2258 } else { 2259 u16 label_ge32, label_end; 2260 2261 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2262 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2263 ashr_reg64_lt32_low(nfp_prog, dst, src); 2264 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2265 emit_br(nfp_prog, BR_UNC, label_end, 2); 2266 /* ashr_reg64_lt32_high packed in delay slot. */ 2267 ashr_reg64_lt32_high(nfp_prog, dst, src); 2268 2269 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2270 return -EINVAL; 2271 ashr_reg64_ge32(nfp_prog, dst, src); 2272 2273 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2274 return -EINVAL; 2275 } 2276 2277 return 0; 2278 } 2279 2280 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2281 { 2282 const struct bpf_insn *insn = &meta->insn; 2283 2284 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 2285 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2286 2287 return 0; 2288 } 2289 2290 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2291 { 2292 const struct bpf_insn *insn = &meta->insn; 2293 2294 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 2295 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2296 2297 return 0; 2298 } 2299 2300 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2301 { 2302 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 2303 } 2304 2305 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2306 { 2307 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR); 2308 } 2309 2310 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2311 { 2312 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 2313 } 2314 2315 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2316 { 2317 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND); 2318 } 2319 2320 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2321 { 2322 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 2323 } 2324 2325 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2326 { 2327 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR); 2328 } 2329 2330 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2331 { 2332 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 2333 } 2334 2335 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2336 { 2337 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD); 2338 } 2339 2340 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2341 { 2342 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 2343 } 2344 2345 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2346 { 2347 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB); 2348 } 2349 2350 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2351 { 2352 return wrp_mul(nfp_prog, meta, false, true); 2353 } 2354 2355 static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2356 { 2357 
return wrp_mul(nfp_prog, meta, false, false); 2358 } 2359 2360 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2361 { 2362 return div_reg64(nfp_prog, meta); 2363 } 2364 2365 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2366 { 2367 return div_imm64(nfp_prog, meta); 2368 } 2369 2370 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2371 { 2372 u8 dst = meta->insn.dst_reg * 2; 2373 2374 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2375 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2376 2377 return 0; 2378 } 2379 2380 static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2381 { 2382 /* Set signedness bit (MSB of result). */ 2383 emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0)); 2384 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst), 2385 SHF_SC_R_SHF, shift_amt); 2386 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2387 2388 return 0; 2389 } 2390 2391 static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2392 { 2393 const struct bpf_insn *insn = &meta->insn; 2394 u64 umin, umax; 2395 u8 dst, src; 2396 2397 dst = insn->dst_reg * 2; 2398 umin = meta->umin_src; 2399 umax = meta->umax_src; 2400 if (umin == umax) 2401 return __ashr_imm(nfp_prog, dst, umin); 2402 2403 src = insn->src_reg * 2; 2404 /* NOTE: the first insn will set both indirect shift amount (source A) 2405 * and signedness bit (MSB of result). 2406 */ 2407 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst)); 2408 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2409 reg_b(dst), SHF_SC_R_SHF); 2410 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2411 2412 return 0; 2413 } 2414 2415 static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2416 { 2417 const struct bpf_insn *insn = &meta->insn; 2418 u8 dst = insn->dst_reg * 2; 2419 2420 return __ashr_imm(nfp_prog, dst, insn->imm); 2421 } 2422 2423 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2424 { 2425 const struct bpf_insn *insn = &meta->insn; 2426 2427 if (!insn->imm) 2428 return 1; /* TODO: zero shift means indirect */ 2429 2430 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 2431 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 2432 SHF_SC_L_SHF, insn->imm); 2433 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2434 2435 return 0; 2436 } 2437 2438 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2439 { 2440 const struct bpf_insn *insn = &meta->insn; 2441 u8 gpr = insn->dst_reg * 2; 2442 2443 switch (insn->imm) { 2444 case 16: 2445 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2446 SHF_SC_R_ROT, 8); 2447 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2448 SHF_SC_R_SHF, 16); 2449 2450 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2451 break; 2452 case 32: 2453 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2454 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2455 break; 2456 case 64: 2457 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2458 2459 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2460 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2461 break; 2462 } 2463 2464 return 0; 2465 } 2466 2467 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2468 { 2469 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2470 u32 imm_lo, imm_hi; 2471 u8 dst; 2472 2473 dst = prev->insn.dst_reg * 2; 2474 imm_lo = prev->insn.imm; 2475 imm_hi = meta->insn.imm; 2476 2477 
wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2478 2479 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2480 if (imm_hi == imm_lo) 2481 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2482 else 2483 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2484 2485 return 0; 2486 } 2487 2488 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2489 { 2490 meta->double_cb = imm_ld8_part2; 2491 return 0; 2492 } 2493 2494 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2495 { 2496 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2497 } 2498 2499 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2500 { 2501 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2502 } 2503 2504 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2505 { 2506 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2507 } 2508 2509 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2510 { 2511 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2512 meta->insn.src_reg * 2, 1); 2513 } 2514 2515 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2516 { 2517 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2518 meta->insn.src_reg * 2, 2); 2519 } 2520 2521 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2522 { 2523 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2524 meta->insn.src_reg * 2, 4); 2525 } 2526 2527 static int 2528 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2529 unsigned int size, unsigned int ptr_off) 2530 { 2531 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2532 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2533 true, wrp_lmem_load); 2534 } 2535 2536 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2537 u8 size) 2538 { 2539 swreg dst = reg_both(meta->insn.dst_reg * 2); 2540 2541 switch (meta->insn.off) { 2542 case offsetof(struct __sk_buff, len): 2543 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2544 return -EOPNOTSUPP; 2545 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2546 break; 2547 case offsetof(struct __sk_buff, data): 2548 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2549 return -EOPNOTSUPP; 2550 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2551 break; 2552 case offsetof(struct __sk_buff, data_end): 2553 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2554 return -EOPNOTSUPP; 2555 emit_alu(nfp_prog, dst, 2556 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2557 break; 2558 default: 2559 return -EOPNOTSUPP; 2560 } 2561 2562 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2563 2564 return 0; 2565 } 2566 2567 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2568 u8 size) 2569 { 2570 swreg dst = reg_both(meta->insn.dst_reg * 2); 2571 2572 switch (meta->insn.off) { 2573 case offsetof(struct xdp_md, data): 2574 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2575 return -EOPNOTSUPP; 2576 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2577 break; 2578 case offsetof(struct xdp_md, data_end): 2579 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2580 return -EOPNOTSUPP; 2581 emit_alu(nfp_prog, dst, 2582 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2583 break; 2584 default: 2585 return -EOPNOTSUPP; 2586 } 2587 2588 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2589 2590 return 0; 2591 } 2592 2593 static int 2594 mem_ldx_data(struct nfp_prog *nfp_prog, 
struct nfp_insn_meta *meta, 2595 unsigned int size) 2596 { 2597 swreg tmp_reg; 2598 2599 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2600 2601 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2602 tmp_reg, meta->insn.dst_reg * 2, size); 2603 } 2604 2605 static int 2606 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2607 unsigned int size) 2608 { 2609 swreg tmp_reg; 2610 2611 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2612 2613 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2614 tmp_reg, meta->insn.dst_reg * 2, size); 2615 } 2616 2617 static void 2618 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2619 struct nfp_insn_meta *meta) 2620 { 2621 s16 range_start = meta->pkt_cache.range_start; 2622 s16 range_end = meta->pkt_cache.range_end; 2623 swreg src_base, off; 2624 u8 xfer_num, len; 2625 bool indir; 2626 2627 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2628 src_base = reg_a(meta->insn.src_reg * 2); 2629 len = range_end - range_start; 2630 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2631 2632 indir = len > 8 * REG_WIDTH; 2633 /* Setup PREV_ALU for indirect mode. */ 2634 if (indir) 2635 wrp_immed(nfp_prog, reg_none(), 2636 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2637 2638 /* Cache memory into transfer-in registers. */ 2639 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2640 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2641 } 2642 2643 static int 2644 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2645 struct nfp_insn_meta *meta, 2646 unsigned int size) 2647 { 2648 s16 range_start = meta->pkt_cache.range_start; 2649 s16 insn_off = meta->insn.off - range_start; 2650 swreg dst_lo, dst_hi, src_lo, src_mid; 2651 u8 dst_gpr = meta->insn.dst_reg * 2; 2652 u8 len_lo = size, len_mid = 0; 2653 u8 idx = insn_off / REG_WIDTH; 2654 u8 off = insn_off % REG_WIDTH; 2655 2656 dst_hi = reg_both(dst_gpr + 1); 2657 dst_lo = reg_both(dst_gpr); 2658 src_lo = reg_xfer(idx); 2659 2660 /* The read length could involve as many as three registers. */ 2661 if (size > REG_WIDTH - off) { 2662 /* Calculate the part in the second register. */ 2663 len_lo = REG_WIDTH - off; 2664 len_mid = size - len_lo; 2665 2666 /* Calculate the part in the third register. 
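 * Illustrative example: an 8-byte load at off == 3 with 4-byte
 * transfer registers takes len_lo = 1 byte from the first register,
 * a full middle register (len_mid = REG_WIDTH), and the remaining
 * 3 bytes from the third.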
*/ 2667 if (size > 2 * REG_WIDTH - off) 2668 len_mid = REG_WIDTH; 2669 } 2670 2671 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2672 2673 if (!len_mid) { 2674 wrp_immed(nfp_prog, dst_hi, 0); 2675 return 0; 2676 } 2677 2678 src_mid = reg_xfer(idx + 1); 2679 2680 if (size <= REG_WIDTH) { 2681 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2682 wrp_immed(nfp_prog, dst_hi, 0); 2683 } else { 2684 swreg src_hi = reg_xfer(idx + 2); 2685 2686 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2687 REG_WIDTH - len_lo, len_lo); 2688 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2689 REG_WIDTH - len_lo); 2690 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2691 len_lo); 2692 } 2693 2694 return 0; 2695 } 2696 2697 static int 2698 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2699 struct nfp_insn_meta *meta, 2700 unsigned int size) 2701 { 2702 swreg dst_lo, dst_hi, src_lo; 2703 u8 dst_gpr, idx; 2704 2705 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2706 dst_gpr = meta->insn.dst_reg * 2; 2707 dst_hi = reg_both(dst_gpr + 1); 2708 dst_lo = reg_both(dst_gpr); 2709 src_lo = reg_xfer(idx); 2710 2711 if (size < REG_WIDTH) { 2712 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2713 wrp_immed(nfp_prog, dst_hi, 0); 2714 } else if (size == REG_WIDTH) { 2715 wrp_mov(nfp_prog, dst_lo, src_lo); 2716 wrp_immed(nfp_prog, dst_hi, 0); 2717 } else { 2718 swreg src_hi = reg_xfer(idx + 1); 2719 2720 wrp_mov(nfp_prog, dst_lo, src_lo); 2721 wrp_mov(nfp_prog, dst_hi, src_hi); 2722 } 2723 2724 return 0; 2725 } 2726 2727 static int 2728 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2729 struct nfp_insn_meta *meta, unsigned int size) 2730 { 2731 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2732 2733 if (IS_ALIGNED(off, REG_WIDTH)) 2734 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2735 2736 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2737 } 2738 2739 static int 2740 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2741 unsigned int size) 2742 { 2743 if (meta->ldst_gather_len) 2744 return nfp_cpp_memcpy(nfp_prog, meta); 2745 2746 if (meta->ptr.type == PTR_TO_CTX) { 2747 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2748 return mem_ldx_xdp(nfp_prog, meta, size); 2749 else 2750 return mem_ldx_skb(nfp_prog, meta, size); 2751 } 2752 2753 if (meta->ptr.type == PTR_TO_PACKET) { 2754 if (meta->pkt_cache.range_end) { 2755 if (meta->pkt_cache.do_init) 2756 mem_ldx_data_init_pktcache(nfp_prog, meta); 2757 2758 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2759 } else { 2760 return mem_ldx_data(nfp_prog, meta, size); 2761 } 2762 } 2763 2764 if (meta->ptr.type == PTR_TO_STACK) 2765 return mem_ldx_stack(nfp_prog, meta, size, 2766 meta->ptr.off + meta->ptr.var_off.value); 2767 2768 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2769 return mem_ldx_emem(nfp_prog, meta, size); 2770 2771 return -EOPNOTSUPP; 2772 } 2773 2774 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2775 { 2776 return mem_ldx(nfp_prog, meta, 1); 2777 } 2778 2779 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2780 { 2781 return mem_ldx(nfp_prog, meta, 2); 2782 } 2783 2784 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2785 { 2786 return mem_ldx(nfp_prog, meta, 4); 2787 } 2788 2789 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2790 { 2791 return mem_ldx(nfp_prog, meta, 8); 2792 } 2793 2794 static int 2795 
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2796 unsigned int size) 2797 { 2798 u64 imm = meta->insn.imm; /* sign extend */ 2799 swreg off_reg; 2800 2801 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2802 2803 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2804 imm, size); 2805 } 2806 2807 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2808 unsigned int size) 2809 { 2810 if (meta->ptr.type == PTR_TO_PACKET) 2811 return mem_st_data(nfp_prog, meta, size); 2812 2813 return -EOPNOTSUPP; 2814 } 2815 2816 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2817 { 2818 return mem_st(nfp_prog, meta, 1); 2819 } 2820 2821 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2822 { 2823 return mem_st(nfp_prog, meta, 2); 2824 } 2825 2826 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2827 { 2828 return mem_st(nfp_prog, meta, 4); 2829 } 2830 2831 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2832 { 2833 return mem_st(nfp_prog, meta, 8); 2834 } 2835 2836 static int 2837 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2838 unsigned int size) 2839 { 2840 swreg off_reg; 2841 2842 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2843 2844 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2845 meta->insn.src_reg * 2, size); 2846 } 2847 2848 static int 2849 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2850 unsigned int size, unsigned int ptr_off) 2851 { 2852 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2853 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2854 false, wrp_lmem_store); 2855 } 2856 2857 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2858 { 2859 switch (meta->insn.off) { 2860 case offsetof(struct xdp_md, rx_queue_index): 2861 return nfp_queue_select(nfp_prog, meta); 2862 } 2863 2864 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2865 return -EOPNOTSUPP; 2866 } 2867 2868 static int 2869 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2870 unsigned int size) 2871 { 2872 if (meta->ptr.type == PTR_TO_PACKET) 2873 return mem_stx_data(nfp_prog, meta, size); 2874 2875 if (meta->ptr.type == PTR_TO_STACK) 2876 return mem_stx_stack(nfp_prog, meta, size, 2877 meta->ptr.off + meta->ptr.var_off.value); 2878 2879 return -EOPNOTSUPP; 2880 } 2881 2882 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2883 { 2884 return mem_stx(nfp_prog, meta, 1); 2885 } 2886 2887 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2888 { 2889 return mem_stx(nfp_prog, meta, 2); 2890 } 2891 2892 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2893 { 2894 if (meta->ptr.type == PTR_TO_CTX) 2895 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2896 return mem_stx_xdp(nfp_prog, meta); 2897 return mem_stx(nfp_prog, meta, 4); 2898 } 2899 2900 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2901 { 2902 return mem_stx(nfp_prog, meta, 8); 2903 } 2904 2905 static int 2906 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2907 { 2908 u8 dst_gpr = meta->insn.dst_reg * 2; 2909 u8 src_gpr = meta->insn.src_reg * 2; 2910 unsigned int full_add, out; 2911 swreg addra, addrb, off; 2912 2913 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2914 2915 /* We can fit 16 bits into 
command immediate, if we know the immediate 2916 * is guaranteed to either always or never fit into 16 bit we only 2917 * generate code to handle that particular case, otherwise generate 2918 * code for both. 2919 */ 2920 out = nfp_prog_current_offset(nfp_prog); 2921 full_add = nfp_prog_current_offset(nfp_prog); 2922 2923 if (meta->insn.off) { 2924 out += 2; 2925 full_add += 2; 2926 } 2927 if (meta->xadd_maybe_16bit) { 2928 out += 3; 2929 full_add += 3; 2930 } 2931 if (meta->xadd_over_16bit) 2932 out += 2 + is64; 2933 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2934 out += 5; 2935 full_add += 5; 2936 } 2937 2938 /* Generate the branch for choosing add_imm vs add */ 2939 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2940 swreg max_imm = imm_a(nfp_prog); 2941 2942 wrp_immed(nfp_prog, max_imm, 0xffff); 2943 emit_alu(nfp_prog, reg_none(), 2944 max_imm, ALU_OP_SUB, reg_b(src_gpr)); 2945 emit_alu(nfp_prog, reg_none(), 2946 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); 2947 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); 2948 /* defer for add */ 2949 } 2950 2951 /* If insn has an offset add to the address */ 2952 if (!meta->insn.off) { 2953 addra = reg_a(dst_gpr); 2954 addrb = reg_b(dst_gpr + 1); 2955 } else { 2956 emit_alu(nfp_prog, imma_a(nfp_prog), 2957 reg_a(dst_gpr), ALU_OP_ADD, off); 2958 emit_alu(nfp_prog, imma_b(nfp_prog), 2959 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); 2960 addra = imma_a(nfp_prog); 2961 addrb = imma_b(nfp_prog); 2962 } 2963 2964 /* Generate the add_imm if 16 bits are possible */ 2965 if (meta->xadd_maybe_16bit) { 2966 swreg prev_alu = imm_a(nfp_prog); 2967 2968 wrp_immed(nfp_prog, prev_alu, 2969 FIELD_PREP(CMD_OVE_DATA, 2) | 2970 CMD_OVE_LEN | 2971 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); 2972 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); 2973 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, 2974 addra, addrb, 0, CMD_CTX_NO_SWAP); 2975 2976 if (meta->xadd_over_16bit) 2977 emit_br(nfp_prog, BR_UNC, out, 0); 2978 } 2979 2980 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) 2981 return -EINVAL; 2982 2983 /* Generate the add if 16 bits are not guaranteed */ 2984 if (meta->xadd_over_16bit) { 2985 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, 2986 addra, addrb, is64 << 2, 2987 is64 ? 
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2988 2989 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2990 if (is64) 2991 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 2992 } 2993 2994 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 2995 return -EINVAL; 2996 2997 return 0; 2998 } 2999 3000 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3001 { 3002 return mem_xadd(nfp_prog, meta, false); 3003 } 3004 3005 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3006 { 3007 return mem_xadd(nfp_prog, meta, true); 3008 } 3009 3010 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3011 { 3012 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 3013 3014 return 0; 3015 } 3016 3017 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3018 { 3019 const struct bpf_insn *insn = &meta->insn; 3020 u64 imm = insn->imm; /* sign extend */ 3021 swreg or1, or2, tmp_reg; 3022 3023 or1 = reg_a(insn->dst_reg * 2); 3024 or2 = reg_b(insn->dst_reg * 2 + 1); 3025 3026 if (imm & ~0U) { 3027 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3028 emit_alu(nfp_prog, imm_a(nfp_prog), 3029 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3030 or1 = imm_a(nfp_prog); 3031 } 3032 3033 if (imm >> 32) { 3034 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3035 emit_alu(nfp_prog, imm_b(nfp_prog), 3036 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3037 or2 = imm_b(nfp_prog); 3038 } 3039 3040 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 3041 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3042 3043 return 0; 3044 } 3045 3046 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3047 { 3048 const struct bpf_insn *insn = &meta->insn; 3049 u64 imm = insn->imm; /* sign extend */ 3050 u8 dst_gpr = insn->dst_reg * 2; 3051 swreg tmp_reg; 3052 3053 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3054 emit_alu(nfp_prog, imm_b(nfp_prog), 3055 reg_a(dst_gpr), ALU_OP_AND, tmp_reg); 3056 /* Upper word of the mask can only be 0 or ~0 from sign extension, 3057 * so either ignore it or OR the whole thing in. 
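 * E.g. a negative imm sign-extends to an all-ones upper word, so the
 * upper word of dst is OR-ed into the test below; a non-negative imm
 * leaves the upper mask zero and only the low-word AND result is
 * branched on.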
3058 */ 3059 if (imm >> 32) 3060 emit_alu(nfp_prog, reg_none(), 3061 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); 3062 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3063 3064 return 0; 3065 } 3066 3067 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3068 { 3069 const struct bpf_insn *insn = &meta->insn; 3070 u64 imm = insn->imm; /* sign extend */ 3071 swreg tmp_reg; 3072 3073 if (!imm) { 3074 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 3075 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 3076 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3077 return 0; 3078 } 3079 3080 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3081 emit_alu(nfp_prog, reg_none(), 3082 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3083 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3084 3085 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3086 emit_alu(nfp_prog, reg_none(), 3087 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3088 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3089 3090 return 0; 3091 } 3092 3093 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3094 { 3095 const struct bpf_insn *insn = &meta->insn; 3096 3097 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 3098 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 3099 emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 3100 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 3101 emit_alu(nfp_prog, reg_none(), 3102 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 3103 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3104 3105 return 0; 3106 } 3107 3108 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3109 { 3110 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 3111 } 3112 3113 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3114 { 3115 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 3116 } 3117 3118 static int 3119 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3120 { 3121 u32 ret_tgt, stack_depth, offset_br; 3122 swreg tmp_reg; 3123 3124 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); 3125 /* Space for saving the return address is accounted for by the callee, 3126 * so stack_depth can be zero for the main function. 3127 */ 3128 if (stack_depth) { 3129 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3130 stack_imm(nfp_prog)); 3131 emit_alu(nfp_prog, stack_reg(nfp_prog), 3132 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); 3133 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3134 NFP_CSR_ACT_LM_ADDR0); 3135 } 3136 3137 /* Two cases for jumping to the callee: 3138 * 3139 * - If callee uses and needs to save R6~R9 then: 3140 * 1. Put the start offset of the callee into imm_b(). This will 3141 * require a fixup step, as we do not necessarily know this 3142 * address yet. 3143 * 2. Put the return address from the callee to the caller into 3144 * register ret_reg(). 3145 * 3. (After defer slots are consumed) Jump to the subroutine that 3146 * pushes the registers to the stack. 3147 * The subroutine acts as a trampoline, and returns to the address in 3148 * imm_b(), i.e. jumps to the callee. 3149 * 3150 * - If callee does not need to save R6~R9 then just load return 3151 * address to the caller in ret_reg(), and jump to the callee 3152 * directly. 3153 * 3154 * Using ret_reg() to pass the return address to the callee is set here 3155 * as a convention. The callee can then push this address onto its 3156 * stack frame in its prologue. 
The advantages of passing the return
3157 * address through ret_reg(), instead of pushing it to the stack right
3158 * here, are the following:
3159 * - It looks cleaner.
3160 * - If the called function is called multiple times, we get a lower
3161 * program size.
3162 * - When stack depth is not null, we save the two no-op instructions
3163 * that would otherwise have to be added just before the emit_br().
3164 * - If we ever find a register to hold the return address during the
3165 * whole execution of the callee, we will not have to push the return
3166 * address to the stack for leaf functions.
3167 */
3168 if (!meta->jmp_dst) {
3169 pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
3170 return -ELOOP;
3171 }
3172 if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
3173 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
3174 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
3175 RELO_BR_GO_CALL_PUSH_REGS);
3176 offset_br = nfp_prog_current_offset(nfp_prog);
3177 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
3178 } else {
3179 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
3180 emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
3181 offset_br = nfp_prog_current_offset(nfp_prog);
3182 }
3183 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
3184
3185 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
3186 return -EINVAL;
3187
3188 if (stack_depth) {
3189 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3190 stack_imm(nfp_prog));
3191 emit_alu(nfp_prog, stack_reg(nfp_prog),
3192 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
3193 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3194 NFP_CSR_ACT_LM_ADDR0);
3195 wrp_nops(nfp_prog, 3);
3196 }
3197
3198 meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
3199 meta->num_insns_after_br -= offset_br;
3200
3201 return 0;
3202 }
3203
3204 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3205 {
3206 switch (meta->insn.imm) {
3207 case BPF_FUNC_xdp_adjust_head:
3208 return adjust_head(nfp_prog, meta);
3209 case BPF_FUNC_xdp_adjust_tail:
3210 return adjust_tail(nfp_prog, meta);
3211 case BPF_FUNC_map_lookup_elem:
3212 case BPF_FUNC_map_update_elem:
3213 case BPF_FUNC_map_delete_elem:
3214 return map_call_stack_common(nfp_prog, meta);
3215 case BPF_FUNC_get_prandom_u32:
3216 return nfp_get_prandom_u32(nfp_prog, meta);
3217 case BPF_FUNC_perf_event_output:
3218 return nfp_perf_event_output(nfp_prog, meta);
3219 default:
3220 WARN_ONCE(1, "verifier allowed unsupported function\n");
3221 return -EOPNOTSUPP;
3222 }
3223 }
3224
3225 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3226 {
3227 if (is_mbpf_pseudo_call(meta))
3228 return bpf_to_bpf_call(nfp_prog, meta);
3229 else
3230 return helper_call(nfp_prog, meta);
3231 }
3232
3233 static bool nfp_is_main_function(struct nfp_insn_meta *meta)
3234 {
3235 return meta->subprog_idx == 0;
3236 }
3237
3238 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3239 {
3240 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
3241
3242 return 0;
3243 }
3244
3245 static int
3246 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3247 {
3248 if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
3249 /* Pop R6~R9 from the stack via the related subroutine.
3250 * We loaded the return address to the caller into ret_reg().
3251 * This means that the subroutine does not come back here; we
3252 * make it jump back to the subprogram caller directly!
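 * In other words, the pop subroutine's final rtn goes through
 * ret_reg(), which at this point holds the resume address inside
 * the subprogram's caller rather than an address in this epilogue.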
3253 */ 3254 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1, 3255 RELO_BR_GO_CALL_POP_REGS); 3256 /* Pop return address from the stack. */ 3257 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3258 } else { 3259 /* Pop return address from the stack. */ 3260 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3261 /* Jump back to caller if no callee-saved registers were used 3262 * by the subprogram. 3263 */ 3264 emit_rtn(nfp_prog, ret_reg(nfp_prog), 0); 3265 } 3266 3267 return 0; 3268 } 3269 3270 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3271 { 3272 if (nfp_is_main_function(meta)) 3273 return goto_out(nfp_prog, meta); 3274 else 3275 return nfp_subprog_epilogue(nfp_prog, meta); 3276 } 3277 3278 static const instr_cb_t instr_cb[256] = { 3279 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 3280 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 3281 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 3282 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 3283 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 3284 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 3285 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 3286 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 3287 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 3288 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 3289 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 3290 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 3291 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, 3292 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, 3293 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, 3294 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, 3295 [BPF_ALU64 | BPF_NEG] = neg_reg64, 3296 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, 3297 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 3298 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, 3299 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 3300 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, 3301 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, 3302 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 3303 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 3304 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 3305 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 3306 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 3307 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 3308 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 3309 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 3310 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 3311 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 3312 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 3313 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 3314 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, 3315 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, 3316 [BPF_ALU | BPF_DIV | BPF_X] = div_reg, 3317 [BPF_ALU | BPF_DIV | BPF_K] = div_imm, 3318 [BPF_ALU | BPF_NEG] = neg_reg, 3319 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 3320 [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg, 3321 [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm, 3322 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 3323 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 3324 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 3325 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 3326 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 3327 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 3328 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 3329 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 3330 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 3331 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 3332 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 3333 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 3334 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 3335 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 3336 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 3337 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 3338 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 3339 [BPF_STX | BPF_XADD | 
BPF_DW] = mem_xadd8,
3340 [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
3341 [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
3342 [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
3343 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
3344 [BPF_JMP | BPF_JA | BPF_K] = jump,
3345 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
3346 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
3347 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
3348 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
3349 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
3350 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
3351 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
3352 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
3353 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
3354 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
3355 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
3356 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
3357 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
3358 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
3359 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
3360 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
3361 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
3362 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
3363 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
3364 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
3365 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
3366 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
3367 [BPF_JMP | BPF_CALL] = call,
3368 [BPF_JMP | BPF_EXIT] = jmp_exit,
3369 };
3370
3371 /* --- Assembler logic --- */
3372 static int
3373 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
3374 struct nfp_insn_meta *jmp_dst, u32 br_idx)
3375 {
3376 if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
3377 pr_err("BUG: failed to fix up callee register saving\n");
3378 return -EINVAL;
3379 }
3380
3381 immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
3382
3383 return 0;
3384 }
3385
3386 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
3387 {
3388 struct nfp_insn_meta *meta, *jmp_dst;
3389 u32 idx, br_idx;
3390 int err;
3391
3392 list_for_each_entry(meta, &nfp_prog->insns, l) {
3393 if (meta->skip)
3394 continue;
3395 if (BPF_CLASS(meta->insn.code) != BPF_JMP)
3396 continue;
3397 if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
3398 !nfp_is_main_function(meta))
3399 continue;
3400 if (is_mbpf_helper_call(meta))
3401 continue;
3402
3403 if (list_is_last(&meta->l, &nfp_prog->insns))
3404 br_idx = nfp_prog->last_bpf_off;
3405 else
3406 br_idx = list_next_entry(meta, l)->off - 1;
3407
3408 /* For a BPF-to-BPF function call, a stack adjustment sequence is
3409 * generated after the return instruction. Therefore, we must
3410 * subtract the length of this sequence so that br_idx points
3411 * to where the "branch" NFP instruction is expected to be.
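 * (num_insns_after_br was recorded in bpf_to_bpf_call() as the number
 * of instructions emitted after that branch.)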
3412 */ 3413 if (is_mbpf_pseudo_call(meta)) 3414 br_idx -= meta->num_insns_after_br; 3415 3416 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 3417 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 3418 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 3419 return -ELOOP; 3420 } 3421 3422 if (meta->insn.code == (BPF_JMP | BPF_EXIT)) 3423 continue; 3424 3425 /* Leave special branches for later */ 3426 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3427 RELO_BR_REL && !is_mbpf_pseudo_call(meta)) 3428 continue; 3429 3430 if (!meta->jmp_dst) { 3431 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 3432 return -ELOOP; 3433 } 3434 3435 jmp_dst = meta->jmp_dst; 3436 3437 if (jmp_dst->skip) { 3438 pr_err("Branch landing on removed instruction!!\n"); 3439 return -ELOOP; 3440 } 3441 3442 if (is_mbpf_pseudo_call(meta) && 3443 nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) { 3444 err = nfp_fixup_immed_relo(nfp_prog, meta, 3445 jmp_dst, br_idx); 3446 if (err) 3447 return err; 3448 } 3449 3450 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3451 RELO_BR_REL) 3452 continue; 3453 3454 for (idx = meta->off; idx <= br_idx; idx++) { 3455 if (!nfp_is_br(nfp_prog->prog[idx])) 3456 continue; 3457 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 3458 } 3459 } 3460 3461 return 0; 3462 } 3463 3464 static void nfp_intro(struct nfp_prog *nfp_prog) 3465 { 3466 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 3467 emit_alu(nfp_prog, plen_reg(nfp_prog), 3468 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 3469 } 3470 3471 static void 3472 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3473 { 3474 /* Save return address into the stack. */ 3475 wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog)); 3476 } 3477 3478 static void 3479 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3480 { 3481 unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth; 3482 3483 nfp_prog->stack_frame_depth = round_up(depth, 4); 3484 nfp_subprog_prologue(nfp_prog, meta); 3485 } 3486 3487 bool nfp_is_subprog_start(struct nfp_insn_meta *meta) 3488 { 3489 return meta->flags & FLAG_INSN_IS_SUBPROG_START; 3490 } 3491 3492 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 3493 { 3494 /* TC direct-action mode: 3495 * 0,1 ok NOT SUPPORTED[1] 3496 * 2 drop 0x22 -> drop, count as stat1 3497 * 4,5 nuke 0x02 -> drop 3498 * 7 redir 0x44 -> redir, count as stat2 3499 * * unspec 0x11 -> pass, count as stat0 3500 * 3501 * [1] We can't support OK and RECLASSIFY because we can't tell TC 3502 * the exact decision made. We are forced to support UNSPEC 3503 * to handle aborts so that's the only one we handle for passing 3504 * packets up the stack. 
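 * Each return code R0 selects one nibble from each of the two lookup
 * words loaded below (0x41221211 and 0x41001211); the two nibbles are
 * combined into the 8-bit action/stats value, e.g. R0 == 2 (drop)
 * picks nibble 2 from both words, giving 0x22.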
3505 */ 3506 /* Target for aborts */ 3507 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3508 3509 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3510 3511 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3512 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 3513 3514 /* Target for normal exits */ 3515 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3516 3517 /* if R0 > 7 jump to abort */ 3518 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 3519 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3520 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3521 3522 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 3523 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 3524 3525 emit_shf(nfp_prog, reg_a(1), 3526 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 3527 3528 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3529 emit_shf(nfp_prog, reg_a(2), 3530 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3531 3532 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3533 emit_shf(nfp_prog, reg_b(2), 3534 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 3535 3536 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3537 3538 emit_shf(nfp_prog, reg_b(2), 3539 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 3540 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3541 } 3542 3543 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 3544 { 3545 /* XDP return codes: 3546 * 0 aborted 0x82 -> drop, count as stat3 3547 * 1 drop 0x22 -> drop, count as stat1 3548 * 2 pass 0x11 -> pass, count as stat0 3549 * 3 tx 0x44 -> redir, count as stat2 3550 * * unknown 0x82 -> drop, count as stat3 3551 */ 3552 /* Target for aborts */ 3553 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3554 3555 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3556 3557 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3558 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 3559 3560 /* Target for normal exits */ 3561 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3562 3563 /* if R0 > 3 jump to abort */ 3564 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 3565 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3566 3567 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 3568 3569 emit_shf(nfp_prog, reg_a(1), 3570 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 3571 3572 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3573 emit_shf(nfp_prog, reg_b(2), 3574 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3575 3576 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3577 3578 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3579 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3580 } 3581 3582 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog) 3583 { 3584 unsigned int idx; 3585 3586 for (idx = 1; idx < nfp_prog->subprog_cnt; idx++) 3587 if (nfp_prog->subprog[idx].needs_reg_push) 3588 return true; 3589 3590 return false; 3591 } 3592 3593 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog) 3594 { 3595 u8 reg; 3596 3597 /* Subroutine: Save all callee saved registers (R6 ~ R9). 3598 * imm_b() holds the return address. 
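 * LM stack frame layout assumed here: slot 0 holds the return address
 * pushed by the subprogram prologue, slots 1..8 hold R6.lo, R6.hi,
 * R7.lo, R7.hi, R8.lo, R8.hi, R9.lo and R9.hi.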
3599 */ 3600 nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog); 3601 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3602 u8 adj = (reg - BPF_REG_0) * 2; 3603 u8 idx = (reg - BPF_REG_6) * 2; 3604 3605 /* The first slot in the stack frame is used to push the return 3606 * address in bpf_to_bpf_call(), start just after. 3607 */ 3608 wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj)); 3609 3610 if (reg == BPF_REG_8) 3611 /* Prepare to jump back, last 3 insns use defer slots */ 3612 emit_rtn(nfp_prog, imm_b(nfp_prog), 3); 3613 3614 wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1)); 3615 } 3616 } 3617 3618 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog) 3619 { 3620 u8 reg; 3621 3622 /* Subroutine: Restore all callee saved registers (R6 ~ R9). 3623 * ret_reg() holds the return address. 3624 */ 3625 nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog); 3626 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3627 u8 adj = (reg - BPF_REG_0) * 2; 3628 u8 idx = (reg - BPF_REG_6) * 2; 3629 3630 /* The first slot in the stack frame holds the return address, 3631 * start popping just after that. 3632 */ 3633 wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx)); 3634 3635 if (reg == BPF_REG_8) 3636 /* Prepare to jump back, last 3 insns use defer slots */ 3637 emit_rtn(nfp_prog, ret_reg(nfp_prog), 3); 3638 3639 wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1)); 3640 } 3641 } 3642 3643 static void nfp_outro(struct nfp_prog *nfp_prog) 3644 { 3645 switch (nfp_prog->type) { 3646 case BPF_PROG_TYPE_SCHED_CLS: 3647 nfp_outro_tc_da(nfp_prog); 3648 break; 3649 case BPF_PROG_TYPE_XDP: 3650 nfp_outro_xdp(nfp_prog); 3651 break; 3652 default: 3653 WARN_ON(1); 3654 } 3655 3656 if (!nfp_prog_needs_callee_reg_save(nfp_prog)) 3657 return; 3658 3659 nfp_push_callee_registers(nfp_prog); 3660 nfp_pop_callee_registers(nfp_prog); 3661 } 3662 3663 static int nfp_translate(struct nfp_prog *nfp_prog) 3664 { 3665 struct nfp_insn_meta *meta; 3666 unsigned int depth; 3667 int err; 3668 3669 depth = nfp_prog->subprog[0].stack_depth; 3670 nfp_prog->stack_frame_depth = round_up(depth, 4); 3671 3672 nfp_intro(nfp_prog); 3673 if (nfp_prog->error) 3674 return nfp_prog->error; 3675 3676 list_for_each_entry(meta, &nfp_prog->insns, l) { 3677 instr_cb_t cb = instr_cb[meta->insn.code]; 3678 3679 meta->off = nfp_prog_current_offset(nfp_prog); 3680 3681 if (nfp_is_subprog_start(meta)) { 3682 nfp_start_subprog(nfp_prog, meta); 3683 if (nfp_prog->error) 3684 return nfp_prog->error; 3685 } 3686 3687 if (meta->skip) { 3688 nfp_prog->n_translated++; 3689 continue; 3690 } 3691 3692 if (nfp_meta_has_prev(nfp_prog, meta) && 3693 nfp_meta_prev(meta)->double_cb) 3694 cb = nfp_meta_prev(meta)->double_cb; 3695 if (!cb) 3696 return -ENOENT; 3697 err = cb(nfp_prog, meta); 3698 if (err) 3699 return err; 3700 if (nfp_prog->error) 3701 return nfp_prog->error; 3702 3703 nfp_prog->n_translated++; 3704 } 3705 3706 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 3707 3708 nfp_outro(nfp_prog); 3709 if (nfp_prog->error) 3710 return nfp_prog->error; 3711 3712 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 3713 if (nfp_prog->error) 3714 return nfp_prog->error; 3715 3716 return nfp_fixup_branches(nfp_prog); 3717 } 3718 3719 /* --- Optimizations --- */ 3720 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 3721 { 3722 struct nfp_insn_meta *meta; 3723 3724 list_for_each_entry(meta, &nfp_prog->insns, l) { 3725 struct bpf_insn insn = meta->insn; 3726 3727 /* Programs converted from cBPF 
static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		nfp_outro_tc_da(nfp_prog);
		break;
	case BPF_PROG_TYPE_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	default:
		WARN_ON(1);
	}

	if (!nfp_prog_needs_callee_reg_save(nfp_prog))
		return;

	nfp_push_callee_registers(nfp_prog);
	nfp_pop_callee_registers(nfp_prog);
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	unsigned int depth;
	int err;

	depth = nfp_prog->subprog[0].stack_depth;
	nfp_prog->stack_frame_depth = round_up(depth, 4);

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (nfp_is_subprog_start(meta)) {
			nfp_start_subprog(nfp_prog, meta);
			if (nfp_prog->error)
				return nfp_prog->error;
		}

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;
		if (nfp_prog->error)
			return nfp_prog->error;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* abs(insn.imm) will fit better into unrestricted reg immediate -
 * convert add/sub of a negative number into a sub/add of a positive one.
 */
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		if (meta->skip)
			continue;

		if (BPF_CLASS(insn.code) != BPF_ALU &&
		    BPF_CLASS(insn.code) != BPF_ALU64 &&
		    BPF_CLASS(insn.code) != BPF_JMP)
			continue;
		if (BPF_SRC(insn.code) != BPF_K)
			continue;
		if (insn.imm >= 0)
			continue;

		if (BPF_CLASS(insn.code) == BPF_JMP) {
			switch (BPF_OP(insn.code)) {
			case BPF_JGE:
			case BPF_JSGE:
			case BPF_JLT:
			case BPF_JSLT:
				meta->jump_neg_op = true;
				break;
			default:
				continue;
			}
		} else {
			if (BPF_OP(insn.code) == BPF_ADD)
				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
			else if (BPF_OP(insn.code) == BPF_SUB)
				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
			else
				continue;

			meta->insn.code = insn.code | BPF_K;
		}

		meta->insn.imm = -insn.imm;
	}
}
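/* Illustrative sketch (not used by the driver): the ALU rewrite performed by
 * nfp_bpf_opt_neg_add_sub(), shown on a single standalone instruction.  An
 * add of a negative immediate becomes a sub of the positive value (and vice
 * versa), which fits the NFP's unrestricted immediate encoding better.  The
 * function name is ours, for exposition only:
 */
static struct bpf_insn __maybe_unused nfp_neg_imm_rewrite_example(void)
{
	/* r1 += -5, i.e. BPF_ALU64 | BPF_ADD | BPF_K with imm == -5 */
	struct bpf_insn insn = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -5);

	/* rewritten to r1 -= 5, exactly as the pass above does */
	insn.code = BPF_CLASS(insn.code) | BPF_SUB | BPF_K;
	insn.imm = -insn.imm;

	return insn;
}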
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}
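/* Illustrative sketch (not used by the driver): why nfp_bpf_opt_ld_shift()
 * may drop the shift pair for the LSH-then-RSH ordering.  A 32-bit
 * BPF_ABS/BPF_IND load leaves the upper word of the 64-bit register zero,
 * so shifting left then right by 32 (0x20) is an identity on such values.
 * Hypothetical demonstration, name ours:
 */
static u64 __maybe_unused nfp_ld_shift_identity_example(u32 loaded)
{
	u64 val = loaded;	/* upper 32 bits are zero after the load */

	return (val << 32) >> 32;	/* equals val, hence removable */
}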
/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the store
 * should be the same, and load and store should also operate at the same
 * width.  If either addr_src or addr_dest is the stack pointer, we don't do
 * the CPP optimization as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET &&
	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn of this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 *  - Their address base registers are the same.
 *  - Their address offsets are in the same order.
 *  - They operate at the same memory width.
 *  - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return TRUE if a cross memory access happens.  A cross memory access means
 * the store area overlaps with the load area such that a later load might
 * read a value written by a previous store; in that case we can't treat the
 * sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Both load and store are PTR_TO_PACKET; check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets.  Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}
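/* Illustrative sketch (not used by the driver): the ascending-order overlap
 * test of cross_mem_access() on concrete numbers.  With the head load at
 * canonicalized offset 0, the head store at offset 4, and 4-byte accesses,
 * the second load (offset 4) would read what the first store wrote, so the
 * sequence cannot be treated as a memory copy.  Name ours, for exposition:
 */
static bool __maybe_unused nfp_cross_mem_example(void)
{
	s16 head_ld_off = 0, head_st_off = 4, ld_off = 4;

	/* Mirrors the "ascending order cross" condition above; true here. */
	return ld_off > head_ld_off &&
	       head_ld_off < head_st_off && ld_off >= head_st_off;
}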
/* This pass tries to identify the following instruction sequences:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when lowering
 * memcpy; the NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crossed with the previous
		 *   pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then it
			 * could serve as the new head of the next chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}
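/* Illustrative sketch (not used by the driver): what the pass above leaves
 * behind for a recognized chain.  The head load's ldst_gather_len holds the
 * total copy size, negated when the pairs ran in descending offset order
 * (after the head offsets were rewound to the lowest pair), and paired_st
 * points at the matching store.  A hypothetical consumer (name ours)
 * recovering length and direction:
 */
static u32 __maybe_unused nfp_gather_len_example(s16 ldst_gather_len,
						 bool *descending)
{
	*descending = ldst_gather_len < 0;

	return *descending ? -ldst_gather_len : ldst_gather_len;
}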
static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->skip)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}

		/* Check ID to make sure two reads share the same variable
		 * offset against PTR_TO_PACKET, and check OFF to make sure
		 * they also share the same constant offset.
		 *
		 * OFFs don't strictly need to be the same: they are constant
		 * offsets against PTR_TO_PACKET, so different OFFs could be
		 * canonicalized to offsets against the original packet
		 * pointer.  We don't support this yet.
		 */
		if (meta->ptr.id == range_ptr_id &&
		    meta->ptr.off == range_ptr_off) {
			s16 new_start = range_start;
			s16 end, off = insn->off;
			s16 new_end = range_end;
			bool changed = false;

			if (off < range_start) {
				new_start = off;
				changed = true;
			}

			end = off + BPF_LDST_BYTES(insn);
			if (end > range_end) {
				new_end = end;
				changed = true;
			}

			if (!changed)
				continue;

			if (new_end - new_start <= 64) {
				/* Install new range. */
				range_start = new_start;
				range_end = new_end;
				continue;
			}
		}

end_current_then_start_new:
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
start_new:
		range_node = meta;
		range_node->pkt_cache.do_init = true;
		range_ptr_id = range_node->ptr.id;
		range_ptr_off = range_node->ptr.off;
		range_start = insn->off;
		range_end = insn->off + BPF_LDST_BYTES(insn);
	}

	if (range_node) {
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
	}

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;

		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
			if (meta->pkt_cache.do_init) {
				range_start = meta->pkt_cache.range_start;
				range_end = meta->pkt_cache.range_end;
			} else {
				meta->pkt_cache.range_start = range_start;
				meta->pkt_cache.range_end = range_end;
			}
		}
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_neg_add_sub(nfp_prog);
	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->skip || meta2->skip)
			continue;

		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
			continue;

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

	return 0;
}
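/* Illustrative sketch (not used by the driver): a BPF_LD | BPF_IMM | BPF_DW
 * pseudo instruction spreads its 64-bit immediate across two insns, low word
 * in the first imm and high word in the second.  After the rewrite above the
 * map ID sits in the first imm and the second is zeroed, so recombining the
 * halves yields just the ID.  Hypothetical helper, name ours:
 */
static u64 __maybe_unused nfp_ld_imm64_join_example(const struct bpf_insn *lo,
						    const struct bpf_insn *hi)
{
	/* Same reassembly expression as nfp_bpf_replace_map_ptrs() uses. */
	return (u32)lo->imm | (u64)hi->imm << 32;
}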
static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct nfp_insn_meta *dst_meta;
		u64 code = meta->insn.code;
		unsigned int dst_idx;
		bool pseudo_call;

		if (BPF_CLASS(code) != BPF_JMP)
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (is_mbpf_helper_call(meta))
			continue;

		/* If opcode is BPF_CALL at this point, this can only be a
		 * BPF-to-BPF call (a.k.a. pseudo call).
		 */
		pseudo_call = BPF_OP(code) == BPF_CALL;

		if (pseudo_call)
			dst_idx = meta->n + 1 + meta->insn.imm;
		else
			dst_idx = meta->n + 1 + meta->insn.off;

		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);

		if (pseudo_call)
			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		meta->jmp_dst = dst_meta;
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;
		u16 off;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_GO_CALL_PUSH_REGS:
			if (!nfp_prog->tgt_call_push_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_push_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_GO_CALL_POP_REGS:
			if (!nfp_prog->tgt_call_pop_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			case BPF_FUNC_perf_event_output:
				val = nfp_prog->bpf->helpers.perf_event_output;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}
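/* Illustrative sketch (not used by the driver): RELO_BR_HELPER branches,
 * handled above, carry the BPF helper ID in their offset field biased by
 * BR_OFF_RELO at code generation time.  A hypothetical decode of that
 * convention (name ours), mirroring the first two statements of the
 * RELO_BR_HELPER case in nfp_bpf_relo_for_vnic():
 */
static u32 __maybe_unused nfp_helper_id_from_relo_example(u64 insn)
{
	return br_get_offset(insn) - BR_OFF_RELO;
}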