1 /* 2 * Copyright (C) 2016-2018 Netronome Systems, Inc. 3 * 4 * This software is dual licensed under the GNU General License Version 2, 5 * June 1991 as shown in the file COPYING in the top-level directory of this 6 * source tree or the BSD 2-Clause License provided below. You have the 7 * option to license this software under the complete terms of either license. 8 * 9 * The BSD 2-Clause License: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * 1. Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * 2. Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #define pr_fmt(fmt) "NFP net bpf: " fmt 35 36 #include <linux/bug.h> 37 #include <linux/kernel.h> 38 #include <linux/bpf.h> 39 #include <linux/filter.h> 40 #include <linux/pkt_cls.h> 41 #include <linux/unistd.h> 42 43 #include "main.h" 44 #include "../nfp_asm.h" 45 #include "../nfp_net_ctrl.h" 46 47 /* --- NFP prog --- */ 48 /* Foreach "multiple" entries macros provide pos and next<n> pointers. 49 * It's safe to modify the next pointers (but not pos). 50 */ 51 #define nfp_for_each_insn_walk2(nfp_prog, pos, next) \ 52 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \ 53 next = list_next_entry(pos, l); \ 54 &(nfp_prog)->insns != &pos->l && \ 55 &(nfp_prog)->insns != &next->l; \ 56 pos = nfp_meta_next(pos), \ 57 next = nfp_meta_next(pos)) 58 59 #define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \ 60 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \ 61 next = list_next_entry(pos, l), \ 62 next2 = list_next_entry(next, l); \ 63 &(nfp_prog)->insns != &pos->l && \ 64 &(nfp_prog)->insns != &next->l && \ 65 &(nfp_prog)->insns != &next2->l; \ 66 pos = nfp_meta_next(pos), \ 67 next = nfp_meta_next(pos), \ 68 next2 = nfp_meta_next(next)) 69 70 static bool 71 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 72 { 73 return meta->l.prev != &nfp_prog->insns; 74 } 75 76 static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) 77 { 78 if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) { 79 pr_warn("instruction limit reached (%u NFP instructions)\n", 80 nfp_prog->prog_len); 81 nfp_prog->error = -ENOSPC; 82 return; 83 } 84 85 nfp_prog->prog[nfp_prog->prog_len] = insn; 86 nfp_prog->prog_len++; 87 } 88 89 static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog) 90 { 91 return nfp_prog->prog_len; 92 } 93 94 static bool 95 nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off) 96 { 97 /* If there is a recorded error we may have dropped instructions; 98 * that doesn't have to be due to translator bug, and the translation 99 * will fail anyway, so just return OK. 100 */ 101 if (nfp_prog->error) 102 return true; 103 return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off); 104 } 105 106 /* --- Emitters --- */ 107 static void 108 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, 109 u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx, 110 bool indir) 111 { 112 u64 insn; 113 114 insn = FIELD_PREP(OP_CMD_A_SRC, areg) | 115 FIELD_PREP(OP_CMD_CTX, ctx) | 116 FIELD_PREP(OP_CMD_B_SRC, breg) | 117 FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) | 118 FIELD_PREP(OP_CMD_XFER, xfer) | 119 FIELD_PREP(OP_CMD_CNT, size) | 120 FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) | 121 FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) | 122 FIELD_PREP(OP_CMD_INDIR, indir) | 123 FIELD_PREP(OP_CMD_MODE, mode); 124 125 nfp_prog_push(nfp_prog, insn); 126 } 127 128 static void 129 emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 130 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir) 131 { 132 struct nfp_insn_re_regs reg; 133 int err; 134 135 err = swreg_to_restricted(reg_none(), lreg, rreg, ®, false); 136 if (err) { 137 nfp_prog->error = err; 138 return; 139 } 140 if (reg.swap) { 141 pr_err("cmd can't swap arguments\n"); 142 nfp_prog->error = -EFAULT; 143 return; 144 } 145 if (reg.dst_lmextn || reg.src_lmextn) { 146 pr_err("cmd can't use LMextn\n"); 147 nfp_prog->error = -EFAULT; 148 return; 149 } 150 151 __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx, 152 indir); 153 } 154 155 static void 156 emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 157 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) 158 { 159 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false); 160 } 161 162 static void 163 emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 164 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) 165 { 166 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true); 167 } 168 169 static void 170 __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip, 171 enum br_ctx_signal_state css, u16 addr, u8 defer) 172 { 173 u16 addr_lo, addr_hi; 174 u64 insn; 175 176 addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)); 177 addr_hi = addr != addr_lo; 178 179 insn = OP_BR_BASE | 180 FIELD_PREP(OP_BR_MASK, mask) | 181 FIELD_PREP(OP_BR_EV_PIP, ev_pip) | 182 FIELD_PREP(OP_BR_CSS, css) | 183 FIELD_PREP(OP_BR_DEFBR, defer) | 184 FIELD_PREP(OP_BR_ADDR_LO, addr_lo) | 185 FIELD_PREP(OP_BR_ADDR_HI, addr_hi); 186 187 nfp_prog_push(nfp_prog, insn); 188 } 189 190 static void 191 emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer, 192 enum nfp_relo_type relo) 193 { 194 if (mask == BR_UNC && defer > 2) { 195 pr_err("BUG: branch defer out of bounds %d\n", defer); 196 nfp_prog->error = -EFAULT; 197 return; 198 } 199 200 __emit_br(nfp_prog, mask, 201 mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND, 202 BR_CSS_NONE, addr, defer); 203 204 nfp_prog->prog[nfp_prog->prog_len - 1] |= 205 FIELD_PREP(OP_RELO_TYPE, relo); 206 } 207 208 static void 209 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) 210 { 211 emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL); 212 } 213 214 static void 215 __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, 216 enum immed_width width, bool invert, 217 enum immed_shift shift, bool wr_both, 218 bool dst_lmextn, bool src_lmextn) 219 { 220 u64 insn; 221 222 insn = OP_IMMED_BASE | 223 FIELD_PREP(OP_IMMED_A_SRC, areg) | 224 FIELD_PREP(OP_IMMED_B_SRC, breg) | 225 FIELD_PREP(OP_IMMED_IMM, imm_hi) | 226 FIELD_PREP(OP_IMMED_WIDTH, width) | 227 FIELD_PREP(OP_IMMED_INV, invert) | 228 FIELD_PREP(OP_IMMED_SHIFT, shift) | 229 FIELD_PREP(OP_IMMED_WR_AB, wr_both) | 230 FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) | 231 FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn); 232 233 nfp_prog_push(nfp_prog, insn); 234 } 235 236 static void 237 emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm, 238 enum immed_width width, bool invert, enum immed_shift shift) 239 { 240 struct nfp_insn_ur_regs reg; 241 int err; 242 243 if (swreg_type(dst) == NN_REG_IMM) { 244 nfp_prog->error = -EFAULT; 245 return; 246 } 247 248 err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), ®); 249 if (err) { 250 nfp_prog->error = err; 251 return; 252 } 253 254 /* Use reg.dst when destination is No-Dest. */ 255 __emit_immed(nfp_prog, 256 swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg, 257 reg.breg, imm >> 8, width, invert, shift, 258 reg.wr_both, reg.dst_lmextn, reg.src_lmextn); 259 } 260 261 static void 262 __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, 263 enum shf_sc sc, u8 shift, 264 u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both, 265 bool dst_lmextn, bool src_lmextn) 266 { 267 u64 insn; 268 269 if (!FIELD_FIT(OP_SHF_SHIFT, shift)) { 270 nfp_prog->error = -EFAULT; 271 return; 272 } 273 274 if (sc == SHF_SC_L_SHF) 275 shift = 32 - shift; 276 277 insn = OP_SHF_BASE | 278 FIELD_PREP(OP_SHF_A_SRC, areg) | 279 FIELD_PREP(OP_SHF_SC, sc) | 280 FIELD_PREP(OP_SHF_B_SRC, breg) | 281 FIELD_PREP(OP_SHF_I8, i8) | 282 FIELD_PREP(OP_SHF_SW, sw) | 283 FIELD_PREP(OP_SHF_DST, dst) | 284 FIELD_PREP(OP_SHF_SHIFT, shift) | 285 FIELD_PREP(OP_SHF_OP, op) | 286 FIELD_PREP(OP_SHF_DST_AB, dst_ab) | 287 FIELD_PREP(OP_SHF_WR_AB, wr_both) | 288 FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) | 289 FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn); 290 291 nfp_prog_push(nfp_prog, insn); 292 } 293 294 static void 295 emit_shf(struct nfp_prog *nfp_prog, swreg dst, 296 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift) 297 { 298 struct nfp_insn_re_regs reg; 299 int err; 300 301 err = swreg_to_restricted(dst, lreg, rreg, ®, true); 302 if (err) { 303 nfp_prog->error = err; 304 return; 305 } 306 307 __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift, 308 reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both, 309 reg.dst_lmextn, reg.src_lmextn); 310 } 311 312 static void 313 __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, 314 u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both, 315 bool dst_lmextn, bool src_lmextn) 316 { 317 u64 insn; 318 319 insn = OP_ALU_BASE | 320 FIELD_PREP(OP_ALU_A_SRC, areg) | 321 FIELD_PREP(OP_ALU_B_SRC, breg) | 322 FIELD_PREP(OP_ALU_DST, dst) | 323 FIELD_PREP(OP_ALU_SW, swap) | 324 FIELD_PREP(OP_ALU_OP, op) | 325 FIELD_PREP(OP_ALU_DST_AB, dst_ab) | 326 FIELD_PREP(OP_ALU_WR_AB, wr_both) | 327 FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) | 328 FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn); 329 330 nfp_prog_push(nfp_prog, insn); 331 } 332 333 static void 334 emit_alu(struct nfp_prog *nfp_prog, swreg dst, 335 swreg lreg, enum alu_op op, swreg rreg) 336 { 337 struct nfp_insn_ur_regs reg; 338 int err; 339 340 err = swreg_to_unrestricted(dst, lreg, rreg, ®); 341 if (err) { 342 nfp_prog->error = err; 343 return; 344 } 345 346 __emit_alu(nfp_prog, reg.dst, reg.dst_ab, 347 reg.areg, op, reg.breg, reg.swap, reg.wr_both, 348 reg.dst_lmextn, reg.src_lmextn); 349 } 350 351 static void 352 __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, 353 u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8, 354 bool zero, bool swap, bool wr_both, 355 bool dst_lmextn, bool src_lmextn) 356 { 357 u64 insn; 358 359 insn = OP_LDF_BASE | 360 FIELD_PREP(OP_LDF_A_SRC, areg) | 361 FIELD_PREP(OP_LDF_SC, sc) | 362 FIELD_PREP(OP_LDF_B_SRC, breg) | 363 FIELD_PREP(OP_LDF_I8, imm8) | 364 FIELD_PREP(OP_LDF_SW, swap) | 365 FIELD_PREP(OP_LDF_ZF, zero) | 366 FIELD_PREP(OP_LDF_BMASK, bmask) | 367 FIELD_PREP(OP_LDF_SHF, shift) | 368 FIELD_PREP(OP_LDF_WR_AB, wr_both) | 369 FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) | 370 FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn); 371 372 nfp_prog_push(nfp_prog, insn); 373 } 374 375 static void 376 emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, 377 enum shf_sc sc, u8 shift, bool zero) 378 { 379 struct nfp_insn_re_regs reg; 380 int err; 381 382 /* Note: ld_field is special as it uses one of the src regs as dst */ 383 err = swreg_to_restricted(dst, dst, src, ®, true); 384 if (err) { 385 nfp_prog->error = err; 386 return; 387 } 388 389 __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift, 390 reg.i8, zero, reg.swap, reg.wr_both, 391 reg.dst_lmextn, reg.src_lmextn); 392 } 393 394 static void 395 emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, 396 enum shf_sc sc, u8 shift) 397 { 398 emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false); 399 } 400 401 static void 402 __emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr, 403 bool dst_lmextn, bool src_lmextn) 404 { 405 u64 insn; 406 407 insn = OP_LCSR_BASE | 408 FIELD_PREP(OP_LCSR_A_SRC, areg) | 409 FIELD_PREP(OP_LCSR_B_SRC, breg) | 410 FIELD_PREP(OP_LCSR_WRITE, wr) | 411 FIELD_PREP(OP_LCSR_ADDR, addr / 4) | 412 FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) | 413 FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn); 414 415 nfp_prog_push(nfp_prog, insn); 416 } 417 418 static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr) 419 { 420 struct nfp_insn_ur_regs reg; 421 int err; 422 423 /* This instruction takes immeds instead of reg_none() for the ignored 424 * operand, but we can't encode 2 immeds in one instr with our normal 425 * swreg infra so if param is an immed, we encode as reg_none() and 426 * copy the immed to both operands. 427 */ 428 if (swreg_type(src) == NN_REG_IMM) { 429 err = swreg_to_unrestricted(reg_none(), src, reg_none(), ®); 430 reg.breg = reg.areg; 431 } else { 432 err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), ®); 433 } 434 if (err) { 435 nfp_prog->error = err; 436 return; 437 } 438 439 __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr, 440 false, reg.src_lmextn); 441 } 442 443 /* CSR value is read in following immed[gpr, 0] */ 444 static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr) 445 { 446 __emit_lcsr(nfp_prog, 0, 0, false, addr, false, false); 447 } 448 449 static void emit_nop(struct nfp_prog *nfp_prog) 450 { 451 __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0); 452 } 453 454 /* --- Wrappers --- */ 455 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift) 456 { 457 if (!(imm & 0xffff0000)) { 458 *val = imm; 459 *shift = IMMED_SHIFT_0B; 460 } else if (!(imm & 0xff0000ff)) { 461 *val = imm >> 8; 462 *shift = IMMED_SHIFT_1B; 463 } else if (!(imm & 0x0000ffff)) { 464 *val = imm >> 16; 465 *shift = IMMED_SHIFT_2B; 466 } else { 467 return false; 468 } 469 470 return true; 471 } 472 473 static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm) 474 { 475 enum immed_shift shift; 476 u16 val; 477 478 if (pack_immed(imm, &val, &shift)) { 479 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift); 480 } else if (pack_immed(~imm, &val, &shift)) { 481 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift); 482 } else { 483 emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL, 484 false, IMMED_SHIFT_0B); 485 emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD, 486 false, IMMED_SHIFT_2B); 487 } 488 } 489 490 static void 491 wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm, 492 enum nfp_relo_type relo) 493 { 494 if (imm > 0xffff) { 495 pr_err("relocation of a large immediate!\n"); 496 nfp_prog->error = -EFAULT; 497 return; 498 } 499 emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 500 501 nfp_prog->prog[nfp_prog->prog_len - 1] |= 502 FIELD_PREP(OP_RELO_TYPE, relo); 503 } 504 505 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted) 506 * If the @imm is small enough encode it directly in operand and return 507 * otherwise load @imm to a spare register and return its encoding. 508 */ 509 static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) 510 { 511 if (FIELD_FIT(UR_REG_IMM_MAX, imm)) 512 return reg_imm(imm); 513 514 wrp_immed(nfp_prog, tmp_reg, imm); 515 return tmp_reg; 516 } 517 518 /* re_load_imm_any() - encode immediate or use tmp register (restricted) 519 * If the @imm is small enough encode it directly in operand and return 520 * otherwise load @imm to a spare register and return its encoding. 521 */ 522 static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) 523 { 524 if (FIELD_FIT(RE_REG_IMM_MAX, imm)) 525 return reg_imm(imm); 526 527 wrp_immed(nfp_prog, tmp_reg, imm); 528 return tmp_reg; 529 } 530 531 static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count) 532 { 533 while (count--) 534 emit_nop(nfp_prog); 535 } 536 537 static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src) 538 { 539 emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src); 540 } 541 542 static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src) 543 { 544 wrp_mov(nfp_prog, reg_both(dst), reg_b(src)); 545 } 546 547 /* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the 548 * result to @dst from low end. 549 */ 550 static void 551 wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len, 552 u8 offset) 553 { 554 enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE; 555 u8 mask = (1 << field_len) - 1; 556 557 emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true); 558 } 559 560 /* wrp_reg_or_subpart() - load @field_len bytes from low end of @src, or the 561 * result to @dst from offset, there is no change on the other bits of @dst. 562 */ 563 static void 564 wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, 565 u8 field_len, u8 offset) 566 { 567 enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE; 568 u8 mask = ((1 << field_len) - 1) << offset; 569 570 emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8); 571 } 572 573 static void 574 addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 575 swreg *rega, swreg *regb) 576 { 577 if (offset == reg_imm(0)) { 578 *rega = reg_a(src_gpr); 579 *regb = reg_b(src_gpr + 1); 580 return; 581 } 582 583 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset); 584 emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C, 585 reg_imm(0)); 586 *rega = imm_a(nfp_prog); 587 *regb = imm_b(nfp_prog); 588 } 589 590 /* NFP has Command Push Pull bus which supports bluk memory operations. */ 591 static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 592 { 593 bool descending_seq = meta->ldst_gather_len < 0; 594 s16 len = abs(meta->ldst_gather_len); 595 swreg src_base, off; 596 bool src_40bit_addr; 597 unsigned int i; 598 u8 xfer_num; 599 600 off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 601 src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE; 602 src_base = reg_a(meta->insn.src_reg * 2); 603 xfer_num = round_up(len, 4) / 4; 604 605 if (src_40bit_addr) 606 addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base, 607 &off); 608 609 /* Setup PREV_ALU fields to override memory read length. */ 610 if (len > 32) 611 wrp_immed(nfp_prog, reg_none(), 612 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 613 614 /* Memory read from source addr into transfer-in registers. */ 615 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, 616 src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0, 617 src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32); 618 619 /* Move from transfer-in to transfer-out. */ 620 for (i = 0; i < xfer_num; i++) 621 wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i)); 622 623 off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog)); 624 625 if (len <= 8) { 626 /* Use single direct_ref write8. */ 627 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 628 reg_a(meta->paired_st->dst_reg * 2), off, len - 1, 629 CMD_CTX_SWAP); 630 } else if (len <= 32 && IS_ALIGNED(len, 4)) { 631 /* Use single direct_ref write32. */ 632 emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 633 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1, 634 CMD_CTX_SWAP); 635 } else if (len <= 32) { 636 /* Use single indirect_ref write8. */ 637 wrp_immed(nfp_prog, reg_none(), 638 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1)); 639 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 640 reg_a(meta->paired_st->dst_reg * 2), off, 641 len - 1, CMD_CTX_SWAP); 642 } else if (IS_ALIGNED(len, 4)) { 643 /* Use single indirect_ref write32. */ 644 wrp_immed(nfp_prog, reg_none(), 645 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 646 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 647 reg_a(meta->paired_st->dst_reg * 2), off, 648 xfer_num - 1, CMD_CTX_SWAP); 649 } else if (len <= 40) { 650 /* Use one direct_ref write32 to write the first 32-bytes, then 651 * another direct_ref write8 to write the remaining bytes. 652 */ 653 emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 654 reg_a(meta->paired_st->dst_reg * 2), off, 7, 655 CMD_CTX_SWAP); 656 657 off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32, 658 imm_b(nfp_prog)); 659 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8, 660 reg_a(meta->paired_st->dst_reg * 2), off, len - 33, 661 CMD_CTX_SWAP); 662 } else { 663 /* Use one indirect_ref write32 to write 4-bytes aligned length, 664 * then another direct_ref write8 to write the remaining bytes. 665 */ 666 u8 new_off; 667 668 wrp_immed(nfp_prog, reg_none(), 669 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2)); 670 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 671 reg_a(meta->paired_st->dst_reg * 2), off, 672 xfer_num - 2, CMD_CTX_SWAP); 673 new_off = meta->paired_st->off + (xfer_num - 1) * 4; 674 off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog)); 675 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 676 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off, 677 (len & 0x3) - 1, CMD_CTX_SWAP); 678 } 679 680 /* TODO: The following extra load is to make sure data flow be identical 681 * before and after we do memory copy optimization. 682 * 683 * The load destination register is not guaranteed to be dead, so we 684 * need to make sure it is loaded with the value the same as before 685 * this transformation. 686 * 687 * These extra loads could be removed once we have accurate register 688 * usage information. 689 */ 690 if (descending_seq) 691 xfer_num = 0; 692 else if (BPF_SIZE(meta->insn.code) != BPF_DW) 693 xfer_num = xfer_num - 1; 694 else 695 xfer_num = xfer_num - 2; 696 697 switch (BPF_SIZE(meta->insn.code)) { 698 case BPF_B: 699 wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2), 700 reg_xfer(xfer_num), 1, 701 IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1); 702 break; 703 case BPF_H: 704 wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2), 705 reg_xfer(xfer_num), 2, (len & 3) ^ 2); 706 break; 707 case BPF_W: 708 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), 709 reg_xfer(0)); 710 break; 711 case BPF_DW: 712 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), 713 reg_xfer(xfer_num)); 714 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 715 reg_xfer(xfer_num + 1)); 716 break; 717 } 718 719 if (BPF_SIZE(meta->insn.code) != BPF_DW) 720 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 721 722 return 0; 723 } 724 725 static int 726 data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) 727 { 728 unsigned int i; 729 u16 shift, sz; 730 731 /* We load the value from the address indicated in @offset and then 732 * shift out the data we don't need. Note: this is big endian! 733 */ 734 sz = max(size, 4); 735 shift = size < 4 ? 4 - size : 0; 736 737 emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, 738 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP); 739 740 i = 0; 741 if (shift) 742 emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE, 743 reg_xfer(0), SHF_SC_R_SHF, shift * 8); 744 else 745 for (; i * 4 < size; i++) 746 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); 747 748 if (i < 2) 749 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); 750 751 return 0; 752 } 753 754 static int 755 data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, 756 swreg lreg, swreg rreg, int size, enum cmd_mode mode) 757 { 758 unsigned int i; 759 u8 mask, sz; 760 761 /* We load the value from the address indicated in rreg + lreg and then 762 * mask out the data we don't need. Note: this is little endian! 763 */ 764 sz = max(size, 4); 765 mask = size < 4 ? GENMASK(size - 1, 0) : 0; 766 767 emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0, 768 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP); 769 770 i = 0; 771 if (mask) 772 emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask, 773 reg_xfer(0), SHF_SC_NONE, 0, true); 774 else 775 for (; i * 4 < size; i++) 776 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); 777 778 if (i < 2) 779 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); 780 781 return 0; 782 } 783 784 static int 785 data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 786 u8 dst_gpr, u8 size) 787 { 788 return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset, 789 size, CMD_MODE_32b); 790 } 791 792 static int 793 data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 794 u8 dst_gpr, u8 size) 795 { 796 swreg rega, regb; 797 798 addr40_offset(nfp_prog, src_gpr, offset, ®a, ®b); 799 800 return data_ld_host_order(nfp_prog, dst_gpr, rega, regb, 801 size, CMD_MODE_40b_BA); 802 } 803 804 static int 805 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) 806 { 807 swreg tmp_reg; 808 809 /* Calculate the true offset (src_reg + imm) */ 810 tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); 811 emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg); 812 813 /* Check packet length (size guaranteed to fit b/c it's u8) */ 814 emit_alu(nfp_prog, imm_a(nfp_prog), 815 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); 816 emit_alu(nfp_prog, reg_none(), 817 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); 818 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); 819 820 /* Load data */ 821 return data_ld(nfp_prog, imm_b(nfp_prog), 0, size); 822 } 823 824 static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) 825 { 826 swreg tmp_reg; 827 828 /* Check packet length */ 829 tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog)); 830 emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); 831 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); 832 833 /* Load data */ 834 tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); 835 return data_ld(nfp_prog, tmp_reg, 0, size); 836 } 837 838 static int 839 data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, 840 u8 src_gpr, u8 size) 841 { 842 unsigned int i; 843 844 for (i = 0; i * 4 < size; i++) 845 wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i)); 846 847 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 848 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); 849 850 return 0; 851 } 852 853 static int 854 data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, 855 u64 imm, u8 size) 856 { 857 wrp_immed(nfp_prog, reg_xfer(0), imm); 858 if (size == 8) 859 wrp_immed(nfp_prog, reg_xfer(1), imm >> 32); 860 861 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 862 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); 863 864 return 0; 865 } 866 867 typedef int 868 (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off, 869 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 870 bool needs_inc); 871 872 static int 873 wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, 874 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 875 bool needs_inc) 876 { 877 bool should_inc = needs_inc && new_gpr && !last; 878 u32 idx, src_byte; 879 enum shf_sc sc; 880 swreg reg; 881 int shf; 882 u8 mask; 883 884 if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4)) 885 return -EOPNOTSUPP; 886 887 idx = off / 4; 888 889 /* Move the entire word */ 890 if (size == 4) { 891 wrp_mov(nfp_prog, reg_both(dst), 892 should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx)); 893 return 0; 894 } 895 896 if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX)) 897 return -EOPNOTSUPP; 898 899 src_byte = off % 4; 900 901 mask = (1 << size) - 1; 902 mask <<= dst_byte; 903 904 if (WARN_ON_ONCE(mask > 0xf)) 905 return -EOPNOTSUPP; 906 907 shf = abs(src_byte - dst_byte) * 8; 908 if (src_byte == dst_byte) { 909 sc = SHF_SC_NONE; 910 } else if (src_byte < dst_byte) { 911 shf = 32 - shf; 912 sc = SHF_SC_L_SHF; 913 } else { 914 sc = SHF_SC_R_SHF; 915 } 916 917 /* ld_field can address fewer indexes, if offset too large do RMW. 918 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. 919 */ 920 if (idx <= RE_REG_LM_IDX_MAX) { 921 reg = reg_lm(lm3 ? 3 : 0, idx); 922 } else { 923 reg = imm_a(nfp_prog); 924 /* If it's not the first part of the load and we start a new GPR 925 * that means we are loading a second part of the LMEM word into 926 * a new GPR. IOW we've already looked that LMEM word and 927 * therefore it has been loaded into imm_a(). 928 */ 929 if (first || !new_gpr) 930 wrp_mov(nfp_prog, reg, reg_lm(0, idx)); 931 } 932 933 emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr); 934 935 if (should_inc) 936 wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3)); 937 938 return 0; 939 } 940 941 static int 942 wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, 943 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 944 bool needs_inc) 945 { 946 bool should_inc = needs_inc && new_gpr && !last; 947 u32 idx, dst_byte; 948 enum shf_sc sc; 949 swreg reg; 950 int shf; 951 u8 mask; 952 953 if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4)) 954 return -EOPNOTSUPP; 955 956 idx = off / 4; 957 958 /* Move the entire word */ 959 if (size == 4) { 960 wrp_mov(nfp_prog, 961 should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx), 962 reg_b(src)); 963 return 0; 964 } 965 966 if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX)) 967 return -EOPNOTSUPP; 968 969 dst_byte = off % 4; 970 971 mask = (1 << size) - 1; 972 mask <<= dst_byte; 973 974 if (WARN_ON_ONCE(mask > 0xf)) 975 return -EOPNOTSUPP; 976 977 shf = abs(src_byte - dst_byte) * 8; 978 if (src_byte == dst_byte) { 979 sc = SHF_SC_NONE; 980 } else if (src_byte < dst_byte) { 981 shf = 32 - shf; 982 sc = SHF_SC_L_SHF; 983 } else { 984 sc = SHF_SC_R_SHF; 985 } 986 987 /* ld_field can address fewer indexes, if offset too large do RMW. 988 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. 989 */ 990 if (idx <= RE_REG_LM_IDX_MAX) { 991 reg = reg_lm(lm3 ? 3 : 0, idx); 992 } else { 993 reg = imm_a(nfp_prog); 994 /* Only first and last LMEM locations are going to need RMW, 995 * the middle location will be overwritten fully. 996 */ 997 if (first || last) 998 wrp_mov(nfp_prog, reg, reg_lm(0, idx)); 999 } 1000 1001 emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf); 1002 1003 if (new_gpr || last) { 1004 if (idx > RE_REG_LM_IDX_MAX) 1005 wrp_mov(nfp_prog, reg_lm(0, idx), reg); 1006 if (should_inc) 1007 wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3)); 1008 } 1009 1010 return 0; 1011 } 1012 1013 static int 1014 mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1015 unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr, 1016 bool clr_gpr, lmem_step step) 1017 { 1018 s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; 1019 bool first = true, last; 1020 bool needs_inc = false; 1021 swreg stack_off_reg; 1022 u8 prev_gpr = 255; 1023 u32 gpr_byte = 0; 1024 bool lm3 = true; 1025 int ret; 1026 1027 if (meta->ptr_not_const) { 1028 /* Use of the last encountered ptr_off is OK, they all have 1029 * the same alignment. Depend on low bits of value being 1030 * discarded when written to LMaddr register. 1031 */ 1032 stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off, 1033 stack_imm(nfp_prog)); 1034 1035 emit_alu(nfp_prog, imm_b(nfp_prog), 1036 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg); 1037 1038 needs_inc = true; 1039 } else if (off + size <= 64) { 1040 /* We can reach bottom 64B with LMaddr0 */ 1041 lm3 = false; 1042 } else if (round_down(off, 32) == round_down(off + size - 1, 32)) { 1043 /* We have to set up a new pointer. If we know the offset 1044 * and the entire access falls into a single 32 byte aligned 1045 * window we won't have to increment the LM pointer. 1046 * The 32 byte alignment is imporant because offset is ORed in 1047 * not added when doing *l$indexN[off]. 1048 */ 1049 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32), 1050 stack_imm(nfp_prog)); 1051 emit_alu(nfp_prog, imm_b(nfp_prog), 1052 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); 1053 1054 off %= 32; 1055 } else { 1056 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4), 1057 stack_imm(nfp_prog)); 1058 1059 emit_alu(nfp_prog, imm_b(nfp_prog), 1060 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); 1061 1062 needs_inc = true; 1063 } 1064 if (lm3) { 1065 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); 1066 /* For size < 4 one slot will be filled by zeroing of upper. */ 1067 wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); 1068 } 1069 1070 if (clr_gpr && size < 8) 1071 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 1072 1073 while (size) { 1074 u32 slice_end; 1075 u8 slice_size; 1076 1077 slice_size = min(size, 4 - gpr_byte); 1078 slice_end = min(off + slice_size, round_up(off + 1, 4)); 1079 slice_size = slice_end - off; 1080 1081 last = slice_size == size; 1082 1083 if (needs_inc) 1084 off %= 4; 1085 1086 ret = step(nfp_prog, gpr, gpr_byte, off, slice_size, 1087 first, gpr != prev_gpr, last, lm3, needs_inc); 1088 if (ret) 1089 return ret; 1090 1091 prev_gpr = gpr; 1092 first = false; 1093 1094 gpr_byte += slice_size; 1095 if (gpr_byte >= 4) { 1096 gpr_byte -= 4; 1097 gpr++; 1098 } 1099 1100 size -= slice_size; 1101 off += slice_size; 1102 } 1103 1104 return 0; 1105 } 1106 1107 static void 1108 wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) 1109 { 1110 swreg tmp_reg; 1111 1112 if (alu_op == ALU_OP_AND) { 1113 if (!imm) 1114 wrp_immed(nfp_prog, reg_both(dst), 0); 1115 if (!imm || !~imm) 1116 return; 1117 } 1118 if (alu_op == ALU_OP_OR) { 1119 if (!~imm) 1120 wrp_immed(nfp_prog, reg_both(dst), ~0U); 1121 if (!imm || !~imm) 1122 return; 1123 } 1124 if (alu_op == ALU_OP_XOR) { 1125 if (!~imm) 1126 emit_alu(nfp_prog, reg_both(dst), reg_none(), 1127 ALU_OP_NOT, reg_b(dst)); 1128 if (!imm || !~imm) 1129 return; 1130 } 1131 1132 tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); 1133 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg); 1134 } 1135 1136 static int 1137 wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1138 enum alu_op alu_op, bool skip) 1139 { 1140 const struct bpf_insn *insn = &meta->insn; 1141 u64 imm = insn->imm; /* sign extend */ 1142 1143 if (skip) { 1144 meta->skip = true; 1145 return 0; 1146 } 1147 1148 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U); 1149 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32); 1150 1151 return 0; 1152 } 1153 1154 static int 1155 wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1156 enum alu_op alu_op) 1157 { 1158 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2; 1159 1160 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src)); 1161 emit_alu(nfp_prog, reg_both(dst + 1), 1162 reg_a(dst + 1), alu_op, reg_b(src + 1)); 1163 1164 return 0; 1165 } 1166 1167 static int 1168 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1169 enum alu_op alu_op, bool skip) 1170 { 1171 const struct bpf_insn *insn = &meta->insn; 1172 1173 if (skip) { 1174 meta->skip = true; 1175 return 0; 1176 } 1177 1178 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); 1179 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1180 1181 return 0; 1182 } 1183 1184 static int 1185 wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1186 enum alu_op alu_op) 1187 { 1188 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2; 1189 1190 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src)); 1191 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 1192 1193 return 0; 1194 } 1195 1196 static void 1197 wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src, 1198 enum br_mask br_mask, u16 off) 1199 { 1200 emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src)); 1201 emit_br(nfp_prog, br_mask, off, 0); 1202 } 1203 1204 static int 1205 wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1206 enum alu_op alu_op, enum br_mask br_mask) 1207 { 1208 const struct bpf_insn *insn = &meta->insn; 1209 1210 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op, 1211 insn->src_reg * 2, br_mask, insn->off); 1212 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, 1213 insn->src_reg * 2 + 1, br_mask, insn->off); 1214 1215 return 0; 1216 } 1217 1218 static const struct jmp_code_map { 1219 enum br_mask br_mask; 1220 bool swap; 1221 } jmp_code_map[] = { 1222 [BPF_JGT >> 4] = { BR_BLO, true }, 1223 [BPF_JGE >> 4] = { BR_BHS, false }, 1224 [BPF_JLT >> 4] = { BR_BLO, false }, 1225 [BPF_JLE >> 4] = { BR_BHS, true }, 1226 [BPF_JSGT >> 4] = { BR_BLT, true }, 1227 [BPF_JSGE >> 4] = { BR_BGE, false }, 1228 [BPF_JSLT >> 4] = { BR_BLT, false }, 1229 [BPF_JSLE >> 4] = { BR_BGE, true }, 1230 }; 1231 1232 static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta) 1233 { 1234 unsigned int op; 1235 1236 op = BPF_OP(meta->insn.code) >> 4; 1237 /* br_mask of 0 is BR_BEQ which we don't use in jump code table */ 1238 if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) || 1239 !jmp_code_map[op].br_mask, 1240 "no code found for jump instruction")) 1241 return NULL; 1242 1243 return &jmp_code_map[op]; 1244 } 1245 1246 static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1247 { 1248 const struct bpf_insn *insn = &meta->insn; 1249 u64 imm = insn->imm; /* sign extend */ 1250 const struct jmp_code_map *code; 1251 enum alu_op alu_op, carry_op; 1252 u8 reg = insn->dst_reg * 2; 1253 swreg tmp_reg; 1254 1255 code = nfp_jmp_code_get(meta); 1256 if (!code) 1257 return -EINVAL; 1258 1259 alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB; 1260 carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C; 1261 1262 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 1263 if (!code->swap) 1264 emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg); 1265 else 1266 emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg)); 1267 1268 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 1269 if (!code->swap) 1270 emit_alu(nfp_prog, reg_none(), 1271 reg_a(reg + 1), carry_op, tmp_reg); 1272 else 1273 emit_alu(nfp_prog, reg_none(), 1274 tmp_reg, carry_op, reg_a(reg + 1)); 1275 1276 emit_br(nfp_prog, code->br_mask, insn->off, 0); 1277 1278 return 0; 1279 } 1280 1281 static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1282 { 1283 const struct bpf_insn *insn = &meta->insn; 1284 const struct jmp_code_map *code; 1285 u8 areg, breg; 1286 1287 code = nfp_jmp_code_get(meta); 1288 if (!code) 1289 return -EINVAL; 1290 1291 areg = insn->dst_reg * 2; 1292 breg = insn->src_reg * 2; 1293 1294 if (code->swap) { 1295 areg ^= breg; 1296 breg ^= areg; 1297 areg ^= breg; 1298 } 1299 1300 emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg)); 1301 emit_alu(nfp_prog, reg_none(), 1302 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); 1303 emit_br(nfp_prog, code->br_mask, insn->off, 0); 1304 1305 return 0; 1306 } 1307 1308 static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) 1309 { 1310 emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in, 1311 SHF_SC_R_ROT, 8); 1312 emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out), 1313 SHF_SC_R_ROT, 16); 1314 } 1315 1316 static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1317 { 1318 swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); 1319 struct nfp_bpf_cap_adjust_head *adjust_head; 1320 u32 ret_einval, end; 1321 1322 adjust_head = &nfp_prog->bpf->adjust_head; 1323 1324 /* Optimized version - 5 vs 14 cycles */ 1325 if (nfp_prog->adjust_head_location != UINT_MAX) { 1326 if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n)) 1327 return -EINVAL; 1328 1329 emit_alu(nfp_prog, pptr_reg(nfp_prog), 1330 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog)); 1331 emit_alu(nfp_prog, plen_reg(nfp_prog), 1332 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1333 emit_alu(nfp_prog, pv_len(nfp_prog), 1334 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1335 1336 wrp_immed(nfp_prog, reg_both(0), 0); 1337 wrp_immed(nfp_prog, reg_both(1), 0); 1338 1339 /* TODO: when adjust head is guaranteed to succeed we can 1340 * also eliminate the following if (r0 == 0) branch. 1341 */ 1342 1343 return 0; 1344 } 1345 1346 ret_einval = nfp_prog_current_offset(nfp_prog) + 14; 1347 end = ret_einval + 2; 1348 1349 /* We need to use a temp because offset is just a part of the pkt ptr */ 1350 emit_alu(nfp_prog, tmp, 1351 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog)); 1352 1353 /* Validate result will fit within FW datapath constraints */ 1354 emit_alu(nfp_prog, reg_none(), 1355 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min)); 1356 emit_br(nfp_prog, BR_BLO, ret_einval, 0); 1357 emit_alu(nfp_prog, reg_none(), 1358 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp); 1359 emit_br(nfp_prog, BR_BLO, ret_einval, 0); 1360 1361 /* Validate the length is at least ETH_HLEN */ 1362 emit_alu(nfp_prog, tmp_len, 1363 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1364 emit_alu(nfp_prog, reg_none(), 1365 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN)); 1366 emit_br(nfp_prog, BR_BMI, ret_einval, 0); 1367 1368 /* Load the ret code */ 1369 wrp_immed(nfp_prog, reg_both(0), 0); 1370 wrp_immed(nfp_prog, reg_both(1), 0); 1371 1372 /* Modify the packet metadata */ 1373 emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0); 1374 1375 /* Skip over the -EINVAL ret code (defer 2) */ 1376 emit_br(nfp_prog, BR_UNC, end, 2); 1377 1378 emit_alu(nfp_prog, plen_reg(nfp_prog), 1379 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1380 emit_alu(nfp_prog, pv_len(nfp_prog), 1381 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1382 1383 /* return -EINVAL target */ 1384 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval)) 1385 return -EINVAL; 1386 1387 wrp_immed(nfp_prog, reg_both(0), -22); 1388 wrp_immed(nfp_prog, reg_both(1), ~0); 1389 1390 if (!nfp_prog_confirm_current_offset(nfp_prog, end)) 1391 return -EINVAL; 1392 1393 return 0; 1394 } 1395 1396 static int 1397 map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1398 { 1399 bool load_lm_ptr; 1400 u32 ret_tgt; 1401 s64 lm_off; 1402 1403 /* We only have to reload LM0 if the key is not at start of stack */ 1404 lm_off = nfp_prog->stack_depth; 1405 lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off; 1406 load_lm_ptr = meta->arg2.var_off || lm_off; 1407 1408 /* Set LM0 to start of key */ 1409 if (load_lm_ptr) 1410 emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0); 1411 if (meta->func_id == BPF_FUNC_map_update_elem) 1412 emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2); 1413 1414 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 1415 2, RELO_BR_HELPER); 1416 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; 1417 1418 /* Load map ID into A0 */ 1419 wrp_mov(nfp_prog, reg_a(0), reg_a(2)); 1420 1421 /* Load the return address into B0 */ 1422 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); 1423 1424 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 1425 return -EINVAL; 1426 1427 /* Reset the LM0 pointer */ 1428 if (!load_lm_ptr) 1429 return 0; 1430 1431 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); 1432 wrp_nops(nfp_prog, 3); 1433 1434 return 0; 1435 } 1436 1437 static int 1438 nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1439 { 1440 __emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM); 1441 /* CSR value is read in following immed[gpr, 0] */ 1442 emit_immed(nfp_prog, reg_both(0), 0, 1443 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1444 emit_immed(nfp_prog, reg_both(1), 0, 1445 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1446 return 0; 1447 } 1448 1449 static int 1450 nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1451 { 1452 swreg ptr_type; 1453 u32 ret_tgt; 1454 1455 ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog)); 1456 1457 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; 1458 1459 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 1460 2, RELO_BR_HELPER); 1461 1462 /* Load ptr type into A1 */ 1463 wrp_mov(nfp_prog, reg_a(1), ptr_type); 1464 1465 /* Load the return address into B0 */ 1466 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); 1467 1468 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 1469 return -EINVAL; 1470 1471 return 0; 1472 } 1473 1474 static int 1475 nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1476 { 1477 u32 jmp_tgt; 1478 1479 jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5; 1480 1481 /* Make sure the queue id fits into FW field */ 1482 emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2), 1483 ALU_OP_AND_NOT_B, reg_imm(0xff)); 1484 emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2); 1485 1486 /* Set the 'queue selected' bit and the queue value */ 1487 emit_shf(nfp_prog, pv_qsel_set(nfp_prog), 1488 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1), 1489 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT); 1490 emit_ld_field(nfp_prog, 1491 pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2), 1492 SHF_SC_NONE, 0); 1493 /* Delay slots end here, we will jump over next instruction if queue 1494 * value fits into the field. 1495 */ 1496 emit_ld_field(nfp_prog, 1497 pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX), 1498 SHF_SC_NONE, 0); 1499 1500 if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt)) 1501 return -EINVAL; 1502 1503 return 0; 1504 } 1505 1506 /* --- Callbacks --- */ 1507 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1508 { 1509 const struct bpf_insn *insn = &meta->insn; 1510 u8 dst = insn->dst_reg * 2; 1511 u8 src = insn->src_reg * 2; 1512 1513 if (insn->src_reg == BPF_REG_10) { 1514 swreg stack_depth_reg; 1515 1516 stack_depth_reg = ur_load_imm_any(nfp_prog, 1517 nfp_prog->stack_depth, 1518 stack_imm(nfp_prog)); 1519 emit_alu(nfp_prog, reg_both(dst), 1520 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg); 1521 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1522 } else { 1523 wrp_reg_mov(nfp_prog, dst, src); 1524 wrp_reg_mov(nfp_prog, dst + 1, src + 1); 1525 } 1526 1527 return 0; 1528 } 1529 1530 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1531 { 1532 u64 imm = meta->insn.imm; /* sign extend */ 1533 1534 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U); 1535 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32); 1536 1537 return 0; 1538 } 1539 1540 static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1541 { 1542 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR); 1543 } 1544 1545 static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1546 { 1547 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm); 1548 } 1549 1550 static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1551 { 1552 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND); 1553 } 1554 1555 static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1556 { 1557 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 1558 } 1559 1560 static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1561 { 1562 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR); 1563 } 1564 1565 static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1566 { 1567 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 1568 } 1569 1570 static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1571 { 1572 const struct bpf_insn *insn = &meta->insn; 1573 1574 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1575 reg_a(insn->dst_reg * 2), ALU_OP_ADD, 1576 reg_b(insn->src_reg * 2)); 1577 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1578 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C, 1579 reg_b(insn->src_reg * 2 + 1)); 1580 1581 return 0; 1582 } 1583 1584 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1585 { 1586 const struct bpf_insn *insn = &meta->insn; 1587 u64 imm = insn->imm; /* sign extend */ 1588 1589 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1590 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1591 1592 return 0; 1593 } 1594 1595 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1596 { 1597 const struct bpf_insn *insn = &meta->insn; 1598 1599 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1600 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1601 reg_b(insn->src_reg * 2)); 1602 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1603 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1604 reg_b(insn->src_reg * 2 + 1)); 1605 1606 return 0; 1607 } 1608 1609 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1610 { 1611 const struct bpf_insn *insn = &meta->insn; 1612 u64 imm = insn->imm; /* sign extend */ 1613 1614 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1615 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1616 1617 return 0; 1618 } 1619 1620 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1621 { 1622 const struct bpf_insn *insn = &meta->insn; 1623 1624 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1625 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1626 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1627 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1628 1629 return 0; 1630 } 1631 1632 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1633 { 1634 const struct bpf_insn *insn = &meta->insn; 1635 u8 dst = insn->dst_reg * 2; 1636 1637 if (insn->imm < 32) { 1638 emit_shf(nfp_prog, reg_both(dst + 1), 1639 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), 1640 SHF_SC_R_DSHF, 32 - insn->imm); 1641 emit_shf(nfp_prog, reg_both(dst), 1642 reg_none(), SHF_OP_NONE, reg_b(dst), 1643 SHF_SC_L_SHF, insn->imm); 1644 } else if (insn->imm == 32) { 1645 wrp_reg_mov(nfp_prog, dst + 1, dst); 1646 wrp_immed(nfp_prog, reg_both(dst), 0); 1647 } else if (insn->imm > 32) { 1648 emit_shf(nfp_prog, reg_both(dst + 1), 1649 reg_none(), SHF_OP_NONE, reg_b(dst), 1650 SHF_SC_L_SHF, insn->imm - 32); 1651 wrp_immed(nfp_prog, reg_both(dst), 0); 1652 } 1653 1654 return 0; 1655 } 1656 1657 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1658 { 1659 const struct bpf_insn *insn = &meta->insn; 1660 u8 dst = insn->dst_reg * 2; 1661 1662 if (insn->imm < 32) { 1663 emit_shf(nfp_prog, reg_both(dst), 1664 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), 1665 SHF_SC_R_DSHF, insn->imm); 1666 emit_shf(nfp_prog, reg_both(dst + 1), 1667 reg_none(), SHF_OP_NONE, reg_b(dst + 1), 1668 SHF_SC_R_SHF, insn->imm); 1669 } else if (insn->imm == 32) { 1670 wrp_reg_mov(nfp_prog, dst, dst + 1); 1671 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1672 } else if (insn->imm > 32) { 1673 emit_shf(nfp_prog, reg_both(dst), 1674 reg_none(), SHF_OP_NONE, reg_b(dst + 1), 1675 SHF_SC_R_SHF, insn->imm - 32); 1676 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1677 } 1678 1679 return 0; 1680 } 1681 1682 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1683 { 1684 const struct bpf_insn *insn = &meta->insn; 1685 1686 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 1687 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1688 1689 return 0; 1690 } 1691 1692 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1693 { 1694 const struct bpf_insn *insn = &meta->insn; 1695 1696 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 1697 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1698 1699 return 0; 1700 } 1701 1702 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1703 { 1704 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 1705 } 1706 1707 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1708 { 1709 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 1710 } 1711 1712 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1713 { 1714 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 1715 } 1716 1717 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1718 { 1719 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 1720 } 1721 1722 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1723 { 1724 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 1725 } 1726 1727 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1728 { 1729 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 1730 } 1731 1732 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1733 { 1734 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 1735 } 1736 1737 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1738 { 1739 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 1740 } 1741 1742 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1743 { 1744 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 1745 } 1746 1747 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1748 { 1749 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 1750 } 1751 1752 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1753 { 1754 u8 dst = meta->insn.dst_reg * 2; 1755 1756 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 1757 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 1758 1759 return 0; 1760 } 1761 1762 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1763 { 1764 const struct bpf_insn *insn = &meta->insn; 1765 1766 if (!insn->imm) 1767 return 1; /* TODO: zero shift means indirect */ 1768 1769 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 1770 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 1771 SHF_SC_L_SHF, insn->imm); 1772 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1773 1774 return 0; 1775 } 1776 1777 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1778 { 1779 const struct bpf_insn *insn = &meta->insn; 1780 u8 gpr = insn->dst_reg * 2; 1781 1782 switch (insn->imm) { 1783 case 16: 1784 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 1785 SHF_SC_R_ROT, 8); 1786 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 1787 SHF_SC_R_SHF, 16); 1788 1789 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 1790 break; 1791 case 32: 1792 wrp_end32(nfp_prog, reg_a(gpr), gpr); 1793 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 1794 break; 1795 case 64: 1796 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 1797 1798 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 1799 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 1800 break; 1801 } 1802 1803 return 0; 1804 } 1805 1806 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1807 { 1808 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 1809 u32 imm_lo, imm_hi; 1810 u8 dst; 1811 1812 dst = prev->insn.dst_reg * 2; 1813 imm_lo = prev->insn.imm; 1814 imm_hi = meta->insn.imm; 1815 1816 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 1817 1818 /* mov is always 1 insn, load imm may be two, so try to use mov */ 1819 if (imm_hi == imm_lo) 1820 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 1821 else 1822 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 1823 1824 return 0; 1825 } 1826 1827 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1828 { 1829 meta->double_cb = imm_ld8_part2; 1830 return 0; 1831 } 1832 1833 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1834 { 1835 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 1836 } 1837 1838 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1839 { 1840 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 1841 } 1842 1843 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1844 { 1845 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 1846 } 1847 1848 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1849 { 1850 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 1851 meta->insn.src_reg * 2, 1); 1852 } 1853 1854 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1855 { 1856 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 1857 meta->insn.src_reg * 2, 2); 1858 } 1859 1860 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1861 { 1862 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 1863 meta->insn.src_reg * 2, 4); 1864 } 1865 1866 static int 1867 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1868 unsigned int size, unsigned int ptr_off) 1869 { 1870 return mem_op_stack(nfp_prog, meta, size, ptr_off, 1871 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 1872 true, wrp_lmem_load); 1873 } 1874 1875 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1876 u8 size) 1877 { 1878 swreg dst = reg_both(meta->insn.dst_reg * 2); 1879 1880 switch (meta->insn.off) { 1881 case offsetof(struct __sk_buff, len): 1882 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 1883 return -EOPNOTSUPP; 1884 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 1885 break; 1886 case offsetof(struct __sk_buff, data): 1887 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 1888 return -EOPNOTSUPP; 1889 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 1890 break; 1891 case offsetof(struct __sk_buff, data_end): 1892 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 1893 return -EOPNOTSUPP; 1894 emit_alu(nfp_prog, dst, 1895 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 1896 break; 1897 default: 1898 return -EOPNOTSUPP; 1899 } 1900 1901 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 1902 1903 return 0; 1904 } 1905 1906 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1907 u8 size) 1908 { 1909 swreg dst = reg_both(meta->insn.dst_reg * 2); 1910 1911 switch (meta->insn.off) { 1912 case offsetof(struct xdp_md, data): 1913 if (size != FIELD_SIZEOF(struct xdp_md, data)) 1914 return -EOPNOTSUPP; 1915 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 1916 break; 1917 case offsetof(struct xdp_md, data_end): 1918 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 1919 return -EOPNOTSUPP; 1920 emit_alu(nfp_prog, dst, 1921 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 1922 break; 1923 default: 1924 return -EOPNOTSUPP; 1925 } 1926 1927 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 1928 1929 return 0; 1930 } 1931 1932 static int 1933 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1934 unsigned int size) 1935 { 1936 swreg tmp_reg; 1937 1938 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 1939 1940 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 1941 tmp_reg, meta->insn.dst_reg * 2, size); 1942 } 1943 1944 static int 1945 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1946 unsigned int size) 1947 { 1948 swreg tmp_reg; 1949 1950 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 1951 1952 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 1953 tmp_reg, meta->insn.dst_reg * 2, size); 1954 } 1955 1956 static void 1957 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 1958 struct nfp_insn_meta *meta) 1959 { 1960 s16 range_start = meta->pkt_cache.range_start; 1961 s16 range_end = meta->pkt_cache.range_end; 1962 swreg src_base, off; 1963 u8 xfer_num, len; 1964 bool indir; 1965 1966 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 1967 src_base = reg_a(meta->insn.src_reg * 2); 1968 len = range_end - range_start; 1969 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 1970 1971 indir = len > 8 * REG_WIDTH; 1972 /* Setup PREV_ALU for indirect mode. */ 1973 if (indir) 1974 wrp_immed(nfp_prog, reg_none(), 1975 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 1976 1977 /* Cache memory into transfer-in registers. */ 1978 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 1979 off, xfer_num - 1, CMD_CTX_SWAP, indir); 1980 } 1981 1982 static int 1983 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 1984 struct nfp_insn_meta *meta, 1985 unsigned int size) 1986 { 1987 s16 range_start = meta->pkt_cache.range_start; 1988 s16 insn_off = meta->insn.off - range_start; 1989 swreg dst_lo, dst_hi, src_lo, src_mid; 1990 u8 dst_gpr = meta->insn.dst_reg * 2; 1991 u8 len_lo = size, len_mid = 0; 1992 u8 idx = insn_off / REG_WIDTH; 1993 u8 off = insn_off % REG_WIDTH; 1994 1995 dst_hi = reg_both(dst_gpr + 1); 1996 dst_lo = reg_both(dst_gpr); 1997 src_lo = reg_xfer(idx); 1998 1999 /* The read length could involve as many as three registers. */ 2000 if (size > REG_WIDTH - off) { 2001 /* Calculate the part in the second register. */ 2002 len_lo = REG_WIDTH - off; 2003 len_mid = size - len_lo; 2004 2005 /* Calculate the part in the third register. */ 2006 if (size > 2 * REG_WIDTH - off) 2007 len_mid = REG_WIDTH; 2008 } 2009 2010 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2011 2012 if (!len_mid) { 2013 wrp_immed(nfp_prog, dst_hi, 0); 2014 return 0; 2015 } 2016 2017 src_mid = reg_xfer(idx + 1); 2018 2019 if (size <= REG_WIDTH) { 2020 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2021 wrp_immed(nfp_prog, dst_hi, 0); 2022 } else { 2023 swreg src_hi = reg_xfer(idx + 2); 2024 2025 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2026 REG_WIDTH - len_lo, len_lo); 2027 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2028 REG_WIDTH - len_lo); 2029 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2030 len_lo); 2031 } 2032 2033 return 0; 2034 } 2035 2036 static int 2037 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2038 struct nfp_insn_meta *meta, 2039 unsigned int size) 2040 { 2041 swreg dst_lo, dst_hi, src_lo; 2042 u8 dst_gpr, idx; 2043 2044 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2045 dst_gpr = meta->insn.dst_reg * 2; 2046 dst_hi = reg_both(dst_gpr + 1); 2047 dst_lo = reg_both(dst_gpr); 2048 src_lo = reg_xfer(idx); 2049 2050 if (size < REG_WIDTH) { 2051 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2052 wrp_immed(nfp_prog, dst_hi, 0); 2053 } else if (size == REG_WIDTH) { 2054 wrp_mov(nfp_prog, dst_lo, src_lo); 2055 wrp_immed(nfp_prog, dst_hi, 0); 2056 } else { 2057 swreg src_hi = reg_xfer(idx + 1); 2058 2059 wrp_mov(nfp_prog, dst_lo, src_lo); 2060 wrp_mov(nfp_prog, dst_hi, src_hi); 2061 } 2062 2063 return 0; 2064 } 2065 2066 static int 2067 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2068 struct nfp_insn_meta *meta, unsigned int size) 2069 { 2070 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2071 2072 if (IS_ALIGNED(off, REG_WIDTH)) 2073 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2074 2075 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2076 } 2077 2078 static int 2079 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2080 unsigned int size) 2081 { 2082 if (meta->ldst_gather_len) 2083 return nfp_cpp_memcpy(nfp_prog, meta); 2084 2085 if (meta->ptr.type == PTR_TO_CTX) { 2086 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2087 return mem_ldx_xdp(nfp_prog, meta, size); 2088 else 2089 return mem_ldx_skb(nfp_prog, meta, size); 2090 } 2091 2092 if (meta->ptr.type == PTR_TO_PACKET) { 2093 if (meta->pkt_cache.range_end) { 2094 if (meta->pkt_cache.do_init) 2095 mem_ldx_data_init_pktcache(nfp_prog, meta); 2096 2097 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2098 } else { 2099 return mem_ldx_data(nfp_prog, meta, size); 2100 } 2101 } 2102 2103 if (meta->ptr.type == PTR_TO_STACK) 2104 return mem_ldx_stack(nfp_prog, meta, size, 2105 meta->ptr.off + meta->ptr.var_off.value); 2106 2107 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2108 return mem_ldx_emem(nfp_prog, meta, size); 2109 2110 return -EOPNOTSUPP; 2111 } 2112 2113 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2114 { 2115 return mem_ldx(nfp_prog, meta, 1); 2116 } 2117 2118 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2119 { 2120 return mem_ldx(nfp_prog, meta, 2); 2121 } 2122 2123 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2124 { 2125 return mem_ldx(nfp_prog, meta, 4); 2126 } 2127 2128 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2129 { 2130 return mem_ldx(nfp_prog, meta, 8); 2131 } 2132 2133 static int 2134 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2135 unsigned int size) 2136 { 2137 u64 imm = meta->insn.imm; /* sign extend */ 2138 swreg off_reg; 2139 2140 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2141 2142 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2143 imm, size); 2144 } 2145 2146 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2147 unsigned int size) 2148 { 2149 if (meta->ptr.type == PTR_TO_PACKET) 2150 return mem_st_data(nfp_prog, meta, size); 2151 2152 return -EOPNOTSUPP; 2153 } 2154 2155 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2156 { 2157 return mem_st(nfp_prog, meta, 1); 2158 } 2159 2160 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2161 { 2162 return mem_st(nfp_prog, meta, 2); 2163 } 2164 2165 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2166 { 2167 return mem_st(nfp_prog, meta, 4); 2168 } 2169 2170 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2171 { 2172 return mem_st(nfp_prog, meta, 8); 2173 } 2174 2175 static int 2176 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2177 unsigned int size) 2178 { 2179 swreg off_reg; 2180 2181 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2182 2183 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2184 meta->insn.src_reg * 2, size); 2185 } 2186 2187 static int 2188 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2189 unsigned int size, unsigned int ptr_off) 2190 { 2191 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2192 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2193 false, wrp_lmem_store); 2194 } 2195 2196 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2197 { 2198 switch (meta->insn.off) { 2199 case offsetof(struct xdp_md, rx_queue_index): 2200 return nfp_queue_select(nfp_prog, meta); 2201 } 2202 2203 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2204 return -EOPNOTSUPP; 2205 } 2206 2207 static int 2208 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2209 unsigned int size) 2210 { 2211 if (meta->ptr.type == PTR_TO_PACKET) 2212 return mem_stx_data(nfp_prog, meta, size); 2213 2214 if (meta->ptr.type == PTR_TO_STACK) 2215 return mem_stx_stack(nfp_prog, meta, size, 2216 meta->ptr.off + meta->ptr.var_off.value); 2217 2218 return -EOPNOTSUPP; 2219 } 2220 2221 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2222 { 2223 return mem_stx(nfp_prog, meta, 1); 2224 } 2225 2226 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2227 { 2228 return mem_stx(nfp_prog, meta, 2); 2229 } 2230 2231 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2232 { 2233 if (meta->ptr.type == PTR_TO_CTX) 2234 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2235 return mem_stx_xdp(nfp_prog, meta); 2236 return mem_stx(nfp_prog, meta, 4); 2237 } 2238 2239 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2240 { 2241 return mem_stx(nfp_prog, meta, 8); 2242 } 2243 2244 static int 2245 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2246 { 2247 u8 dst_gpr = meta->insn.dst_reg * 2; 2248 u8 src_gpr = meta->insn.src_reg * 2; 2249 unsigned int full_add, out; 2250 swreg addra, addrb, off; 2251 2252 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2253 2254 /* We can fit 16 bits into command immediate, if we know the immediate 2255 * is guaranteed to either always or never fit into 16 bit we only 2256 * generate code to handle that particular case, otherwise generate 2257 * code for both. 2258 */ 2259 out = nfp_prog_current_offset(nfp_prog); 2260 full_add = nfp_prog_current_offset(nfp_prog); 2261 2262 if (meta->insn.off) { 2263 out += 2; 2264 full_add += 2; 2265 } 2266 if (meta->xadd_maybe_16bit) { 2267 out += 3; 2268 full_add += 3; 2269 } 2270 if (meta->xadd_over_16bit) 2271 out += 2 + is64; 2272 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2273 out += 5; 2274 full_add += 5; 2275 } 2276 2277 /* Generate the branch for choosing add_imm vs add */ 2278 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2279 swreg max_imm = imm_a(nfp_prog); 2280 2281 wrp_immed(nfp_prog, max_imm, 0xffff); 2282 emit_alu(nfp_prog, reg_none(), 2283 max_imm, ALU_OP_SUB, reg_b(src_gpr)); 2284 emit_alu(nfp_prog, reg_none(), 2285 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); 2286 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); 2287 /* defer for add */ 2288 } 2289 2290 /* If insn has an offset add to the address */ 2291 if (!meta->insn.off) { 2292 addra = reg_a(dst_gpr); 2293 addrb = reg_b(dst_gpr + 1); 2294 } else { 2295 emit_alu(nfp_prog, imma_a(nfp_prog), 2296 reg_a(dst_gpr), ALU_OP_ADD, off); 2297 emit_alu(nfp_prog, imma_b(nfp_prog), 2298 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); 2299 addra = imma_a(nfp_prog); 2300 addrb = imma_b(nfp_prog); 2301 } 2302 2303 /* Generate the add_imm if 16 bits are possible */ 2304 if (meta->xadd_maybe_16bit) { 2305 swreg prev_alu = imm_a(nfp_prog); 2306 2307 wrp_immed(nfp_prog, prev_alu, 2308 FIELD_PREP(CMD_OVE_DATA, 2) | 2309 CMD_OVE_LEN | 2310 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); 2311 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); 2312 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, 2313 addra, addrb, 0, CMD_CTX_NO_SWAP); 2314 2315 if (meta->xadd_over_16bit) 2316 emit_br(nfp_prog, BR_UNC, out, 0); 2317 } 2318 2319 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) 2320 return -EINVAL; 2321 2322 /* Generate the add if 16 bits are not guaranteed */ 2323 if (meta->xadd_over_16bit) { 2324 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, 2325 addra, addrb, is64 << 2, 2326 is64 ? CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2327 2328 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2329 if (is64) 2330 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 2331 } 2332 2333 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 2334 return -EINVAL; 2335 2336 return 0; 2337 } 2338 2339 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2340 { 2341 return mem_xadd(nfp_prog, meta, false); 2342 } 2343 2344 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2345 { 2346 return mem_xadd(nfp_prog, meta, true); 2347 } 2348 2349 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2350 { 2351 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 2352 2353 return 0; 2354 } 2355 2356 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2357 { 2358 const struct bpf_insn *insn = &meta->insn; 2359 u64 imm = insn->imm; /* sign extend */ 2360 swreg or1, or2, tmp_reg; 2361 2362 or1 = reg_a(insn->dst_reg * 2); 2363 or2 = reg_b(insn->dst_reg * 2 + 1); 2364 2365 if (imm & ~0U) { 2366 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2367 emit_alu(nfp_prog, imm_a(nfp_prog), 2368 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2369 or1 = imm_a(nfp_prog); 2370 } 2371 2372 if (imm >> 32) { 2373 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2374 emit_alu(nfp_prog, imm_b(nfp_prog), 2375 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2376 or2 = imm_b(nfp_prog); 2377 } 2378 2379 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 2380 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2381 2382 return 0; 2383 } 2384 2385 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2386 { 2387 const struct bpf_insn *insn = &meta->insn; 2388 u64 imm = insn->imm; /* sign extend */ 2389 swreg tmp_reg; 2390 2391 if (!imm) { 2392 meta->skip = true; 2393 return 0; 2394 } 2395 2396 if (imm & ~0U) { 2397 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2398 emit_alu(nfp_prog, reg_none(), 2399 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg); 2400 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2401 } 2402 2403 if (imm >> 32) { 2404 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2405 emit_alu(nfp_prog, reg_none(), 2406 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); 2407 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2408 } 2409 2410 return 0; 2411 } 2412 2413 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2414 { 2415 const struct bpf_insn *insn = &meta->insn; 2416 u64 imm = insn->imm; /* sign extend */ 2417 swreg tmp_reg; 2418 2419 if (!imm) { 2420 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 2421 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 2422 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2423 return 0; 2424 } 2425 2426 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2427 emit_alu(nfp_prog, reg_none(), 2428 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2429 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2430 2431 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2432 emit_alu(nfp_prog, reg_none(), 2433 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2434 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2435 2436 return 0; 2437 } 2438 2439 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2440 { 2441 const struct bpf_insn *insn = &meta->insn; 2442 2443 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 2444 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 2445 emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 2446 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 2447 emit_alu(nfp_prog, reg_none(), 2448 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 2449 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2450 2451 return 0; 2452 } 2453 2454 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2455 { 2456 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 2457 } 2458 2459 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2460 { 2461 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 2462 } 2463 2464 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2465 { 2466 switch (meta->insn.imm) { 2467 case BPF_FUNC_xdp_adjust_head: 2468 return adjust_head(nfp_prog, meta); 2469 case BPF_FUNC_map_lookup_elem: 2470 case BPF_FUNC_map_update_elem: 2471 case BPF_FUNC_map_delete_elem: 2472 return map_call_stack_common(nfp_prog, meta); 2473 case BPF_FUNC_get_prandom_u32: 2474 return nfp_get_prandom_u32(nfp_prog, meta); 2475 case BPF_FUNC_perf_event_output: 2476 return nfp_perf_event_output(nfp_prog, meta); 2477 default: 2478 WARN_ONCE(1, "verifier allowed unsupported function\n"); 2479 return -EOPNOTSUPP; 2480 } 2481 } 2482 2483 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2484 { 2485 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 2486 2487 return 0; 2488 } 2489 2490 static const instr_cb_t instr_cb[256] = { 2491 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 2492 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 2493 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 2494 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 2495 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 2496 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 2497 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 2498 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 2499 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 2500 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 2501 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 2502 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 2503 [BPF_ALU64 | BPF_NEG] = neg_reg64, 2504 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 2505 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 2506 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 2507 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 2508 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 2509 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 2510 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 2511 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 2512 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 2513 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 2514 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 2515 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 2516 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 2517 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 2518 [BPF_ALU | BPF_NEG] = neg_reg, 2519 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 2520 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 2521 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 2522 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 2523 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 2524 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 2525 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 2526 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 2527 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 2528 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 2529 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 2530 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 2531 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 2532 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 2533 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 2534 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 2535 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 2536 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 2537 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, 2538 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 2539 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 2540 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 2541 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 2542 [BPF_JMP | BPF_JA | BPF_K] = jump, 2543 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 2544 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, 2545 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, 2546 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, 2547 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, 2548 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, 2549 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, 2550 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, 2551 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, 2552 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 2553 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 2554 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 2555 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, 2556 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, 2557 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, 2558 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, 2559 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, 2560 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, 2561 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, 2562 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, 2563 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 2564 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 2565 [BPF_JMP | BPF_CALL] = call, 2566 [BPF_JMP | BPF_EXIT] = goto_out, 2567 }; 2568 2569 /* --- Assembler logic --- */ 2570 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 2571 { 2572 struct nfp_insn_meta *meta, *jmp_dst; 2573 u32 idx, br_idx; 2574 2575 list_for_each_entry(meta, &nfp_prog->insns, l) { 2576 if (meta->skip) 2577 continue; 2578 if (meta->insn.code == (BPF_JMP | BPF_CALL)) 2579 continue; 2580 if (BPF_CLASS(meta->insn.code) != BPF_JMP) 2581 continue; 2582 2583 if (list_is_last(&meta->l, &nfp_prog->insns)) 2584 br_idx = nfp_prog->last_bpf_off; 2585 else 2586 br_idx = list_next_entry(meta, l)->off - 1; 2587 2588 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 2589 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 2590 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 2591 return -ELOOP; 2592 } 2593 /* Leave special branches for later */ 2594 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 2595 RELO_BR_REL) 2596 continue; 2597 2598 if (!meta->jmp_dst) { 2599 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 2600 return -ELOOP; 2601 } 2602 2603 jmp_dst = meta->jmp_dst; 2604 2605 if (jmp_dst->skip) { 2606 pr_err("Branch landing on removed instruction!!\n"); 2607 return -ELOOP; 2608 } 2609 2610 for (idx = meta->off; idx <= br_idx; idx++) { 2611 if (!nfp_is_br(nfp_prog->prog[idx])) 2612 continue; 2613 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 2614 } 2615 } 2616 2617 return 0; 2618 } 2619 2620 static void nfp_intro(struct nfp_prog *nfp_prog) 2621 { 2622 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 2623 emit_alu(nfp_prog, plen_reg(nfp_prog), 2624 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 2625 } 2626 2627 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 2628 { 2629 /* TC direct-action mode: 2630 * 0,1 ok NOT SUPPORTED[1] 2631 * 2 drop 0x22 -> drop, count as stat1 2632 * 4,5 nuke 0x02 -> drop 2633 * 7 redir 0x44 -> redir, count as stat2 2634 * * unspec 0x11 -> pass, count as stat0 2635 * 2636 * [1] We can't support OK and RECLASSIFY because we can't tell TC 2637 * the exact decision made. We are forced to support UNSPEC 2638 * to handle aborts so that's the only one we handle for passing 2639 * packets up the stack. 2640 */ 2641 /* Target for aborts */ 2642 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2643 2644 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2645 2646 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2647 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 2648 2649 /* Target for normal exits */ 2650 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2651 2652 /* if R0 > 7 jump to abort */ 2653 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 2654 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2655 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2656 2657 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 2658 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 2659 2660 emit_shf(nfp_prog, reg_a(1), 2661 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 2662 2663 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2664 emit_shf(nfp_prog, reg_a(2), 2665 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2666 2667 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2668 emit_shf(nfp_prog, reg_b(2), 2669 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 2670 2671 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2672 2673 emit_shf(nfp_prog, reg_b(2), 2674 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 2675 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2676 } 2677 2678 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 2679 { 2680 /* XDP return codes: 2681 * 0 aborted 0x82 -> drop, count as stat3 2682 * 1 drop 0x22 -> drop, count as stat1 2683 * 2 pass 0x11 -> pass, count as stat0 2684 * 3 tx 0x44 -> redir, count as stat2 2685 * * unknown 0x82 -> drop, count as stat3 2686 */ 2687 /* Target for aborts */ 2688 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2689 2690 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2691 2692 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2693 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 2694 2695 /* Target for normal exits */ 2696 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2697 2698 /* if R0 > 3 jump to abort */ 2699 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 2700 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2701 2702 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 2703 2704 emit_shf(nfp_prog, reg_a(1), 2705 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 2706 2707 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2708 emit_shf(nfp_prog, reg_b(2), 2709 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2710 2711 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2712 2713 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2714 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2715 } 2716 2717 static void nfp_outro(struct nfp_prog *nfp_prog) 2718 { 2719 switch (nfp_prog->type) { 2720 case BPF_PROG_TYPE_SCHED_CLS: 2721 nfp_outro_tc_da(nfp_prog); 2722 break; 2723 case BPF_PROG_TYPE_XDP: 2724 nfp_outro_xdp(nfp_prog); 2725 break; 2726 default: 2727 WARN_ON(1); 2728 } 2729 } 2730 2731 static int nfp_translate(struct nfp_prog *nfp_prog) 2732 { 2733 struct nfp_insn_meta *meta; 2734 int err; 2735 2736 nfp_intro(nfp_prog); 2737 if (nfp_prog->error) 2738 return nfp_prog->error; 2739 2740 list_for_each_entry(meta, &nfp_prog->insns, l) { 2741 instr_cb_t cb = instr_cb[meta->insn.code]; 2742 2743 meta->off = nfp_prog_current_offset(nfp_prog); 2744 2745 if (meta->skip) { 2746 nfp_prog->n_translated++; 2747 continue; 2748 } 2749 2750 if (nfp_meta_has_prev(nfp_prog, meta) && 2751 nfp_meta_prev(meta)->double_cb) 2752 cb = nfp_meta_prev(meta)->double_cb; 2753 if (!cb) 2754 return -ENOENT; 2755 err = cb(nfp_prog, meta); 2756 if (err) 2757 return err; 2758 if (nfp_prog->error) 2759 return nfp_prog->error; 2760 2761 nfp_prog->n_translated++; 2762 } 2763 2764 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 2765 2766 nfp_outro(nfp_prog); 2767 if (nfp_prog->error) 2768 return nfp_prog->error; 2769 2770 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 2771 if (nfp_prog->error) 2772 return nfp_prog->error; 2773 2774 return nfp_fixup_branches(nfp_prog); 2775 } 2776 2777 /* --- Optimizations --- */ 2778 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 2779 { 2780 struct nfp_insn_meta *meta; 2781 2782 list_for_each_entry(meta, &nfp_prog->insns, l) { 2783 struct bpf_insn insn = meta->insn; 2784 2785 /* Programs converted from cBPF start with register xoring */ 2786 if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) && 2787 insn.src_reg == insn.dst_reg) 2788 continue; 2789 2790 /* Programs start with R6 = R1 but we ignore the skb pointer */ 2791 if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) && 2792 insn.src_reg == 1 && insn.dst_reg == 6) 2793 meta->skip = true; 2794 2795 /* Return as soon as something doesn't match */ 2796 if (!meta->skip) 2797 return; 2798 } 2799 } 2800 2801 /* abs(insn.imm) will fit better into unrestricted reg immediate - 2802 * convert add/sub of a negative number into a sub/add of a positive one. 2803 */ 2804 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) 2805 { 2806 struct nfp_insn_meta *meta; 2807 2808 list_for_each_entry(meta, &nfp_prog->insns, l) { 2809 struct bpf_insn insn = meta->insn; 2810 2811 if (meta->skip) 2812 continue; 2813 2814 if (BPF_CLASS(insn.code) != BPF_ALU && 2815 BPF_CLASS(insn.code) != BPF_ALU64 && 2816 BPF_CLASS(insn.code) != BPF_JMP) 2817 continue; 2818 if (BPF_SRC(insn.code) != BPF_K) 2819 continue; 2820 if (insn.imm >= 0) 2821 continue; 2822 2823 if (BPF_CLASS(insn.code) == BPF_JMP) { 2824 switch (BPF_OP(insn.code)) { 2825 case BPF_JGE: 2826 case BPF_JSGE: 2827 case BPF_JLT: 2828 case BPF_JSLT: 2829 meta->jump_neg_op = true; 2830 break; 2831 default: 2832 continue; 2833 } 2834 } else { 2835 if (BPF_OP(insn.code) == BPF_ADD) 2836 insn.code = BPF_CLASS(insn.code) | BPF_SUB; 2837 else if (BPF_OP(insn.code) == BPF_SUB) 2838 insn.code = BPF_CLASS(insn.code) | BPF_ADD; 2839 else 2840 continue; 2841 2842 meta->insn.code = insn.code | BPF_K; 2843 } 2844 2845 meta->insn.imm = -insn.imm; 2846 } 2847 } 2848 2849 /* Remove masking after load since our load guarantees this is not needed */ 2850 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) 2851 { 2852 struct nfp_insn_meta *meta1, *meta2; 2853 const s32 exp_mask[] = { 2854 [BPF_B] = 0x000000ffU, 2855 [BPF_H] = 0x0000ffffU, 2856 [BPF_W] = 0xffffffffU, 2857 }; 2858 2859 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 2860 struct bpf_insn insn, next; 2861 2862 insn = meta1->insn; 2863 next = meta2->insn; 2864 2865 if (BPF_CLASS(insn.code) != BPF_LD) 2866 continue; 2867 if (BPF_MODE(insn.code) != BPF_ABS && 2868 BPF_MODE(insn.code) != BPF_IND) 2869 continue; 2870 2871 if (next.code != (BPF_ALU64 | BPF_AND | BPF_K)) 2872 continue; 2873 2874 if (!exp_mask[BPF_SIZE(insn.code)]) 2875 continue; 2876 if (exp_mask[BPF_SIZE(insn.code)] != next.imm) 2877 continue; 2878 2879 if (next.src_reg || next.dst_reg) 2880 continue; 2881 2882 if (meta2->flags & FLAG_INSN_IS_JUMP_DST) 2883 continue; 2884 2885 meta2->skip = true; 2886 } 2887 } 2888 2889 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog) 2890 { 2891 struct nfp_insn_meta *meta1, *meta2, *meta3; 2892 2893 nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) { 2894 struct bpf_insn insn, next1, next2; 2895 2896 insn = meta1->insn; 2897 next1 = meta2->insn; 2898 next2 = meta3->insn; 2899 2900 if (BPF_CLASS(insn.code) != BPF_LD) 2901 continue; 2902 if (BPF_MODE(insn.code) != BPF_ABS && 2903 BPF_MODE(insn.code) != BPF_IND) 2904 continue; 2905 if (BPF_SIZE(insn.code) != BPF_W) 2906 continue; 2907 2908 if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) && 2909 next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) && 2910 !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) && 2911 next2.code == (BPF_LSH | BPF_K | BPF_ALU64))) 2912 continue; 2913 2914 if (next1.src_reg || next1.dst_reg || 2915 next2.src_reg || next2.dst_reg) 2916 continue; 2917 2918 if (next1.imm != 0x20 || next2.imm != 0x20) 2919 continue; 2920 2921 if (meta2->flags & FLAG_INSN_IS_JUMP_DST || 2922 meta3->flags & FLAG_INSN_IS_JUMP_DST) 2923 continue; 2924 2925 meta2->skip = true; 2926 meta3->skip = true; 2927 } 2928 } 2929 2930 /* load/store pair that forms memory copy sould look like the following: 2931 * 2932 * ld_width R, [addr_src + offset_src] 2933 * st_width [addr_dest + offset_dest], R 2934 * 2935 * The destination register of load and source register of store should 2936 * be the same, load and store should also perform at the same width. 2937 * If either of addr_src or addr_dest is stack pointer, we don't do the 2938 * CPP optimization as stack is modelled by registers on NFP. 2939 */ 2940 static bool 2941 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta, 2942 struct nfp_insn_meta *st_meta) 2943 { 2944 struct bpf_insn *ld = &ld_meta->insn; 2945 struct bpf_insn *st = &st_meta->insn; 2946 2947 if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta)) 2948 return false; 2949 2950 if (ld_meta->ptr.type != PTR_TO_PACKET) 2951 return false; 2952 2953 if (st_meta->ptr.type != PTR_TO_PACKET) 2954 return false; 2955 2956 if (BPF_SIZE(ld->code) != BPF_SIZE(st->code)) 2957 return false; 2958 2959 if (ld->dst_reg != st->src_reg) 2960 return false; 2961 2962 /* There is jump to the store insn in this pair. */ 2963 if (st_meta->flags & FLAG_INSN_IS_JUMP_DST) 2964 return false; 2965 2966 return true; 2967 } 2968 2969 /* Currently, we only support chaining load/store pairs if: 2970 * 2971 * - Their address base registers are the same. 2972 * - Their address offsets are in the same order. 2973 * - They operate at the same memory width. 2974 * - There is no jump into the middle of them. 2975 */ 2976 static bool 2977 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta, 2978 struct nfp_insn_meta *st_meta, 2979 struct bpf_insn *prev_ld, 2980 struct bpf_insn *prev_st) 2981 { 2982 u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst; 2983 struct bpf_insn *ld = &ld_meta->insn; 2984 struct bpf_insn *st = &st_meta->insn; 2985 s16 prev_ld_off, prev_st_off; 2986 2987 /* This pair is the start pair. */ 2988 if (!prev_ld) 2989 return true; 2990 2991 prev_size = BPF_LDST_BYTES(prev_ld); 2992 curr_size = BPF_LDST_BYTES(ld); 2993 prev_ld_base = prev_ld->src_reg; 2994 prev_st_base = prev_st->dst_reg; 2995 prev_ld_dst = prev_ld->dst_reg; 2996 prev_ld_off = prev_ld->off; 2997 prev_st_off = prev_st->off; 2998 2999 if (ld->dst_reg != prev_ld_dst) 3000 return false; 3001 3002 if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base) 3003 return false; 3004 3005 if (curr_size != prev_size) 3006 return false; 3007 3008 /* There is jump to the head of this pair. */ 3009 if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST) 3010 return false; 3011 3012 /* Both in ascending order. */ 3013 if (prev_ld_off + prev_size == ld->off && 3014 prev_st_off + prev_size == st->off) 3015 return true; 3016 3017 /* Both in descending order. */ 3018 if (ld->off + curr_size == prev_ld_off && 3019 st->off + curr_size == prev_st_off) 3020 return true; 3021 3022 return false; 3023 } 3024 3025 /* Return TRUE if cross memory access happens. Cross memory access means 3026 * store area is overlapping with load area that a later load might load 3027 * the value from previous store, for this case we can't treat the sequence 3028 * as an memory copy. 3029 */ 3030 static bool 3031 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta, 3032 struct nfp_insn_meta *head_st_meta) 3033 { 3034 s16 head_ld_off, head_st_off, ld_off; 3035 3036 /* Different pointer types does not overlap. */ 3037 if (head_ld_meta->ptr.type != head_st_meta->ptr.type) 3038 return false; 3039 3040 /* load and store are both PTR_TO_PACKET, check ID info. */ 3041 if (head_ld_meta->ptr.id != head_st_meta->ptr.id) 3042 return true; 3043 3044 /* Canonicalize the offsets. Turn all of them against the original 3045 * base register. 3046 */ 3047 head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off; 3048 head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off; 3049 ld_off = ld->off + head_ld_meta->ptr.off; 3050 3051 /* Ascending order cross. */ 3052 if (ld_off > head_ld_off && 3053 head_ld_off < head_st_off && ld_off >= head_st_off) 3054 return true; 3055 3056 /* Descending order cross. */ 3057 if (ld_off < head_ld_off && 3058 head_ld_off > head_st_off && ld_off <= head_st_off) 3059 return true; 3060 3061 return false; 3062 } 3063 3064 /* This pass try to identify the following instructoin sequences. 3065 * 3066 * load R, [regA + offA] 3067 * store [regB + offB], R 3068 * load R, [regA + offA + const_imm_A] 3069 * store [regB + offB + const_imm_A], R 3070 * load R, [regA + offA + 2 * const_imm_A] 3071 * store [regB + offB + 2 * const_imm_A], R 3072 * ... 3073 * 3074 * Above sequence is typically generated by compiler when lowering 3075 * memcpy. NFP prefer using CPP instructions to accelerate it. 3076 */ 3077 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) 3078 { 3079 struct nfp_insn_meta *head_ld_meta = NULL; 3080 struct nfp_insn_meta *head_st_meta = NULL; 3081 struct nfp_insn_meta *meta1, *meta2; 3082 struct bpf_insn *prev_ld = NULL; 3083 struct bpf_insn *prev_st = NULL; 3084 u8 count = 0; 3085 3086 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3087 struct bpf_insn *ld = &meta1->insn; 3088 struct bpf_insn *st = &meta2->insn; 3089 3090 /* Reset record status if any of the following if true: 3091 * - The current insn pair is not load/store. 3092 * - The load/store pair doesn't chain with previous one. 3093 * - The chained load/store pair crossed with previous pair. 3094 * - The chained load/store pair has a total size of memory 3095 * copy beyond 128 bytes which is the maximum length a 3096 * single NFP CPP command can transfer. 3097 */ 3098 if (!curr_pair_is_memcpy(meta1, meta2) || 3099 !curr_pair_chain_with_previous(meta1, meta2, prev_ld, 3100 prev_st) || 3101 (head_ld_meta && (cross_mem_access(ld, head_ld_meta, 3102 head_st_meta) || 3103 head_ld_meta->ldst_gather_len >= 128))) { 3104 if (!count) 3105 continue; 3106 3107 if (count > 1) { 3108 s16 prev_ld_off = prev_ld->off; 3109 s16 prev_st_off = prev_st->off; 3110 s16 head_ld_off = head_ld_meta->insn.off; 3111 3112 if (prev_ld_off < head_ld_off) { 3113 head_ld_meta->insn.off = prev_ld_off; 3114 head_st_meta->insn.off = prev_st_off; 3115 head_ld_meta->ldst_gather_len = 3116 -head_ld_meta->ldst_gather_len; 3117 } 3118 3119 head_ld_meta->paired_st = &head_st_meta->insn; 3120 head_st_meta->skip = true; 3121 } else { 3122 head_ld_meta->ldst_gather_len = 0; 3123 } 3124 3125 /* If the chain is ended by an load/store pair then this 3126 * could serve as the new head of the the next chain. 3127 */ 3128 if (curr_pair_is_memcpy(meta1, meta2)) { 3129 head_ld_meta = meta1; 3130 head_st_meta = meta2; 3131 head_ld_meta->ldst_gather_len = 3132 BPF_LDST_BYTES(ld); 3133 meta1 = nfp_meta_next(meta1); 3134 meta2 = nfp_meta_next(meta2); 3135 prev_ld = ld; 3136 prev_st = st; 3137 count = 1; 3138 } else { 3139 head_ld_meta = NULL; 3140 head_st_meta = NULL; 3141 prev_ld = NULL; 3142 prev_st = NULL; 3143 count = 0; 3144 } 3145 3146 continue; 3147 } 3148 3149 if (!head_ld_meta) { 3150 head_ld_meta = meta1; 3151 head_st_meta = meta2; 3152 } else { 3153 meta1->skip = true; 3154 meta2->skip = true; 3155 } 3156 3157 head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld); 3158 meta1 = nfp_meta_next(meta1); 3159 meta2 = nfp_meta_next(meta2); 3160 prev_ld = ld; 3161 prev_st = st; 3162 count++; 3163 } 3164 } 3165 3166 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) 3167 { 3168 struct nfp_insn_meta *meta, *range_node = NULL; 3169 s16 range_start = 0, range_end = 0; 3170 bool cache_avail = false; 3171 struct bpf_insn *insn; 3172 s32 range_ptr_off = 0; 3173 u32 range_ptr_id = 0; 3174 3175 list_for_each_entry(meta, &nfp_prog->insns, l) { 3176 if (meta->flags & FLAG_INSN_IS_JUMP_DST) 3177 cache_avail = false; 3178 3179 if (meta->skip) 3180 continue; 3181 3182 insn = &meta->insn; 3183 3184 if (is_mbpf_store_pkt(meta) || 3185 insn->code == (BPF_JMP | BPF_CALL) || 3186 is_mbpf_classic_store_pkt(meta) || 3187 is_mbpf_classic_load(meta)) { 3188 cache_avail = false; 3189 continue; 3190 } 3191 3192 if (!is_mbpf_load(meta)) 3193 continue; 3194 3195 if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) { 3196 cache_avail = false; 3197 continue; 3198 } 3199 3200 if (!cache_avail) { 3201 cache_avail = true; 3202 if (range_node) 3203 goto end_current_then_start_new; 3204 goto start_new; 3205 } 3206 3207 /* Check ID to make sure two reads share the same 3208 * variable offset against PTR_TO_PACKET, and check OFF 3209 * to make sure they also share the same constant 3210 * offset. 3211 * 3212 * OFFs don't really need to be the same, because they 3213 * are the constant offsets against PTR_TO_PACKET, so 3214 * for different OFFs, we could canonicalize them to 3215 * offsets against original packet pointer. We don't 3216 * support this. 3217 */ 3218 if (meta->ptr.id == range_ptr_id && 3219 meta->ptr.off == range_ptr_off) { 3220 s16 new_start = range_start; 3221 s16 end, off = insn->off; 3222 s16 new_end = range_end; 3223 bool changed = false; 3224 3225 if (off < range_start) { 3226 new_start = off; 3227 changed = true; 3228 } 3229 3230 end = off + BPF_LDST_BYTES(insn); 3231 if (end > range_end) { 3232 new_end = end; 3233 changed = true; 3234 } 3235 3236 if (!changed) 3237 continue; 3238 3239 if (new_end - new_start <= 64) { 3240 /* Install new range. */ 3241 range_start = new_start; 3242 range_end = new_end; 3243 continue; 3244 } 3245 } 3246 3247 end_current_then_start_new: 3248 range_node->pkt_cache.range_start = range_start; 3249 range_node->pkt_cache.range_end = range_end; 3250 start_new: 3251 range_node = meta; 3252 range_node->pkt_cache.do_init = true; 3253 range_ptr_id = range_node->ptr.id; 3254 range_ptr_off = range_node->ptr.off; 3255 range_start = insn->off; 3256 range_end = insn->off + BPF_LDST_BYTES(insn); 3257 } 3258 3259 if (range_node) { 3260 range_node->pkt_cache.range_start = range_start; 3261 range_node->pkt_cache.range_end = range_end; 3262 } 3263 3264 list_for_each_entry(meta, &nfp_prog->insns, l) { 3265 if (meta->skip) 3266 continue; 3267 3268 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { 3269 if (meta->pkt_cache.do_init) { 3270 range_start = meta->pkt_cache.range_start; 3271 range_end = meta->pkt_cache.range_end; 3272 } else { 3273 meta->pkt_cache.range_start = range_start; 3274 meta->pkt_cache.range_end = range_end; 3275 } 3276 } 3277 } 3278 } 3279 3280 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) 3281 { 3282 nfp_bpf_opt_reg_init(nfp_prog); 3283 3284 nfp_bpf_opt_neg_add_sub(nfp_prog); 3285 nfp_bpf_opt_ld_mask(nfp_prog); 3286 nfp_bpf_opt_ld_shift(nfp_prog); 3287 nfp_bpf_opt_ldst_gather(nfp_prog); 3288 nfp_bpf_opt_pkt_cache(nfp_prog); 3289 3290 return 0; 3291 } 3292 3293 static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) 3294 { 3295 struct nfp_insn_meta *meta1, *meta2; 3296 struct nfp_bpf_map *nfp_map; 3297 struct bpf_map *map; 3298 3299 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3300 if (meta1->skip || meta2->skip) 3301 continue; 3302 3303 if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || 3304 meta1->insn.src_reg != BPF_PSEUDO_MAP_FD) 3305 continue; 3306 3307 map = (void *)(unsigned long)((u32)meta1->insn.imm | 3308 (u64)meta2->insn.imm << 32); 3309 if (bpf_map_offload_neutral(map)) 3310 continue; 3311 nfp_map = map_to_offmap(map)->dev_priv; 3312 3313 meta1->insn.imm = nfp_map->tid; 3314 meta2->insn.imm = 0; 3315 } 3316 3317 return 0; 3318 } 3319 3320 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) 3321 { 3322 __le64 *ustore = (__force __le64 *)prog; 3323 int i; 3324 3325 for (i = 0; i < len; i++) { 3326 int err; 3327 3328 err = nfp_ustore_check_valid_no_ecc(prog[i]); 3329 if (err) 3330 return err; 3331 3332 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i])); 3333 } 3334 3335 return 0; 3336 } 3337 3338 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog) 3339 { 3340 void *prog; 3341 3342 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL); 3343 if (!prog) 3344 return; 3345 3346 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64); 3347 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len); 3348 kvfree(nfp_prog->prog); 3349 nfp_prog->prog = prog; 3350 } 3351 3352 int nfp_bpf_jit(struct nfp_prog *nfp_prog) 3353 { 3354 int ret; 3355 3356 ret = nfp_bpf_replace_map_ptrs(nfp_prog); 3357 if (ret) 3358 return ret; 3359 3360 ret = nfp_bpf_optimize(nfp_prog); 3361 if (ret) 3362 return ret; 3363 3364 ret = nfp_translate(nfp_prog); 3365 if (ret) { 3366 pr_err("Translation failed with error %d (translated: %u)\n", 3367 ret, nfp_prog->n_translated); 3368 return -EINVAL; 3369 } 3370 3371 nfp_bpf_prog_trim(nfp_prog); 3372 3373 return ret; 3374 } 3375 3376 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) 3377 { 3378 struct nfp_insn_meta *meta; 3379 3380 /* Another pass to record jump information. */ 3381 list_for_each_entry(meta, &nfp_prog->insns, l) { 3382 u64 code = meta->insn.code; 3383 3384 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && 3385 BPF_OP(code) != BPF_CALL) { 3386 struct nfp_insn_meta *dst_meta; 3387 unsigned short dst_indx; 3388 3389 dst_indx = meta->n + 1 + meta->insn.off; 3390 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, 3391 cnt); 3392 3393 meta->jmp_dst = dst_meta; 3394 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; 3395 } 3396 } 3397 } 3398 3399 bool nfp_bpf_supported_opcode(u8 code) 3400 { 3401 return !!instr_cb[code]; 3402 } 3403 3404 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) 3405 { 3406 unsigned int i; 3407 u64 *prog; 3408 int err; 3409 3410 prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64), 3411 GFP_KERNEL); 3412 if (!prog) 3413 return ERR_PTR(-ENOMEM); 3414 3415 for (i = 0; i < nfp_prog->prog_len; i++) { 3416 enum nfp_relo_type special; 3417 u32 val; 3418 3419 special = FIELD_GET(OP_RELO_TYPE, prog[i]); 3420 switch (special) { 3421 case RELO_NONE: 3422 continue; 3423 case RELO_BR_REL: 3424 br_add_offset(&prog[i], bv->start_off); 3425 break; 3426 case RELO_BR_GO_OUT: 3427 br_set_offset(&prog[i], 3428 nfp_prog->tgt_out + bv->start_off); 3429 break; 3430 case RELO_BR_GO_ABORT: 3431 br_set_offset(&prog[i], 3432 nfp_prog->tgt_abort + bv->start_off); 3433 break; 3434 case RELO_BR_NEXT_PKT: 3435 br_set_offset(&prog[i], bv->tgt_done); 3436 break; 3437 case RELO_BR_HELPER: 3438 val = br_get_offset(prog[i]); 3439 val -= BR_OFF_RELO; 3440 switch (val) { 3441 case BPF_FUNC_map_lookup_elem: 3442 val = nfp_prog->bpf->helpers.map_lookup; 3443 break; 3444 case BPF_FUNC_map_update_elem: 3445 val = nfp_prog->bpf->helpers.map_update; 3446 break; 3447 case BPF_FUNC_map_delete_elem: 3448 val = nfp_prog->bpf->helpers.map_delete; 3449 break; 3450 case BPF_FUNC_perf_event_output: 3451 val = nfp_prog->bpf->helpers.perf_event_output; 3452 break; 3453 default: 3454 pr_err("relocation of unknown helper %d\n", 3455 val); 3456 err = -EINVAL; 3457 goto err_free_prog; 3458 } 3459 br_set_offset(&prog[i], val); 3460 break; 3461 case RELO_IMMED_REL: 3462 immed_add_value(&prog[i], bv->start_off); 3463 break; 3464 } 3465 3466 prog[i] &= ~OP_RELO_TYPE; 3467 } 3468 3469 err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len); 3470 if (err) 3471 goto err_free_prog; 3472 3473 return prog; 3474 3475 err_free_prog: 3476 kfree(prog); 3477 return ERR_PTR(err); 3478 } 3479