1 /* 2 * Copyright (C) 2016-2018 Netronome Systems, Inc. 3 * 4 * This software is dual licensed under the GNU General License Version 2, 5 * June 1991 as shown in the file COPYING in the top-level directory of this 6 * source tree or the BSD 2-Clause License provided below. You have the 7 * option to license this software under the complete terms of either license. 8 * 9 * The BSD 2-Clause License: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * 1. Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * 2. Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #define pr_fmt(fmt) "NFP net bpf: " fmt 35 36 #include <linux/bug.h> 37 #include <linux/bpf.h> 38 #include <linux/filter.h> 39 #include <linux/kernel.h> 40 #include <linux/pkt_cls.h> 41 #include <linux/reciprocal_div.h> 42 #include <linux/unistd.h> 43 44 #include "main.h" 45 #include "../nfp_asm.h" 46 #include "../nfp_net_ctrl.h" 47 48 /* --- NFP prog --- */ 49 /* Foreach "multiple" entries macros provide pos and next<n> pointers. 50 * It's safe to modify the next pointers (but not pos). 51 */ 52 #define nfp_for_each_insn_walk2(nfp_prog, pos, next) \ 53 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \ 54 next = list_next_entry(pos, l); \ 55 &(nfp_prog)->insns != &pos->l && \ 56 &(nfp_prog)->insns != &next->l; \ 57 pos = nfp_meta_next(pos), \ 58 next = nfp_meta_next(pos)) 59 60 #define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \ 61 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \ 62 next = list_next_entry(pos, l), \ 63 next2 = list_next_entry(next, l); \ 64 &(nfp_prog)->insns != &pos->l && \ 65 &(nfp_prog)->insns != &next->l && \ 66 &(nfp_prog)->insns != &next2->l; \ 67 pos = nfp_meta_next(pos), \ 68 next = nfp_meta_next(pos), \ 69 next2 = nfp_meta_next(next)) 70 71 static bool 72 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 73 { 74 return meta->l.prev != &nfp_prog->insns; 75 } 76 77 static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) 78 { 79 if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) { 80 pr_warn("instruction limit reached (%u NFP instructions)\n", 81 nfp_prog->prog_len); 82 nfp_prog->error = -ENOSPC; 83 return; 84 } 85 86 nfp_prog->prog[nfp_prog->prog_len] = insn; 87 nfp_prog->prog_len++; 88 } 89 90 static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog) 91 { 92 return nfp_prog->prog_len; 93 } 94 95 static bool 96 nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off) 97 { 98 /* If there is a recorded error we may have dropped instructions; 99 * that doesn't have to be due to translator bug, and the translation 100 * will fail anyway, so just return OK. 101 */ 102 if (nfp_prog->error) 103 return true; 104 return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off); 105 } 106 107 /* --- Emitters --- */ 108 static void 109 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, 110 u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx, 111 bool indir) 112 { 113 u64 insn; 114 115 insn = FIELD_PREP(OP_CMD_A_SRC, areg) | 116 FIELD_PREP(OP_CMD_CTX, ctx) | 117 FIELD_PREP(OP_CMD_B_SRC, breg) | 118 FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) | 119 FIELD_PREP(OP_CMD_XFER, xfer) | 120 FIELD_PREP(OP_CMD_CNT, size) | 121 FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) | 122 FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) | 123 FIELD_PREP(OP_CMD_INDIR, indir) | 124 FIELD_PREP(OP_CMD_MODE, mode); 125 126 nfp_prog_push(nfp_prog, insn); 127 } 128 129 static void 130 emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 131 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir) 132 { 133 struct nfp_insn_re_regs reg; 134 int err; 135 136 err = swreg_to_restricted(reg_none(), lreg, rreg, ®, false); 137 if (err) { 138 nfp_prog->error = err; 139 return; 140 } 141 if (reg.swap) { 142 pr_err("cmd can't swap arguments\n"); 143 nfp_prog->error = -EFAULT; 144 return; 145 } 146 if (reg.dst_lmextn || reg.src_lmextn) { 147 pr_err("cmd can't use LMextn\n"); 148 nfp_prog->error = -EFAULT; 149 return; 150 } 151 152 __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx, 153 indir); 154 } 155 156 static void 157 emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 158 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) 159 { 160 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false); 161 } 162 163 static void 164 emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, 165 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) 166 { 167 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true); 168 } 169 170 static void 171 __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip, 172 enum br_ctx_signal_state css, u16 addr, u8 defer) 173 { 174 u16 addr_lo, addr_hi; 175 u64 insn; 176 177 addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)); 178 addr_hi = addr != addr_lo; 179 180 insn = OP_BR_BASE | 181 FIELD_PREP(OP_BR_MASK, mask) | 182 FIELD_PREP(OP_BR_EV_PIP, ev_pip) | 183 FIELD_PREP(OP_BR_CSS, css) | 184 FIELD_PREP(OP_BR_DEFBR, defer) | 185 FIELD_PREP(OP_BR_ADDR_LO, addr_lo) | 186 FIELD_PREP(OP_BR_ADDR_HI, addr_hi); 187 188 nfp_prog_push(nfp_prog, insn); 189 } 190 191 static void 192 emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer, 193 enum nfp_relo_type relo) 194 { 195 if (mask == BR_UNC && defer > 2) { 196 pr_err("BUG: branch defer out of bounds %d\n", defer); 197 nfp_prog->error = -EFAULT; 198 return; 199 } 200 201 __emit_br(nfp_prog, mask, 202 mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND, 203 BR_CSS_NONE, addr, defer); 204 205 nfp_prog->prog[nfp_prog->prog_len - 1] |= 206 FIELD_PREP(OP_RELO_TYPE, relo); 207 } 208 209 static void 210 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) 211 { 212 emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL); 213 } 214 215 static void 216 __emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer, 217 bool set, bool src_lmextn) 218 { 219 u16 addr_lo, addr_hi; 220 u64 insn; 221 222 addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO)); 223 addr_hi = addr != addr_lo; 224 225 insn = OP_BR_BIT_BASE | 226 FIELD_PREP(OP_BR_BIT_A_SRC, areg) | 227 FIELD_PREP(OP_BR_BIT_B_SRC, breg) | 228 FIELD_PREP(OP_BR_BIT_BV, set) | 229 FIELD_PREP(OP_BR_BIT_DEFBR, defer) | 230 FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) | 231 FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) | 232 FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn); 233 234 nfp_prog_push(nfp_prog, insn); 235 } 236 237 static void 238 emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, 239 u8 defer, bool set, enum nfp_relo_type relo) 240 { 241 struct nfp_insn_re_regs reg; 242 int err; 243 244 /* NOTE: The bit to test is specified as an rotation amount, such that 245 * the bit to test will be placed on the MSB of the result when 246 * doing a rotate right. For bit X, we need right rotate X + 1. 247 */ 248 bit += 1; 249 250 err = swreg_to_restricted(reg_none(), src, reg_imm(bit), ®, false); 251 if (err) { 252 nfp_prog->error = err; 253 return; 254 } 255 256 __emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set, 257 reg.src_lmextn); 258 259 nfp_prog->prog[nfp_prog->prog_len - 1] |= 260 FIELD_PREP(OP_RELO_TYPE, relo); 261 } 262 263 static void 264 emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer) 265 { 266 emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL); 267 } 268 269 static void 270 __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, 271 enum immed_width width, bool invert, 272 enum immed_shift shift, bool wr_both, 273 bool dst_lmextn, bool src_lmextn) 274 { 275 u64 insn; 276 277 insn = OP_IMMED_BASE | 278 FIELD_PREP(OP_IMMED_A_SRC, areg) | 279 FIELD_PREP(OP_IMMED_B_SRC, breg) | 280 FIELD_PREP(OP_IMMED_IMM, imm_hi) | 281 FIELD_PREP(OP_IMMED_WIDTH, width) | 282 FIELD_PREP(OP_IMMED_INV, invert) | 283 FIELD_PREP(OP_IMMED_SHIFT, shift) | 284 FIELD_PREP(OP_IMMED_WR_AB, wr_both) | 285 FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) | 286 FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn); 287 288 nfp_prog_push(nfp_prog, insn); 289 } 290 291 static void 292 emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm, 293 enum immed_width width, bool invert, enum immed_shift shift) 294 { 295 struct nfp_insn_ur_regs reg; 296 int err; 297 298 if (swreg_type(dst) == NN_REG_IMM) { 299 nfp_prog->error = -EFAULT; 300 return; 301 } 302 303 err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), ®); 304 if (err) { 305 nfp_prog->error = err; 306 return; 307 } 308 309 /* Use reg.dst when destination is No-Dest. */ 310 __emit_immed(nfp_prog, 311 swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg, 312 reg.breg, imm >> 8, width, invert, shift, 313 reg.wr_both, reg.dst_lmextn, reg.src_lmextn); 314 } 315 316 static void 317 __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, 318 enum shf_sc sc, u8 shift, 319 u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both, 320 bool dst_lmextn, bool src_lmextn) 321 { 322 u64 insn; 323 324 if (!FIELD_FIT(OP_SHF_SHIFT, shift)) { 325 nfp_prog->error = -EFAULT; 326 return; 327 } 328 329 if (sc == SHF_SC_L_SHF) 330 shift = 32 - shift; 331 332 insn = OP_SHF_BASE | 333 FIELD_PREP(OP_SHF_A_SRC, areg) | 334 FIELD_PREP(OP_SHF_SC, sc) | 335 FIELD_PREP(OP_SHF_B_SRC, breg) | 336 FIELD_PREP(OP_SHF_I8, i8) | 337 FIELD_PREP(OP_SHF_SW, sw) | 338 FIELD_PREP(OP_SHF_DST, dst) | 339 FIELD_PREP(OP_SHF_SHIFT, shift) | 340 FIELD_PREP(OP_SHF_OP, op) | 341 FIELD_PREP(OP_SHF_DST_AB, dst_ab) | 342 FIELD_PREP(OP_SHF_WR_AB, wr_both) | 343 FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) | 344 FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn); 345 346 nfp_prog_push(nfp_prog, insn); 347 } 348 349 static void 350 emit_shf(struct nfp_prog *nfp_prog, swreg dst, 351 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift) 352 { 353 struct nfp_insn_re_regs reg; 354 int err; 355 356 err = swreg_to_restricted(dst, lreg, rreg, ®, true); 357 if (err) { 358 nfp_prog->error = err; 359 return; 360 } 361 362 __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift, 363 reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both, 364 reg.dst_lmextn, reg.src_lmextn); 365 } 366 367 static void 368 emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst, 369 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc) 370 { 371 if (sc == SHF_SC_R_ROT) { 372 pr_err("indirect shift is not allowed on rotation\n"); 373 nfp_prog->error = -EFAULT; 374 return; 375 } 376 377 emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0); 378 } 379 380 static void 381 __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, 382 u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both, 383 bool dst_lmextn, bool src_lmextn) 384 { 385 u64 insn; 386 387 insn = OP_ALU_BASE | 388 FIELD_PREP(OP_ALU_A_SRC, areg) | 389 FIELD_PREP(OP_ALU_B_SRC, breg) | 390 FIELD_PREP(OP_ALU_DST, dst) | 391 FIELD_PREP(OP_ALU_SW, swap) | 392 FIELD_PREP(OP_ALU_OP, op) | 393 FIELD_PREP(OP_ALU_DST_AB, dst_ab) | 394 FIELD_PREP(OP_ALU_WR_AB, wr_both) | 395 FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) | 396 FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn); 397 398 nfp_prog_push(nfp_prog, insn); 399 } 400 401 static void 402 emit_alu(struct nfp_prog *nfp_prog, swreg dst, 403 swreg lreg, enum alu_op op, swreg rreg) 404 { 405 struct nfp_insn_ur_regs reg; 406 int err; 407 408 err = swreg_to_unrestricted(dst, lreg, rreg, ®); 409 if (err) { 410 nfp_prog->error = err; 411 return; 412 } 413 414 __emit_alu(nfp_prog, reg.dst, reg.dst_ab, 415 reg.areg, op, reg.breg, reg.swap, reg.wr_both, 416 reg.dst_lmextn, reg.src_lmextn); 417 } 418 419 static void 420 __emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg, 421 enum mul_type type, enum mul_step step, u16 breg, bool swap, 422 bool wr_both, bool dst_lmextn, bool src_lmextn) 423 { 424 u64 insn; 425 426 insn = OP_MUL_BASE | 427 FIELD_PREP(OP_MUL_A_SRC, areg) | 428 FIELD_PREP(OP_MUL_B_SRC, breg) | 429 FIELD_PREP(OP_MUL_STEP, step) | 430 FIELD_PREP(OP_MUL_DST_AB, dst_ab) | 431 FIELD_PREP(OP_MUL_SW, swap) | 432 FIELD_PREP(OP_MUL_TYPE, type) | 433 FIELD_PREP(OP_MUL_WR_AB, wr_both) | 434 FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) | 435 FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn); 436 437 nfp_prog_push(nfp_prog, insn); 438 } 439 440 static void 441 emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type, 442 enum mul_step step, swreg rreg) 443 { 444 struct nfp_insn_ur_regs reg; 445 u16 areg; 446 int err; 447 448 if (type == MUL_TYPE_START && step != MUL_STEP_NONE) { 449 nfp_prog->error = -EINVAL; 450 return; 451 } 452 453 if (step == MUL_LAST || step == MUL_LAST_2) { 454 /* When type is step and step Number is LAST or LAST2, left 455 * source is used as destination. 456 */ 457 err = swreg_to_unrestricted(lreg, reg_none(), rreg, ®); 458 areg = reg.dst; 459 } else { 460 err = swreg_to_unrestricted(reg_none(), lreg, rreg, ®); 461 areg = reg.areg; 462 } 463 464 if (err) { 465 nfp_prog->error = err; 466 return; 467 } 468 469 __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap, 470 reg.wr_both, reg.dst_lmextn, reg.src_lmextn); 471 } 472 473 static void 474 __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, 475 u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8, 476 bool zero, bool swap, bool wr_both, 477 bool dst_lmextn, bool src_lmextn) 478 { 479 u64 insn; 480 481 insn = OP_LDF_BASE | 482 FIELD_PREP(OP_LDF_A_SRC, areg) | 483 FIELD_PREP(OP_LDF_SC, sc) | 484 FIELD_PREP(OP_LDF_B_SRC, breg) | 485 FIELD_PREP(OP_LDF_I8, imm8) | 486 FIELD_PREP(OP_LDF_SW, swap) | 487 FIELD_PREP(OP_LDF_ZF, zero) | 488 FIELD_PREP(OP_LDF_BMASK, bmask) | 489 FIELD_PREP(OP_LDF_SHF, shift) | 490 FIELD_PREP(OP_LDF_WR_AB, wr_both) | 491 FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) | 492 FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn); 493 494 nfp_prog_push(nfp_prog, insn); 495 } 496 497 static void 498 emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, 499 enum shf_sc sc, u8 shift, bool zero) 500 { 501 struct nfp_insn_re_regs reg; 502 int err; 503 504 /* Note: ld_field is special as it uses one of the src regs as dst */ 505 err = swreg_to_restricted(dst, dst, src, ®, true); 506 if (err) { 507 nfp_prog->error = err; 508 return; 509 } 510 511 __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift, 512 reg.i8, zero, reg.swap, reg.wr_both, 513 reg.dst_lmextn, reg.src_lmextn); 514 } 515 516 static void 517 emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, 518 enum shf_sc sc, u8 shift) 519 { 520 emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false); 521 } 522 523 static void 524 __emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr, 525 bool dst_lmextn, bool src_lmextn) 526 { 527 u64 insn; 528 529 insn = OP_LCSR_BASE | 530 FIELD_PREP(OP_LCSR_A_SRC, areg) | 531 FIELD_PREP(OP_LCSR_B_SRC, breg) | 532 FIELD_PREP(OP_LCSR_WRITE, wr) | 533 FIELD_PREP(OP_LCSR_ADDR, addr / 4) | 534 FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) | 535 FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn); 536 537 nfp_prog_push(nfp_prog, insn); 538 } 539 540 static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr) 541 { 542 struct nfp_insn_ur_regs reg; 543 int err; 544 545 /* This instruction takes immeds instead of reg_none() for the ignored 546 * operand, but we can't encode 2 immeds in one instr with our normal 547 * swreg infra so if param is an immed, we encode as reg_none() and 548 * copy the immed to both operands. 549 */ 550 if (swreg_type(src) == NN_REG_IMM) { 551 err = swreg_to_unrestricted(reg_none(), src, reg_none(), ®); 552 reg.breg = reg.areg; 553 } else { 554 err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), ®); 555 } 556 if (err) { 557 nfp_prog->error = err; 558 return; 559 } 560 561 __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr, 562 false, reg.src_lmextn); 563 } 564 565 /* CSR value is read in following immed[gpr, 0] */ 566 static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr) 567 { 568 __emit_lcsr(nfp_prog, 0, 0, false, addr, false, false); 569 } 570 571 static void emit_nop(struct nfp_prog *nfp_prog) 572 { 573 __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0); 574 } 575 576 /* --- Wrappers --- */ 577 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift) 578 { 579 if (!(imm & 0xffff0000)) { 580 *val = imm; 581 *shift = IMMED_SHIFT_0B; 582 } else if (!(imm & 0xff0000ff)) { 583 *val = imm >> 8; 584 *shift = IMMED_SHIFT_1B; 585 } else if (!(imm & 0x0000ffff)) { 586 *val = imm >> 16; 587 *shift = IMMED_SHIFT_2B; 588 } else { 589 return false; 590 } 591 592 return true; 593 } 594 595 static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm) 596 { 597 enum immed_shift shift; 598 u16 val; 599 600 if (pack_immed(imm, &val, &shift)) { 601 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift); 602 } else if (pack_immed(~imm, &val, &shift)) { 603 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift); 604 } else { 605 emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL, 606 false, IMMED_SHIFT_0B); 607 emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD, 608 false, IMMED_SHIFT_2B); 609 } 610 } 611 612 static void 613 wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm, 614 enum nfp_relo_type relo) 615 { 616 if (imm > 0xffff) { 617 pr_err("relocation of a large immediate!\n"); 618 nfp_prog->error = -EFAULT; 619 return; 620 } 621 emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 622 623 nfp_prog->prog[nfp_prog->prog_len - 1] |= 624 FIELD_PREP(OP_RELO_TYPE, relo); 625 } 626 627 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted) 628 * If the @imm is small enough encode it directly in operand and return 629 * otherwise load @imm to a spare register and return its encoding. 630 */ 631 static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) 632 { 633 if (FIELD_FIT(UR_REG_IMM_MAX, imm)) 634 return reg_imm(imm); 635 636 wrp_immed(nfp_prog, tmp_reg, imm); 637 return tmp_reg; 638 } 639 640 /* re_load_imm_any() - encode immediate or use tmp register (restricted) 641 * If the @imm is small enough encode it directly in operand and return 642 * otherwise load @imm to a spare register and return its encoding. 643 */ 644 static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) 645 { 646 if (FIELD_FIT(RE_REG_IMM_MAX, imm)) 647 return reg_imm(imm); 648 649 wrp_immed(nfp_prog, tmp_reg, imm); 650 return tmp_reg; 651 } 652 653 static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count) 654 { 655 while (count--) 656 emit_nop(nfp_prog); 657 } 658 659 static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src) 660 { 661 emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src); 662 } 663 664 static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src) 665 { 666 wrp_mov(nfp_prog, reg_both(dst), reg_b(src)); 667 } 668 669 /* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the 670 * result to @dst from low end. 671 */ 672 static void 673 wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len, 674 u8 offset) 675 { 676 enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE; 677 u8 mask = (1 << field_len) - 1; 678 679 emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true); 680 } 681 682 /* wrp_reg_or_subpart() - load @field_len bytes from low end of @src, or the 683 * result to @dst from offset, there is no change on the other bits of @dst. 684 */ 685 static void 686 wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, 687 u8 field_len, u8 offset) 688 { 689 enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE; 690 u8 mask = ((1 << field_len) - 1) << offset; 691 692 emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8); 693 } 694 695 static void 696 addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 697 swreg *rega, swreg *regb) 698 { 699 if (offset == reg_imm(0)) { 700 *rega = reg_a(src_gpr); 701 *regb = reg_b(src_gpr + 1); 702 return; 703 } 704 705 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset); 706 emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C, 707 reg_imm(0)); 708 *rega = imm_a(nfp_prog); 709 *regb = imm_b(nfp_prog); 710 } 711 712 /* NFP has Command Push Pull bus which supports bluk memory operations. */ 713 static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 714 { 715 bool descending_seq = meta->ldst_gather_len < 0; 716 s16 len = abs(meta->ldst_gather_len); 717 swreg src_base, off; 718 bool src_40bit_addr; 719 unsigned int i; 720 u8 xfer_num; 721 722 off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 723 src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE; 724 src_base = reg_a(meta->insn.src_reg * 2); 725 xfer_num = round_up(len, 4) / 4; 726 727 if (src_40bit_addr) 728 addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base, 729 &off); 730 731 /* Setup PREV_ALU fields to override memory read length. */ 732 if (len > 32) 733 wrp_immed(nfp_prog, reg_none(), 734 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 735 736 /* Memory read from source addr into transfer-in registers. */ 737 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, 738 src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0, 739 src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32); 740 741 /* Move from transfer-in to transfer-out. */ 742 for (i = 0; i < xfer_num; i++) 743 wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i)); 744 745 off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog)); 746 747 if (len <= 8) { 748 /* Use single direct_ref write8. */ 749 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 750 reg_a(meta->paired_st->dst_reg * 2), off, len - 1, 751 CMD_CTX_SWAP); 752 } else if (len <= 32 && IS_ALIGNED(len, 4)) { 753 /* Use single direct_ref write32. */ 754 emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 755 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1, 756 CMD_CTX_SWAP); 757 } else if (len <= 32) { 758 /* Use single indirect_ref write8. */ 759 wrp_immed(nfp_prog, reg_none(), 760 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1)); 761 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 762 reg_a(meta->paired_st->dst_reg * 2), off, 763 len - 1, CMD_CTX_SWAP); 764 } else if (IS_ALIGNED(len, 4)) { 765 /* Use single indirect_ref write32. */ 766 wrp_immed(nfp_prog, reg_none(), 767 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 768 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 769 reg_a(meta->paired_st->dst_reg * 2), off, 770 xfer_num - 1, CMD_CTX_SWAP); 771 } else if (len <= 40) { 772 /* Use one direct_ref write32 to write the first 32-bytes, then 773 * another direct_ref write8 to write the remaining bytes. 774 */ 775 emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 776 reg_a(meta->paired_st->dst_reg * 2), off, 7, 777 CMD_CTX_SWAP); 778 779 off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32, 780 imm_b(nfp_prog)); 781 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8, 782 reg_a(meta->paired_st->dst_reg * 2), off, len - 33, 783 CMD_CTX_SWAP); 784 } else { 785 /* Use one indirect_ref write32 to write 4-bytes aligned length, 786 * then another direct_ref write8 to write the remaining bytes. 787 */ 788 u8 new_off; 789 790 wrp_immed(nfp_prog, reg_none(), 791 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2)); 792 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, 793 reg_a(meta->paired_st->dst_reg * 2), off, 794 xfer_num - 2, CMD_CTX_SWAP); 795 new_off = meta->paired_st->off + (xfer_num - 1) * 4; 796 off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog)); 797 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 798 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off, 799 (len & 0x3) - 1, CMD_CTX_SWAP); 800 } 801 802 /* TODO: The following extra load is to make sure data flow be identical 803 * before and after we do memory copy optimization. 804 * 805 * The load destination register is not guaranteed to be dead, so we 806 * need to make sure it is loaded with the value the same as before 807 * this transformation. 808 * 809 * These extra loads could be removed once we have accurate register 810 * usage information. 811 */ 812 if (descending_seq) 813 xfer_num = 0; 814 else if (BPF_SIZE(meta->insn.code) != BPF_DW) 815 xfer_num = xfer_num - 1; 816 else 817 xfer_num = xfer_num - 2; 818 819 switch (BPF_SIZE(meta->insn.code)) { 820 case BPF_B: 821 wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2), 822 reg_xfer(xfer_num), 1, 823 IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1); 824 break; 825 case BPF_H: 826 wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2), 827 reg_xfer(xfer_num), 2, (len & 3) ^ 2); 828 break; 829 case BPF_W: 830 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), 831 reg_xfer(0)); 832 break; 833 case BPF_DW: 834 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), 835 reg_xfer(xfer_num)); 836 wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 837 reg_xfer(xfer_num + 1)); 838 break; 839 } 840 841 if (BPF_SIZE(meta->insn.code) != BPF_DW) 842 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 843 844 return 0; 845 } 846 847 static int 848 data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) 849 { 850 unsigned int i; 851 u16 shift, sz; 852 853 /* We load the value from the address indicated in @offset and then 854 * shift out the data we don't need. Note: this is big endian! 855 */ 856 sz = max(size, 4); 857 shift = size < 4 ? 4 - size : 0; 858 859 emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, 860 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP); 861 862 i = 0; 863 if (shift) 864 emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE, 865 reg_xfer(0), SHF_SC_R_SHF, shift * 8); 866 else 867 for (; i * 4 < size; i++) 868 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); 869 870 if (i < 2) 871 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); 872 873 return 0; 874 } 875 876 static int 877 data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, 878 swreg lreg, swreg rreg, int size, enum cmd_mode mode) 879 { 880 unsigned int i; 881 u8 mask, sz; 882 883 /* We load the value from the address indicated in rreg + lreg and then 884 * mask out the data we don't need. Note: this is little endian! 885 */ 886 sz = max(size, 4); 887 mask = size < 4 ? GENMASK(size - 1, 0) : 0; 888 889 emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0, 890 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP); 891 892 i = 0; 893 if (mask) 894 emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask, 895 reg_xfer(0), SHF_SC_NONE, 0, true); 896 else 897 for (; i * 4 < size; i++) 898 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); 899 900 if (i < 2) 901 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); 902 903 return 0; 904 } 905 906 static int 907 data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 908 u8 dst_gpr, u8 size) 909 { 910 return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset, 911 size, CMD_MODE_32b); 912 } 913 914 static int 915 data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, 916 u8 dst_gpr, u8 size) 917 { 918 swreg rega, regb; 919 920 addr40_offset(nfp_prog, src_gpr, offset, ®a, ®b); 921 922 return data_ld_host_order(nfp_prog, dst_gpr, rega, regb, 923 size, CMD_MODE_40b_BA); 924 } 925 926 static int 927 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) 928 { 929 swreg tmp_reg; 930 931 /* Calculate the true offset (src_reg + imm) */ 932 tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); 933 emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg); 934 935 /* Check packet length (size guaranteed to fit b/c it's u8) */ 936 emit_alu(nfp_prog, imm_a(nfp_prog), 937 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); 938 emit_alu(nfp_prog, reg_none(), 939 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); 940 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); 941 942 /* Load data */ 943 return data_ld(nfp_prog, imm_b(nfp_prog), 0, size); 944 } 945 946 static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) 947 { 948 swreg tmp_reg; 949 950 /* Check packet length */ 951 tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog)); 952 emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); 953 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); 954 955 /* Load data */ 956 tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); 957 return data_ld(nfp_prog, tmp_reg, 0, size); 958 } 959 960 static int 961 data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, 962 u8 src_gpr, u8 size) 963 { 964 unsigned int i; 965 966 for (i = 0; i * 4 < size; i++) 967 wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i)); 968 969 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 970 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); 971 972 return 0; 973 } 974 975 static int 976 data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, 977 u64 imm, u8 size) 978 { 979 wrp_immed(nfp_prog, reg_xfer(0), imm); 980 if (size == 8) 981 wrp_immed(nfp_prog, reg_xfer(1), imm >> 32); 982 983 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, 984 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); 985 986 return 0; 987 } 988 989 typedef int 990 (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off, 991 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 992 bool needs_inc); 993 994 static int 995 wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, 996 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 997 bool needs_inc) 998 { 999 bool should_inc = needs_inc && new_gpr && !last; 1000 u32 idx, src_byte; 1001 enum shf_sc sc; 1002 swreg reg; 1003 int shf; 1004 u8 mask; 1005 1006 if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4)) 1007 return -EOPNOTSUPP; 1008 1009 idx = off / 4; 1010 1011 /* Move the entire word */ 1012 if (size == 4) { 1013 wrp_mov(nfp_prog, reg_both(dst), 1014 should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx)); 1015 return 0; 1016 } 1017 1018 if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX)) 1019 return -EOPNOTSUPP; 1020 1021 src_byte = off % 4; 1022 1023 mask = (1 << size) - 1; 1024 mask <<= dst_byte; 1025 1026 if (WARN_ON_ONCE(mask > 0xf)) 1027 return -EOPNOTSUPP; 1028 1029 shf = abs(src_byte - dst_byte) * 8; 1030 if (src_byte == dst_byte) { 1031 sc = SHF_SC_NONE; 1032 } else if (src_byte < dst_byte) { 1033 shf = 32 - shf; 1034 sc = SHF_SC_L_SHF; 1035 } else { 1036 sc = SHF_SC_R_SHF; 1037 } 1038 1039 /* ld_field can address fewer indexes, if offset too large do RMW. 1040 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. 1041 */ 1042 if (idx <= RE_REG_LM_IDX_MAX) { 1043 reg = reg_lm(lm3 ? 3 : 0, idx); 1044 } else { 1045 reg = imm_a(nfp_prog); 1046 /* If it's not the first part of the load and we start a new GPR 1047 * that means we are loading a second part of the LMEM word into 1048 * a new GPR. IOW we've already looked that LMEM word and 1049 * therefore it has been loaded into imm_a(). 1050 */ 1051 if (first || !new_gpr) 1052 wrp_mov(nfp_prog, reg, reg_lm(0, idx)); 1053 } 1054 1055 emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr); 1056 1057 if (should_inc) 1058 wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3)); 1059 1060 return 0; 1061 } 1062 1063 static int 1064 wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, 1065 unsigned int size, bool first, bool new_gpr, bool last, bool lm3, 1066 bool needs_inc) 1067 { 1068 bool should_inc = needs_inc && new_gpr && !last; 1069 u32 idx, dst_byte; 1070 enum shf_sc sc; 1071 swreg reg; 1072 int shf; 1073 u8 mask; 1074 1075 if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4)) 1076 return -EOPNOTSUPP; 1077 1078 idx = off / 4; 1079 1080 /* Move the entire word */ 1081 if (size == 4) { 1082 wrp_mov(nfp_prog, 1083 should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx), 1084 reg_b(src)); 1085 return 0; 1086 } 1087 1088 if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX)) 1089 return -EOPNOTSUPP; 1090 1091 dst_byte = off % 4; 1092 1093 mask = (1 << size) - 1; 1094 mask <<= dst_byte; 1095 1096 if (WARN_ON_ONCE(mask > 0xf)) 1097 return -EOPNOTSUPP; 1098 1099 shf = abs(src_byte - dst_byte) * 8; 1100 if (src_byte == dst_byte) { 1101 sc = SHF_SC_NONE; 1102 } else if (src_byte < dst_byte) { 1103 shf = 32 - shf; 1104 sc = SHF_SC_L_SHF; 1105 } else { 1106 sc = SHF_SC_R_SHF; 1107 } 1108 1109 /* ld_field can address fewer indexes, if offset too large do RMW. 1110 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. 1111 */ 1112 if (idx <= RE_REG_LM_IDX_MAX) { 1113 reg = reg_lm(lm3 ? 3 : 0, idx); 1114 } else { 1115 reg = imm_a(nfp_prog); 1116 /* Only first and last LMEM locations are going to need RMW, 1117 * the middle location will be overwritten fully. 1118 */ 1119 if (first || last) 1120 wrp_mov(nfp_prog, reg, reg_lm(0, idx)); 1121 } 1122 1123 emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf); 1124 1125 if (new_gpr || last) { 1126 if (idx > RE_REG_LM_IDX_MAX) 1127 wrp_mov(nfp_prog, reg_lm(0, idx), reg); 1128 if (should_inc) 1129 wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3)); 1130 } 1131 1132 return 0; 1133 } 1134 1135 static int 1136 mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1137 unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr, 1138 bool clr_gpr, lmem_step step) 1139 { 1140 s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; 1141 bool first = true, last; 1142 bool needs_inc = false; 1143 swreg stack_off_reg; 1144 u8 prev_gpr = 255; 1145 u32 gpr_byte = 0; 1146 bool lm3 = true; 1147 int ret; 1148 1149 if (meta->ptr_not_const) { 1150 /* Use of the last encountered ptr_off is OK, they all have 1151 * the same alignment. Depend on low bits of value being 1152 * discarded when written to LMaddr register. 1153 */ 1154 stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off, 1155 stack_imm(nfp_prog)); 1156 1157 emit_alu(nfp_prog, imm_b(nfp_prog), 1158 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg); 1159 1160 needs_inc = true; 1161 } else if (off + size <= 64) { 1162 /* We can reach bottom 64B with LMaddr0 */ 1163 lm3 = false; 1164 } else if (round_down(off, 32) == round_down(off + size - 1, 32)) { 1165 /* We have to set up a new pointer. If we know the offset 1166 * and the entire access falls into a single 32 byte aligned 1167 * window we won't have to increment the LM pointer. 1168 * The 32 byte alignment is imporant because offset is ORed in 1169 * not added when doing *l$indexN[off]. 1170 */ 1171 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32), 1172 stack_imm(nfp_prog)); 1173 emit_alu(nfp_prog, imm_b(nfp_prog), 1174 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); 1175 1176 off %= 32; 1177 } else { 1178 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4), 1179 stack_imm(nfp_prog)); 1180 1181 emit_alu(nfp_prog, imm_b(nfp_prog), 1182 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); 1183 1184 needs_inc = true; 1185 } 1186 if (lm3) { 1187 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); 1188 /* For size < 4 one slot will be filled by zeroing of upper. */ 1189 wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); 1190 } 1191 1192 if (clr_gpr && size < 8) 1193 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 1194 1195 while (size) { 1196 u32 slice_end; 1197 u8 slice_size; 1198 1199 slice_size = min(size, 4 - gpr_byte); 1200 slice_end = min(off + slice_size, round_up(off + 1, 4)); 1201 slice_size = slice_end - off; 1202 1203 last = slice_size == size; 1204 1205 if (needs_inc) 1206 off %= 4; 1207 1208 ret = step(nfp_prog, gpr, gpr_byte, off, slice_size, 1209 first, gpr != prev_gpr, last, lm3, needs_inc); 1210 if (ret) 1211 return ret; 1212 1213 prev_gpr = gpr; 1214 first = false; 1215 1216 gpr_byte += slice_size; 1217 if (gpr_byte >= 4) { 1218 gpr_byte -= 4; 1219 gpr++; 1220 } 1221 1222 size -= slice_size; 1223 off += slice_size; 1224 } 1225 1226 return 0; 1227 } 1228 1229 static void 1230 wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) 1231 { 1232 swreg tmp_reg; 1233 1234 if (alu_op == ALU_OP_AND) { 1235 if (!imm) 1236 wrp_immed(nfp_prog, reg_both(dst), 0); 1237 if (!imm || !~imm) 1238 return; 1239 } 1240 if (alu_op == ALU_OP_OR) { 1241 if (!~imm) 1242 wrp_immed(nfp_prog, reg_both(dst), ~0U); 1243 if (!imm || !~imm) 1244 return; 1245 } 1246 if (alu_op == ALU_OP_XOR) { 1247 if (!~imm) 1248 emit_alu(nfp_prog, reg_both(dst), reg_none(), 1249 ALU_OP_NOT, reg_b(dst)); 1250 if (!imm || !~imm) 1251 return; 1252 } 1253 1254 tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); 1255 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg); 1256 } 1257 1258 static int 1259 wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1260 enum alu_op alu_op, bool skip) 1261 { 1262 const struct bpf_insn *insn = &meta->insn; 1263 u64 imm = insn->imm; /* sign extend */ 1264 1265 if (skip) { 1266 meta->skip = true; 1267 return 0; 1268 } 1269 1270 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U); 1271 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32); 1272 1273 return 0; 1274 } 1275 1276 static int 1277 wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1278 enum alu_op alu_op) 1279 { 1280 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2; 1281 1282 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src)); 1283 emit_alu(nfp_prog, reg_both(dst + 1), 1284 reg_a(dst + 1), alu_op, reg_b(src + 1)); 1285 1286 return 0; 1287 } 1288 1289 static int 1290 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1291 enum alu_op alu_op, bool skip) 1292 { 1293 const struct bpf_insn *insn = &meta->insn; 1294 1295 if (skip) { 1296 meta->skip = true; 1297 return 0; 1298 } 1299 1300 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); 1301 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1302 1303 return 0; 1304 } 1305 1306 static int 1307 wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1308 enum alu_op alu_op) 1309 { 1310 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2; 1311 1312 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src)); 1313 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 1314 1315 return 0; 1316 } 1317 1318 static void 1319 wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src, 1320 enum br_mask br_mask, u16 off) 1321 { 1322 emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src)); 1323 emit_br(nfp_prog, br_mask, off, 0); 1324 } 1325 1326 static int 1327 wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1328 enum alu_op alu_op, enum br_mask br_mask) 1329 { 1330 const struct bpf_insn *insn = &meta->insn; 1331 1332 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op, 1333 insn->src_reg * 2, br_mask, insn->off); 1334 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, 1335 insn->src_reg * 2 + 1, br_mask, insn->off); 1336 1337 return 0; 1338 } 1339 1340 static const struct jmp_code_map { 1341 enum br_mask br_mask; 1342 bool swap; 1343 } jmp_code_map[] = { 1344 [BPF_JGT >> 4] = { BR_BLO, true }, 1345 [BPF_JGE >> 4] = { BR_BHS, false }, 1346 [BPF_JLT >> 4] = { BR_BLO, false }, 1347 [BPF_JLE >> 4] = { BR_BHS, true }, 1348 [BPF_JSGT >> 4] = { BR_BLT, true }, 1349 [BPF_JSGE >> 4] = { BR_BGE, false }, 1350 [BPF_JSLT >> 4] = { BR_BLT, false }, 1351 [BPF_JSLE >> 4] = { BR_BGE, true }, 1352 }; 1353 1354 static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta) 1355 { 1356 unsigned int op; 1357 1358 op = BPF_OP(meta->insn.code) >> 4; 1359 /* br_mask of 0 is BR_BEQ which we don't use in jump code table */ 1360 if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) || 1361 !jmp_code_map[op].br_mask, 1362 "no code found for jump instruction")) 1363 return NULL; 1364 1365 return &jmp_code_map[op]; 1366 } 1367 1368 static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1369 { 1370 const struct bpf_insn *insn = &meta->insn; 1371 u64 imm = insn->imm; /* sign extend */ 1372 const struct jmp_code_map *code; 1373 enum alu_op alu_op, carry_op; 1374 u8 reg = insn->dst_reg * 2; 1375 swreg tmp_reg; 1376 1377 code = nfp_jmp_code_get(meta); 1378 if (!code) 1379 return -EINVAL; 1380 1381 alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB; 1382 carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C; 1383 1384 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 1385 if (!code->swap) 1386 emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg); 1387 else 1388 emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg)); 1389 1390 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 1391 if (!code->swap) 1392 emit_alu(nfp_prog, reg_none(), 1393 reg_a(reg + 1), carry_op, tmp_reg); 1394 else 1395 emit_alu(nfp_prog, reg_none(), 1396 tmp_reg, carry_op, reg_a(reg + 1)); 1397 1398 emit_br(nfp_prog, code->br_mask, insn->off, 0); 1399 1400 return 0; 1401 } 1402 1403 static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1404 { 1405 const struct bpf_insn *insn = &meta->insn; 1406 const struct jmp_code_map *code; 1407 u8 areg, breg; 1408 1409 code = nfp_jmp_code_get(meta); 1410 if (!code) 1411 return -EINVAL; 1412 1413 areg = insn->dst_reg * 2; 1414 breg = insn->src_reg * 2; 1415 1416 if (code->swap) { 1417 areg ^= breg; 1418 breg ^= areg; 1419 areg ^= breg; 1420 } 1421 1422 emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg)); 1423 emit_alu(nfp_prog, reg_none(), 1424 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); 1425 emit_br(nfp_prog, code->br_mask, insn->off, 0); 1426 1427 return 0; 1428 } 1429 1430 static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) 1431 { 1432 emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in, 1433 SHF_SC_R_ROT, 8); 1434 emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out), 1435 SHF_SC_R_ROT, 16); 1436 } 1437 1438 static void 1439 wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, 1440 swreg rreg, bool gen_high_half) 1441 { 1442 emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); 1443 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg); 1444 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg); 1445 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg); 1446 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg); 1447 emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none()); 1448 if (gen_high_half) 1449 emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2, 1450 reg_none()); 1451 else 1452 wrp_immed(nfp_prog, dst_hi, 0); 1453 } 1454 1455 static void 1456 wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, 1457 swreg rreg) 1458 { 1459 emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); 1460 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg); 1461 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg); 1462 emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none()); 1463 } 1464 1465 static int 1466 wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1467 bool gen_high_half, bool ropnd_from_reg) 1468 { 1469 swreg multiplier, multiplicand, dst_hi, dst_lo; 1470 const struct bpf_insn *insn = &meta->insn; 1471 u32 lopnd_max, ropnd_max; 1472 u8 dst_reg; 1473 1474 dst_reg = insn->dst_reg; 1475 multiplicand = reg_a(dst_reg * 2); 1476 dst_hi = reg_both(dst_reg * 2 + 1); 1477 dst_lo = reg_both(dst_reg * 2); 1478 lopnd_max = meta->umax_dst; 1479 if (ropnd_from_reg) { 1480 multiplier = reg_b(insn->src_reg * 2); 1481 ropnd_max = meta->umax_src; 1482 } else { 1483 u32 imm = insn->imm; 1484 1485 multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); 1486 ropnd_max = imm; 1487 } 1488 if (lopnd_max > U16_MAX || ropnd_max > U16_MAX) 1489 wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier, 1490 gen_high_half); 1491 else 1492 wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier); 1493 1494 return 0; 1495 } 1496 1497 static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm) 1498 { 1499 swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst); 1500 struct reciprocal_value rvalue; 1501 swreg tmp_b = imm_b(nfp_prog); 1502 swreg magic; 1503 1504 if (imm > U32_MAX) { 1505 wrp_immed(nfp_prog, dst_both, 0); 1506 return 0; 1507 } 1508 1509 rvalue = reciprocal_value(imm); 1510 magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog)); 1511 wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a, magic, 1512 true); 1513 emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB, tmp_b); 1514 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, 1515 SHF_SC_R_SHF, rvalue.sh1); 1516 emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD, tmp_b); 1517 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, 1518 SHF_SC_R_SHF, rvalue.sh2); 1519 1520 return 0; 1521 } 1522 1523 static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1524 { 1525 swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); 1526 struct nfp_bpf_cap_adjust_head *adjust_head; 1527 u32 ret_einval, end; 1528 1529 adjust_head = &nfp_prog->bpf->adjust_head; 1530 1531 /* Optimized version - 5 vs 14 cycles */ 1532 if (nfp_prog->adjust_head_location != UINT_MAX) { 1533 if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n)) 1534 return -EINVAL; 1535 1536 emit_alu(nfp_prog, pptr_reg(nfp_prog), 1537 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog)); 1538 emit_alu(nfp_prog, plen_reg(nfp_prog), 1539 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1540 emit_alu(nfp_prog, pv_len(nfp_prog), 1541 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1542 1543 wrp_immed(nfp_prog, reg_both(0), 0); 1544 wrp_immed(nfp_prog, reg_both(1), 0); 1545 1546 /* TODO: when adjust head is guaranteed to succeed we can 1547 * also eliminate the following if (r0 == 0) branch. 1548 */ 1549 1550 return 0; 1551 } 1552 1553 ret_einval = nfp_prog_current_offset(nfp_prog) + 14; 1554 end = ret_einval + 2; 1555 1556 /* We need to use a temp because offset is just a part of the pkt ptr */ 1557 emit_alu(nfp_prog, tmp, 1558 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog)); 1559 1560 /* Validate result will fit within FW datapath constraints */ 1561 emit_alu(nfp_prog, reg_none(), 1562 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min)); 1563 emit_br(nfp_prog, BR_BLO, ret_einval, 0); 1564 emit_alu(nfp_prog, reg_none(), 1565 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp); 1566 emit_br(nfp_prog, BR_BLO, ret_einval, 0); 1567 1568 /* Validate the length is at least ETH_HLEN */ 1569 emit_alu(nfp_prog, tmp_len, 1570 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1571 emit_alu(nfp_prog, reg_none(), 1572 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN)); 1573 emit_br(nfp_prog, BR_BMI, ret_einval, 0); 1574 1575 /* Load the ret code */ 1576 wrp_immed(nfp_prog, reg_both(0), 0); 1577 wrp_immed(nfp_prog, reg_both(1), 0); 1578 1579 /* Modify the packet metadata */ 1580 emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0); 1581 1582 /* Skip over the -EINVAL ret code (defer 2) */ 1583 emit_br(nfp_prog, BR_UNC, end, 2); 1584 1585 emit_alu(nfp_prog, plen_reg(nfp_prog), 1586 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1587 emit_alu(nfp_prog, pv_len(nfp_prog), 1588 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1589 1590 /* return -EINVAL target */ 1591 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval)) 1592 return -EINVAL; 1593 1594 wrp_immed(nfp_prog, reg_both(0), -22); 1595 wrp_immed(nfp_prog, reg_both(1), ~0); 1596 1597 if (!nfp_prog_confirm_current_offset(nfp_prog, end)) 1598 return -EINVAL; 1599 1600 return 0; 1601 } 1602 1603 static int 1604 map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1605 { 1606 bool load_lm_ptr; 1607 u32 ret_tgt; 1608 s64 lm_off; 1609 1610 /* We only have to reload LM0 if the key is not at start of stack */ 1611 lm_off = nfp_prog->stack_depth; 1612 lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off; 1613 load_lm_ptr = meta->arg2.var_off || lm_off; 1614 1615 /* Set LM0 to start of key */ 1616 if (load_lm_ptr) 1617 emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0); 1618 if (meta->func_id == BPF_FUNC_map_update_elem) 1619 emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2); 1620 1621 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 1622 2, RELO_BR_HELPER); 1623 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; 1624 1625 /* Load map ID into A0 */ 1626 wrp_mov(nfp_prog, reg_a(0), reg_a(2)); 1627 1628 /* Load the return address into B0 */ 1629 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); 1630 1631 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 1632 return -EINVAL; 1633 1634 /* Reset the LM0 pointer */ 1635 if (!load_lm_ptr) 1636 return 0; 1637 1638 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); 1639 wrp_nops(nfp_prog, 3); 1640 1641 return 0; 1642 } 1643 1644 static int 1645 nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1646 { 1647 __emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM); 1648 /* CSR value is read in following immed[gpr, 0] */ 1649 emit_immed(nfp_prog, reg_both(0), 0, 1650 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1651 emit_immed(nfp_prog, reg_both(1), 0, 1652 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1653 return 0; 1654 } 1655 1656 static int 1657 nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1658 { 1659 swreg ptr_type; 1660 u32 ret_tgt; 1661 1662 ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog)); 1663 1664 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; 1665 1666 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 1667 2, RELO_BR_HELPER); 1668 1669 /* Load ptr type into A1 */ 1670 wrp_mov(nfp_prog, reg_a(1), ptr_type); 1671 1672 /* Load the return address into B0 */ 1673 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); 1674 1675 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 1676 return -EINVAL; 1677 1678 return 0; 1679 } 1680 1681 static int 1682 nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1683 { 1684 u32 jmp_tgt; 1685 1686 jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5; 1687 1688 /* Make sure the queue id fits into FW field */ 1689 emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2), 1690 ALU_OP_AND_NOT_B, reg_imm(0xff)); 1691 emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2); 1692 1693 /* Set the 'queue selected' bit and the queue value */ 1694 emit_shf(nfp_prog, pv_qsel_set(nfp_prog), 1695 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1), 1696 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT); 1697 emit_ld_field(nfp_prog, 1698 pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2), 1699 SHF_SC_NONE, 0); 1700 /* Delay slots end here, we will jump over next instruction if queue 1701 * value fits into the field. 1702 */ 1703 emit_ld_field(nfp_prog, 1704 pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX), 1705 SHF_SC_NONE, 0); 1706 1707 if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt)) 1708 return -EINVAL; 1709 1710 return 0; 1711 } 1712 1713 /* --- Callbacks --- */ 1714 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1715 { 1716 const struct bpf_insn *insn = &meta->insn; 1717 u8 dst = insn->dst_reg * 2; 1718 u8 src = insn->src_reg * 2; 1719 1720 if (insn->src_reg == BPF_REG_10) { 1721 swreg stack_depth_reg; 1722 1723 stack_depth_reg = ur_load_imm_any(nfp_prog, 1724 nfp_prog->stack_depth, 1725 stack_imm(nfp_prog)); 1726 emit_alu(nfp_prog, reg_both(dst), 1727 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg); 1728 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1729 } else { 1730 wrp_reg_mov(nfp_prog, dst, src); 1731 wrp_reg_mov(nfp_prog, dst + 1, src + 1); 1732 } 1733 1734 return 0; 1735 } 1736 1737 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1738 { 1739 u64 imm = meta->insn.imm; /* sign extend */ 1740 1741 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U); 1742 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32); 1743 1744 return 0; 1745 } 1746 1747 static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1748 { 1749 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR); 1750 } 1751 1752 static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1753 { 1754 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm); 1755 } 1756 1757 static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1758 { 1759 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND); 1760 } 1761 1762 static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1763 { 1764 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 1765 } 1766 1767 static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1768 { 1769 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR); 1770 } 1771 1772 static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1773 { 1774 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 1775 } 1776 1777 static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1778 { 1779 const struct bpf_insn *insn = &meta->insn; 1780 1781 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1782 reg_a(insn->dst_reg * 2), ALU_OP_ADD, 1783 reg_b(insn->src_reg * 2)); 1784 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1785 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C, 1786 reg_b(insn->src_reg * 2 + 1)); 1787 1788 return 0; 1789 } 1790 1791 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1792 { 1793 const struct bpf_insn *insn = &meta->insn; 1794 u64 imm = insn->imm; /* sign extend */ 1795 1796 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1797 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1798 1799 return 0; 1800 } 1801 1802 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1803 { 1804 const struct bpf_insn *insn = &meta->insn; 1805 1806 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1807 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1808 reg_b(insn->src_reg * 2)); 1809 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1810 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1811 reg_b(insn->src_reg * 2 + 1)); 1812 1813 return 0; 1814 } 1815 1816 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1817 { 1818 const struct bpf_insn *insn = &meta->insn; 1819 u64 imm = insn->imm; /* sign extend */ 1820 1821 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1822 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1823 1824 return 0; 1825 } 1826 1827 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1828 { 1829 return wrp_mul(nfp_prog, meta, true, true); 1830 } 1831 1832 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1833 { 1834 return wrp_mul(nfp_prog, meta, true, false); 1835 } 1836 1837 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1838 { 1839 const struct bpf_insn *insn = &meta->insn; 1840 1841 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); 1842 } 1843 1844 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1845 { 1846 /* NOTE: verifier hook has rejected cases for which verifier doesn't 1847 * know whether the source operand is constant or not. 1848 */ 1849 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); 1850 } 1851 1852 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1853 { 1854 const struct bpf_insn *insn = &meta->insn; 1855 1856 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1857 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1858 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1859 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1860 1861 return 0; 1862 } 1863 1864 /* Pseudo code: 1865 * if shift_amt >= 32 1866 * dst_high = dst_low << shift_amt[4:0] 1867 * dst_low = 0; 1868 * else 1869 * dst_high = (dst_high, dst_low) >> (32 - shift_amt) 1870 * dst_low = dst_low << shift_amt 1871 * 1872 * The indirect shift will use the same logic at runtime. 1873 */ 1874 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1875 { 1876 if (shift_amt < 32) { 1877 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), 1878 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, 1879 32 - shift_amt); 1880 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1881 reg_b(dst), SHF_SC_L_SHF, shift_amt); 1882 } else if (shift_amt == 32) { 1883 wrp_reg_mov(nfp_prog, dst + 1, dst); 1884 wrp_immed(nfp_prog, reg_both(dst), 0); 1885 } else if (shift_amt > 32) { 1886 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1887 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); 1888 wrp_immed(nfp_prog, reg_both(dst), 0); 1889 } 1890 1891 return 0; 1892 } 1893 1894 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1895 { 1896 const struct bpf_insn *insn = &meta->insn; 1897 u8 dst = insn->dst_reg * 2; 1898 1899 return __shl_imm64(nfp_prog, dst, insn->imm); 1900 } 1901 1902 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1903 { 1904 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, 1905 reg_b(src)); 1906 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); 1907 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, 1908 reg_b(dst), SHF_SC_R_DSHF); 1909 } 1910 1911 /* NOTE: for indirect left shift, HIGH part should be calculated first. */ 1912 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1913 { 1914 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 1915 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1916 reg_b(dst), SHF_SC_L_SHF); 1917 } 1918 1919 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1920 { 1921 shl_reg64_lt32_high(nfp_prog, dst, src); 1922 shl_reg64_lt32_low(nfp_prog, dst, src); 1923 } 1924 1925 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 1926 { 1927 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 1928 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1929 reg_b(dst), SHF_SC_L_SHF); 1930 wrp_immed(nfp_prog, reg_both(dst), 0); 1931 } 1932 1933 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1934 { 1935 const struct bpf_insn *insn = &meta->insn; 1936 u64 umin, umax; 1937 u8 dst, src; 1938 1939 dst = insn->dst_reg * 2; 1940 umin = meta->umin_src; 1941 umax = meta->umax_src; 1942 if (umin == umax) 1943 return __shl_imm64(nfp_prog, dst, umin); 1944 1945 src = insn->src_reg * 2; 1946 if (umax < 32) { 1947 shl_reg64_lt32(nfp_prog, dst, src); 1948 } else if (umin >= 32) { 1949 shl_reg64_ge32(nfp_prog, dst, src); 1950 } else { 1951 /* Generate different instruction sequences depending on runtime 1952 * value of shift amount. 1953 */ 1954 u16 label_ge32, label_end; 1955 1956 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; 1957 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 1958 1959 shl_reg64_lt32_high(nfp_prog, dst, src); 1960 label_end = nfp_prog_current_offset(nfp_prog) + 6; 1961 emit_br(nfp_prog, BR_UNC, label_end, 2); 1962 /* shl_reg64_lt32_low packed in delay slot. */ 1963 shl_reg64_lt32_low(nfp_prog, dst, src); 1964 1965 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 1966 return -EINVAL; 1967 shl_reg64_ge32(nfp_prog, dst, src); 1968 1969 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 1970 return -EINVAL; 1971 } 1972 1973 return 0; 1974 } 1975 1976 /* Pseudo code: 1977 * if shift_amt >= 32 1978 * dst_high = 0; 1979 * dst_low = dst_high >> shift_amt[4:0] 1980 * else 1981 * dst_high = dst_high >> shift_amt 1982 * dst_low = (dst_high, dst_low) >> shift_amt 1983 * 1984 * The indirect shift will use the same logic at runtime. 1985 */ 1986 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1987 { 1988 if (shift_amt < 32) { 1989 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 1990 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 1991 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 1992 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 1993 } else if (shift_amt == 32) { 1994 wrp_reg_mov(nfp_prog, dst, dst + 1); 1995 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1996 } else if (shift_amt > 32) { 1997 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 1998 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 1999 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2000 } 2001 2002 return 0; 2003 } 2004 2005 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2006 { 2007 const struct bpf_insn *insn = &meta->insn; 2008 u8 dst = insn->dst_reg * 2; 2009 2010 return __shr_imm64(nfp_prog, dst, insn->imm); 2011 } 2012 2013 /* NOTE: for indirect right shift, LOW part should be calculated first. */ 2014 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2015 { 2016 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2017 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2018 reg_b(dst + 1), SHF_SC_R_SHF); 2019 } 2020 2021 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2022 { 2023 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2024 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2025 reg_b(dst), SHF_SC_R_DSHF); 2026 } 2027 2028 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2029 { 2030 shr_reg64_lt32_low(nfp_prog, dst, src); 2031 shr_reg64_lt32_high(nfp_prog, dst, src); 2032 } 2033 2034 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2035 { 2036 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2037 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2038 reg_b(dst + 1), SHF_SC_R_SHF); 2039 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2040 } 2041 2042 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2043 { 2044 const struct bpf_insn *insn = &meta->insn; 2045 u64 umin, umax; 2046 u8 dst, src; 2047 2048 dst = insn->dst_reg * 2; 2049 umin = meta->umin_src; 2050 umax = meta->umax_src; 2051 if (umin == umax) 2052 return __shr_imm64(nfp_prog, dst, umin); 2053 2054 src = insn->src_reg * 2; 2055 if (umax < 32) { 2056 shr_reg64_lt32(nfp_prog, dst, src); 2057 } else if (umin >= 32) { 2058 shr_reg64_ge32(nfp_prog, dst, src); 2059 } else { 2060 /* Generate different instruction sequences depending on runtime 2061 * value of shift amount. 2062 */ 2063 u16 label_ge32, label_end; 2064 2065 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2066 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2067 shr_reg64_lt32_low(nfp_prog, dst, src); 2068 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2069 emit_br(nfp_prog, BR_UNC, label_end, 2); 2070 /* shr_reg64_lt32_high packed in delay slot. */ 2071 shr_reg64_lt32_high(nfp_prog, dst, src); 2072 2073 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2074 return -EINVAL; 2075 shr_reg64_ge32(nfp_prog, dst, src); 2076 2077 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2078 return -EINVAL; 2079 } 2080 2081 return 0; 2082 } 2083 2084 /* Code logic is the same as __shr_imm64 except ashr requires signedness bit 2085 * told through PREV_ALU result. 2086 */ 2087 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2088 { 2089 if (shift_amt < 32) { 2090 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2091 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2092 /* Set signedness bit. */ 2093 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2094 reg_imm(0)); 2095 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2096 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2097 } else if (shift_amt == 32) { 2098 /* NOTE: this also helps setting signedness bit. */ 2099 wrp_reg_mov(nfp_prog, dst, dst + 1); 2100 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2101 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2102 } else if (shift_amt > 32) { 2103 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2104 reg_imm(0)); 2105 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2106 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2107 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2108 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2109 } 2110 2111 return 0; 2112 } 2113 2114 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2115 { 2116 const struct bpf_insn *insn = &meta->insn; 2117 u8 dst = insn->dst_reg * 2; 2118 2119 return __ashr_imm64(nfp_prog, dst, insn->imm); 2120 } 2121 2122 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2123 { 2124 /* NOTE: the first insn will set both indirect shift amount (source A) 2125 * and signedness bit (MSB of result). 2126 */ 2127 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2128 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2129 reg_b(dst + 1), SHF_SC_R_SHF); 2130 } 2131 2132 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2133 { 2134 /* NOTE: it is the same as logic shift because we don't need to shift in 2135 * signedness bit when the shift amount is less than 32. 2136 */ 2137 return shr_reg64_lt32_low(nfp_prog, dst, src); 2138 } 2139 2140 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2141 { 2142 ashr_reg64_lt32_low(nfp_prog, dst, src); 2143 ashr_reg64_lt32_high(nfp_prog, dst, src); 2144 } 2145 2146 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2147 { 2148 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2149 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2150 reg_b(dst + 1), SHF_SC_R_SHF); 2151 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2152 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2153 } 2154 2155 /* Like ashr_imm64, but need to use indirect shift. */ 2156 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2157 { 2158 const struct bpf_insn *insn = &meta->insn; 2159 u64 umin, umax; 2160 u8 dst, src; 2161 2162 dst = insn->dst_reg * 2; 2163 umin = meta->umin_src; 2164 umax = meta->umax_src; 2165 if (umin == umax) 2166 return __ashr_imm64(nfp_prog, dst, umin); 2167 2168 src = insn->src_reg * 2; 2169 if (umax < 32) { 2170 ashr_reg64_lt32(nfp_prog, dst, src); 2171 } else if (umin >= 32) { 2172 ashr_reg64_ge32(nfp_prog, dst, src); 2173 } else { 2174 u16 label_ge32, label_end; 2175 2176 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2177 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2178 ashr_reg64_lt32_low(nfp_prog, dst, src); 2179 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2180 emit_br(nfp_prog, BR_UNC, label_end, 2); 2181 /* ashr_reg64_lt32_high packed in delay slot. */ 2182 ashr_reg64_lt32_high(nfp_prog, dst, src); 2183 2184 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2185 return -EINVAL; 2186 ashr_reg64_ge32(nfp_prog, dst, src); 2187 2188 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2189 return -EINVAL; 2190 } 2191 2192 return 0; 2193 } 2194 2195 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2196 { 2197 const struct bpf_insn *insn = &meta->insn; 2198 2199 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 2200 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2201 2202 return 0; 2203 } 2204 2205 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2206 { 2207 const struct bpf_insn *insn = &meta->insn; 2208 2209 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 2210 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2211 2212 return 0; 2213 } 2214 2215 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2216 { 2217 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 2218 } 2219 2220 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2221 { 2222 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 2223 } 2224 2225 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2226 { 2227 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 2228 } 2229 2230 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2231 { 2232 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 2233 } 2234 2235 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2236 { 2237 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 2238 } 2239 2240 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2241 { 2242 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2243 } 2244 2245 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2246 { 2247 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 2248 } 2249 2250 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2251 { 2252 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2253 } 2254 2255 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2256 { 2257 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 2258 } 2259 2260 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2261 { 2262 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2263 } 2264 2265 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2266 { 2267 return wrp_mul(nfp_prog, meta, false, true); 2268 } 2269 2270 static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2271 { 2272 return wrp_mul(nfp_prog, meta, false, false); 2273 } 2274 2275 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2276 { 2277 return div_reg64(nfp_prog, meta); 2278 } 2279 2280 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2281 { 2282 return div_imm64(nfp_prog, meta); 2283 } 2284 2285 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2286 { 2287 u8 dst = meta->insn.dst_reg * 2; 2288 2289 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2290 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2291 2292 return 0; 2293 } 2294 2295 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2296 { 2297 const struct bpf_insn *insn = &meta->insn; 2298 2299 if (!insn->imm) 2300 return 1; /* TODO: zero shift means indirect */ 2301 2302 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 2303 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 2304 SHF_SC_L_SHF, insn->imm); 2305 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2306 2307 return 0; 2308 } 2309 2310 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2311 { 2312 const struct bpf_insn *insn = &meta->insn; 2313 u8 gpr = insn->dst_reg * 2; 2314 2315 switch (insn->imm) { 2316 case 16: 2317 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2318 SHF_SC_R_ROT, 8); 2319 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2320 SHF_SC_R_SHF, 16); 2321 2322 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2323 break; 2324 case 32: 2325 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2326 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2327 break; 2328 case 64: 2329 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2330 2331 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2332 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2333 break; 2334 } 2335 2336 return 0; 2337 } 2338 2339 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2340 { 2341 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2342 u32 imm_lo, imm_hi; 2343 u8 dst; 2344 2345 dst = prev->insn.dst_reg * 2; 2346 imm_lo = prev->insn.imm; 2347 imm_hi = meta->insn.imm; 2348 2349 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2350 2351 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2352 if (imm_hi == imm_lo) 2353 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2354 else 2355 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2356 2357 return 0; 2358 } 2359 2360 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2361 { 2362 meta->double_cb = imm_ld8_part2; 2363 return 0; 2364 } 2365 2366 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2367 { 2368 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2369 } 2370 2371 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2372 { 2373 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2374 } 2375 2376 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2377 { 2378 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2379 } 2380 2381 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2382 { 2383 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2384 meta->insn.src_reg * 2, 1); 2385 } 2386 2387 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2388 { 2389 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2390 meta->insn.src_reg * 2, 2); 2391 } 2392 2393 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2394 { 2395 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2396 meta->insn.src_reg * 2, 4); 2397 } 2398 2399 static int 2400 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2401 unsigned int size, unsigned int ptr_off) 2402 { 2403 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2404 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2405 true, wrp_lmem_load); 2406 } 2407 2408 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2409 u8 size) 2410 { 2411 swreg dst = reg_both(meta->insn.dst_reg * 2); 2412 2413 switch (meta->insn.off) { 2414 case offsetof(struct __sk_buff, len): 2415 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2416 return -EOPNOTSUPP; 2417 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2418 break; 2419 case offsetof(struct __sk_buff, data): 2420 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2421 return -EOPNOTSUPP; 2422 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2423 break; 2424 case offsetof(struct __sk_buff, data_end): 2425 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2426 return -EOPNOTSUPP; 2427 emit_alu(nfp_prog, dst, 2428 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2429 break; 2430 default: 2431 return -EOPNOTSUPP; 2432 } 2433 2434 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2435 2436 return 0; 2437 } 2438 2439 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2440 u8 size) 2441 { 2442 swreg dst = reg_both(meta->insn.dst_reg * 2); 2443 2444 switch (meta->insn.off) { 2445 case offsetof(struct xdp_md, data): 2446 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2447 return -EOPNOTSUPP; 2448 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2449 break; 2450 case offsetof(struct xdp_md, data_end): 2451 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2452 return -EOPNOTSUPP; 2453 emit_alu(nfp_prog, dst, 2454 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2455 break; 2456 default: 2457 return -EOPNOTSUPP; 2458 } 2459 2460 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2461 2462 return 0; 2463 } 2464 2465 static int 2466 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2467 unsigned int size) 2468 { 2469 swreg tmp_reg; 2470 2471 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2472 2473 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2474 tmp_reg, meta->insn.dst_reg * 2, size); 2475 } 2476 2477 static int 2478 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2479 unsigned int size) 2480 { 2481 swreg tmp_reg; 2482 2483 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2484 2485 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2486 tmp_reg, meta->insn.dst_reg * 2, size); 2487 } 2488 2489 static void 2490 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2491 struct nfp_insn_meta *meta) 2492 { 2493 s16 range_start = meta->pkt_cache.range_start; 2494 s16 range_end = meta->pkt_cache.range_end; 2495 swreg src_base, off; 2496 u8 xfer_num, len; 2497 bool indir; 2498 2499 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2500 src_base = reg_a(meta->insn.src_reg * 2); 2501 len = range_end - range_start; 2502 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2503 2504 indir = len > 8 * REG_WIDTH; 2505 /* Setup PREV_ALU for indirect mode. */ 2506 if (indir) 2507 wrp_immed(nfp_prog, reg_none(), 2508 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2509 2510 /* Cache memory into transfer-in registers. */ 2511 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2512 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2513 } 2514 2515 static int 2516 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2517 struct nfp_insn_meta *meta, 2518 unsigned int size) 2519 { 2520 s16 range_start = meta->pkt_cache.range_start; 2521 s16 insn_off = meta->insn.off - range_start; 2522 swreg dst_lo, dst_hi, src_lo, src_mid; 2523 u8 dst_gpr = meta->insn.dst_reg * 2; 2524 u8 len_lo = size, len_mid = 0; 2525 u8 idx = insn_off / REG_WIDTH; 2526 u8 off = insn_off % REG_WIDTH; 2527 2528 dst_hi = reg_both(dst_gpr + 1); 2529 dst_lo = reg_both(dst_gpr); 2530 src_lo = reg_xfer(idx); 2531 2532 /* The read length could involve as many as three registers. */ 2533 if (size > REG_WIDTH - off) { 2534 /* Calculate the part in the second register. */ 2535 len_lo = REG_WIDTH - off; 2536 len_mid = size - len_lo; 2537 2538 /* Calculate the part in the third register. */ 2539 if (size > 2 * REG_WIDTH - off) 2540 len_mid = REG_WIDTH; 2541 } 2542 2543 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2544 2545 if (!len_mid) { 2546 wrp_immed(nfp_prog, dst_hi, 0); 2547 return 0; 2548 } 2549 2550 src_mid = reg_xfer(idx + 1); 2551 2552 if (size <= REG_WIDTH) { 2553 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2554 wrp_immed(nfp_prog, dst_hi, 0); 2555 } else { 2556 swreg src_hi = reg_xfer(idx + 2); 2557 2558 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2559 REG_WIDTH - len_lo, len_lo); 2560 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2561 REG_WIDTH - len_lo); 2562 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2563 len_lo); 2564 } 2565 2566 return 0; 2567 } 2568 2569 static int 2570 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2571 struct nfp_insn_meta *meta, 2572 unsigned int size) 2573 { 2574 swreg dst_lo, dst_hi, src_lo; 2575 u8 dst_gpr, idx; 2576 2577 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2578 dst_gpr = meta->insn.dst_reg * 2; 2579 dst_hi = reg_both(dst_gpr + 1); 2580 dst_lo = reg_both(dst_gpr); 2581 src_lo = reg_xfer(idx); 2582 2583 if (size < REG_WIDTH) { 2584 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2585 wrp_immed(nfp_prog, dst_hi, 0); 2586 } else if (size == REG_WIDTH) { 2587 wrp_mov(nfp_prog, dst_lo, src_lo); 2588 wrp_immed(nfp_prog, dst_hi, 0); 2589 } else { 2590 swreg src_hi = reg_xfer(idx + 1); 2591 2592 wrp_mov(nfp_prog, dst_lo, src_lo); 2593 wrp_mov(nfp_prog, dst_hi, src_hi); 2594 } 2595 2596 return 0; 2597 } 2598 2599 static int 2600 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2601 struct nfp_insn_meta *meta, unsigned int size) 2602 { 2603 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2604 2605 if (IS_ALIGNED(off, REG_WIDTH)) 2606 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2607 2608 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2609 } 2610 2611 static int 2612 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2613 unsigned int size) 2614 { 2615 if (meta->ldst_gather_len) 2616 return nfp_cpp_memcpy(nfp_prog, meta); 2617 2618 if (meta->ptr.type == PTR_TO_CTX) { 2619 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2620 return mem_ldx_xdp(nfp_prog, meta, size); 2621 else 2622 return mem_ldx_skb(nfp_prog, meta, size); 2623 } 2624 2625 if (meta->ptr.type == PTR_TO_PACKET) { 2626 if (meta->pkt_cache.range_end) { 2627 if (meta->pkt_cache.do_init) 2628 mem_ldx_data_init_pktcache(nfp_prog, meta); 2629 2630 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2631 } else { 2632 return mem_ldx_data(nfp_prog, meta, size); 2633 } 2634 } 2635 2636 if (meta->ptr.type == PTR_TO_STACK) 2637 return mem_ldx_stack(nfp_prog, meta, size, 2638 meta->ptr.off + meta->ptr.var_off.value); 2639 2640 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2641 return mem_ldx_emem(nfp_prog, meta, size); 2642 2643 return -EOPNOTSUPP; 2644 } 2645 2646 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2647 { 2648 return mem_ldx(nfp_prog, meta, 1); 2649 } 2650 2651 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2652 { 2653 return mem_ldx(nfp_prog, meta, 2); 2654 } 2655 2656 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2657 { 2658 return mem_ldx(nfp_prog, meta, 4); 2659 } 2660 2661 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2662 { 2663 return mem_ldx(nfp_prog, meta, 8); 2664 } 2665 2666 static int 2667 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2668 unsigned int size) 2669 { 2670 u64 imm = meta->insn.imm; /* sign extend */ 2671 swreg off_reg; 2672 2673 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2674 2675 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2676 imm, size); 2677 } 2678 2679 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2680 unsigned int size) 2681 { 2682 if (meta->ptr.type == PTR_TO_PACKET) 2683 return mem_st_data(nfp_prog, meta, size); 2684 2685 return -EOPNOTSUPP; 2686 } 2687 2688 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2689 { 2690 return mem_st(nfp_prog, meta, 1); 2691 } 2692 2693 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2694 { 2695 return mem_st(nfp_prog, meta, 2); 2696 } 2697 2698 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2699 { 2700 return mem_st(nfp_prog, meta, 4); 2701 } 2702 2703 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2704 { 2705 return mem_st(nfp_prog, meta, 8); 2706 } 2707 2708 static int 2709 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2710 unsigned int size) 2711 { 2712 swreg off_reg; 2713 2714 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2715 2716 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2717 meta->insn.src_reg * 2, size); 2718 } 2719 2720 static int 2721 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2722 unsigned int size, unsigned int ptr_off) 2723 { 2724 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2725 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2726 false, wrp_lmem_store); 2727 } 2728 2729 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2730 { 2731 switch (meta->insn.off) { 2732 case offsetof(struct xdp_md, rx_queue_index): 2733 return nfp_queue_select(nfp_prog, meta); 2734 } 2735 2736 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2737 return -EOPNOTSUPP; 2738 } 2739 2740 static int 2741 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2742 unsigned int size) 2743 { 2744 if (meta->ptr.type == PTR_TO_PACKET) 2745 return mem_stx_data(nfp_prog, meta, size); 2746 2747 if (meta->ptr.type == PTR_TO_STACK) 2748 return mem_stx_stack(nfp_prog, meta, size, 2749 meta->ptr.off + meta->ptr.var_off.value); 2750 2751 return -EOPNOTSUPP; 2752 } 2753 2754 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2755 { 2756 return mem_stx(nfp_prog, meta, 1); 2757 } 2758 2759 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2760 { 2761 return mem_stx(nfp_prog, meta, 2); 2762 } 2763 2764 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2765 { 2766 if (meta->ptr.type == PTR_TO_CTX) 2767 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2768 return mem_stx_xdp(nfp_prog, meta); 2769 return mem_stx(nfp_prog, meta, 4); 2770 } 2771 2772 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2773 { 2774 return mem_stx(nfp_prog, meta, 8); 2775 } 2776 2777 static int 2778 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2779 { 2780 u8 dst_gpr = meta->insn.dst_reg * 2; 2781 u8 src_gpr = meta->insn.src_reg * 2; 2782 unsigned int full_add, out; 2783 swreg addra, addrb, off; 2784 2785 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2786 2787 /* We can fit 16 bits into command immediate, if we know the immediate 2788 * is guaranteed to either always or never fit into 16 bit we only 2789 * generate code to handle that particular case, otherwise generate 2790 * code for both. 2791 */ 2792 out = nfp_prog_current_offset(nfp_prog); 2793 full_add = nfp_prog_current_offset(nfp_prog); 2794 2795 if (meta->insn.off) { 2796 out += 2; 2797 full_add += 2; 2798 } 2799 if (meta->xadd_maybe_16bit) { 2800 out += 3; 2801 full_add += 3; 2802 } 2803 if (meta->xadd_over_16bit) 2804 out += 2 + is64; 2805 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2806 out += 5; 2807 full_add += 5; 2808 } 2809 2810 /* Generate the branch for choosing add_imm vs add */ 2811 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2812 swreg max_imm = imm_a(nfp_prog); 2813 2814 wrp_immed(nfp_prog, max_imm, 0xffff); 2815 emit_alu(nfp_prog, reg_none(), 2816 max_imm, ALU_OP_SUB, reg_b(src_gpr)); 2817 emit_alu(nfp_prog, reg_none(), 2818 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); 2819 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); 2820 /* defer for add */ 2821 } 2822 2823 /* If insn has an offset add to the address */ 2824 if (!meta->insn.off) { 2825 addra = reg_a(dst_gpr); 2826 addrb = reg_b(dst_gpr + 1); 2827 } else { 2828 emit_alu(nfp_prog, imma_a(nfp_prog), 2829 reg_a(dst_gpr), ALU_OP_ADD, off); 2830 emit_alu(nfp_prog, imma_b(nfp_prog), 2831 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); 2832 addra = imma_a(nfp_prog); 2833 addrb = imma_b(nfp_prog); 2834 } 2835 2836 /* Generate the add_imm if 16 bits are possible */ 2837 if (meta->xadd_maybe_16bit) { 2838 swreg prev_alu = imm_a(nfp_prog); 2839 2840 wrp_immed(nfp_prog, prev_alu, 2841 FIELD_PREP(CMD_OVE_DATA, 2) | 2842 CMD_OVE_LEN | 2843 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); 2844 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); 2845 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, 2846 addra, addrb, 0, CMD_CTX_NO_SWAP); 2847 2848 if (meta->xadd_over_16bit) 2849 emit_br(nfp_prog, BR_UNC, out, 0); 2850 } 2851 2852 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) 2853 return -EINVAL; 2854 2855 /* Generate the add if 16 bits are not guaranteed */ 2856 if (meta->xadd_over_16bit) { 2857 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, 2858 addra, addrb, is64 << 2, 2859 is64 ? CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2860 2861 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2862 if (is64) 2863 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 2864 } 2865 2866 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 2867 return -EINVAL; 2868 2869 return 0; 2870 } 2871 2872 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2873 { 2874 return mem_xadd(nfp_prog, meta, false); 2875 } 2876 2877 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2878 { 2879 return mem_xadd(nfp_prog, meta, true); 2880 } 2881 2882 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2883 { 2884 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 2885 2886 return 0; 2887 } 2888 2889 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2890 { 2891 const struct bpf_insn *insn = &meta->insn; 2892 u64 imm = insn->imm; /* sign extend */ 2893 swreg or1, or2, tmp_reg; 2894 2895 or1 = reg_a(insn->dst_reg * 2); 2896 or2 = reg_b(insn->dst_reg * 2 + 1); 2897 2898 if (imm & ~0U) { 2899 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2900 emit_alu(nfp_prog, imm_a(nfp_prog), 2901 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2902 or1 = imm_a(nfp_prog); 2903 } 2904 2905 if (imm >> 32) { 2906 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2907 emit_alu(nfp_prog, imm_b(nfp_prog), 2908 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2909 or2 = imm_b(nfp_prog); 2910 } 2911 2912 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 2913 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2914 2915 return 0; 2916 } 2917 2918 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2919 { 2920 const struct bpf_insn *insn = &meta->insn; 2921 u64 imm = insn->imm; /* sign extend */ 2922 swreg tmp_reg; 2923 2924 if (!imm) { 2925 meta->skip = true; 2926 return 0; 2927 } 2928 2929 if (imm & ~0U) { 2930 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2931 emit_alu(nfp_prog, reg_none(), 2932 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg); 2933 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2934 } 2935 2936 if (imm >> 32) { 2937 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2938 emit_alu(nfp_prog, reg_none(), 2939 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); 2940 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2941 } 2942 2943 return 0; 2944 } 2945 2946 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2947 { 2948 const struct bpf_insn *insn = &meta->insn; 2949 u64 imm = insn->imm; /* sign extend */ 2950 swreg tmp_reg; 2951 2952 if (!imm) { 2953 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 2954 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 2955 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2956 return 0; 2957 } 2958 2959 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2960 emit_alu(nfp_prog, reg_none(), 2961 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2962 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2963 2964 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2965 emit_alu(nfp_prog, reg_none(), 2966 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2967 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2968 2969 return 0; 2970 } 2971 2972 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2973 { 2974 const struct bpf_insn *insn = &meta->insn; 2975 2976 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 2977 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 2978 emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 2979 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 2980 emit_alu(nfp_prog, reg_none(), 2981 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 2982 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2983 2984 return 0; 2985 } 2986 2987 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2988 { 2989 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 2990 } 2991 2992 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2993 { 2994 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 2995 } 2996 2997 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2998 { 2999 switch (meta->insn.imm) { 3000 case BPF_FUNC_xdp_adjust_head: 3001 return adjust_head(nfp_prog, meta); 3002 case BPF_FUNC_map_lookup_elem: 3003 case BPF_FUNC_map_update_elem: 3004 case BPF_FUNC_map_delete_elem: 3005 return map_call_stack_common(nfp_prog, meta); 3006 case BPF_FUNC_get_prandom_u32: 3007 return nfp_get_prandom_u32(nfp_prog, meta); 3008 case BPF_FUNC_perf_event_output: 3009 return nfp_perf_event_output(nfp_prog, meta); 3010 default: 3011 WARN_ONCE(1, "verifier allowed unsupported function\n"); 3012 return -EOPNOTSUPP; 3013 } 3014 } 3015 3016 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3017 { 3018 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 3019 3020 return 0; 3021 } 3022 3023 static const instr_cb_t instr_cb[256] = { 3024 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 3025 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 3026 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 3027 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 3028 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 3029 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 3030 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 3031 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 3032 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 3033 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 3034 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 3035 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 3036 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, 3037 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, 3038 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, 3039 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, 3040 [BPF_ALU64 | BPF_NEG] = neg_reg64, 3041 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, 3042 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 3043 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, 3044 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 3045 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, 3046 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, 3047 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 3048 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 3049 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 3050 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 3051 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 3052 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 3053 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 3054 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 3055 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 3056 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 3057 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 3058 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 3059 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, 3060 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, 3061 [BPF_ALU | BPF_DIV | BPF_X] = div_reg, 3062 [BPF_ALU | BPF_DIV | BPF_K] = div_imm, 3063 [BPF_ALU | BPF_NEG] = neg_reg, 3064 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 3065 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 3066 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 3067 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 3068 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 3069 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 3070 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 3071 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 3072 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 3073 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 3074 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 3075 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 3076 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 3077 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 3078 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 3079 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 3080 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 3081 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 3082 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, 3083 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 3084 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 3085 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 3086 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 3087 [BPF_JMP | BPF_JA | BPF_K] = jump, 3088 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 3089 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, 3090 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, 3091 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, 3092 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, 3093 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, 3094 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, 3095 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, 3096 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, 3097 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 3098 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 3099 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 3100 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, 3101 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, 3102 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, 3103 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, 3104 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, 3105 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, 3106 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, 3107 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, 3108 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 3109 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 3110 [BPF_JMP | BPF_CALL] = call, 3111 [BPF_JMP | BPF_EXIT] = goto_out, 3112 }; 3113 3114 /* --- Assembler logic --- */ 3115 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 3116 { 3117 struct nfp_insn_meta *meta, *jmp_dst; 3118 u32 idx, br_idx; 3119 3120 list_for_each_entry(meta, &nfp_prog->insns, l) { 3121 if (meta->skip) 3122 continue; 3123 if (meta->insn.code == (BPF_JMP | BPF_CALL)) 3124 continue; 3125 if (BPF_CLASS(meta->insn.code) != BPF_JMP) 3126 continue; 3127 3128 if (list_is_last(&meta->l, &nfp_prog->insns)) 3129 br_idx = nfp_prog->last_bpf_off; 3130 else 3131 br_idx = list_next_entry(meta, l)->off - 1; 3132 3133 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 3134 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 3135 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 3136 return -ELOOP; 3137 } 3138 /* Leave special branches for later */ 3139 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3140 RELO_BR_REL) 3141 continue; 3142 3143 if (!meta->jmp_dst) { 3144 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 3145 return -ELOOP; 3146 } 3147 3148 jmp_dst = meta->jmp_dst; 3149 3150 if (jmp_dst->skip) { 3151 pr_err("Branch landing on removed instruction!!\n"); 3152 return -ELOOP; 3153 } 3154 3155 for (idx = meta->off; idx <= br_idx; idx++) { 3156 if (!nfp_is_br(nfp_prog->prog[idx])) 3157 continue; 3158 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 3159 } 3160 } 3161 3162 return 0; 3163 } 3164 3165 static void nfp_intro(struct nfp_prog *nfp_prog) 3166 { 3167 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 3168 emit_alu(nfp_prog, plen_reg(nfp_prog), 3169 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 3170 } 3171 3172 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 3173 { 3174 /* TC direct-action mode: 3175 * 0,1 ok NOT SUPPORTED[1] 3176 * 2 drop 0x22 -> drop, count as stat1 3177 * 4,5 nuke 0x02 -> drop 3178 * 7 redir 0x44 -> redir, count as stat2 3179 * * unspec 0x11 -> pass, count as stat0 3180 * 3181 * [1] We can't support OK and RECLASSIFY because we can't tell TC 3182 * the exact decision made. We are forced to support UNSPEC 3183 * to handle aborts so that's the only one we handle for passing 3184 * packets up the stack. 3185 */ 3186 /* Target for aborts */ 3187 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3188 3189 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3190 3191 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3192 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 3193 3194 /* Target for normal exits */ 3195 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3196 3197 /* if R0 > 7 jump to abort */ 3198 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 3199 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3200 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3201 3202 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 3203 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 3204 3205 emit_shf(nfp_prog, reg_a(1), 3206 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 3207 3208 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3209 emit_shf(nfp_prog, reg_a(2), 3210 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3211 3212 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3213 emit_shf(nfp_prog, reg_b(2), 3214 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 3215 3216 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3217 3218 emit_shf(nfp_prog, reg_b(2), 3219 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 3220 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3221 } 3222 3223 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 3224 { 3225 /* XDP return codes: 3226 * 0 aborted 0x82 -> drop, count as stat3 3227 * 1 drop 0x22 -> drop, count as stat1 3228 * 2 pass 0x11 -> pass, count as stat0 3229 * 3 tx 0x44 -> redir, count as stat2 3230 * * unknown 0x82 -> drop, count as stat3 3231 */ 3232 /* Target for aborts */ 3233 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3234 3235 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3236 3237 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3238 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 3239 3240 /* Target for normal exits */ 3241 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3242 3243 /* if R0 > 3 jump to abort */ 3244 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 3245 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3246 3247 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 3248 3249 emit_shf(nfp_prog, reg_a(1), 3250 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 3251 3252 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3253 emit_shf(nfp_prog, reg_b(2), 3254 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3255 3256 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3257 3258 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3259 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3260 } 3261 3262 static void nfp_outro(struct nfp_prog *nfp_prog) 3263 { 3264 switch (nfp_prog->type) { 3265 case BPF_PROG_TYPE_SCHED_CLS: 3266 nfp_outro_tc_da(nfp_prog); 3267 break; 3268 case BPF_PROG_TYPE_XDP: 3269 nfp_outro_xdp(nfp_prog); 3270 break; 3271 default: 3272 WARN_ON(1); 3273 } 3274 } 3275 3276 static int nfp_translate(struct nfp_prog *nfp_prog) 3277 { 3278 struct nfp_insn_meta *meta; 3279 int err; 3280 3281 nfp_intro(nfp_prog); 3282 if (nfp_prog->error) 3283 return nfp_prog->error; 3284 3285 list_for_each_entry(meta, &nfp_prog->insns, l) { 3286 instr_cb_t cb = instr_cb[meta->insn.code]; 3287 3288 meta->off = nfp_prog_current_offset(nfp_prog); 3289 3290 if (meta->skip) { 3291 nfp_prog->n_translated++; 3292 continue; 3293 } 3294 3295 if (nfp_meta_has_prev(nfp_prog, meta) && 3296 nfp_meta_prev(meta)->double_cb) 3297 cb = nfp_meta_prev(meta)->double_cb; 3298 if (!cb) 3299 return -ENOENT; 3300 err = cb(nfp_prog, meta); 3301 if (err) 3302 return err; 3303 if (nfp_prog->error) 3304 return nfp_prog->error; 3305 3306 nfp_prog->n_translated++; 3307 } 3308 3309 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 3310 3311 nfp_outro(nfp_prog); 3312 if (nfp_prog->error) 3313 return nfp_prog->error; 3314 3315 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 3316 if (nfp_prog->error) 3317 return nfp_prog->error; 3318 3319 return nfp_fixup_branches(nfp_prog); 3320 } 3321 3322 /* --- Optimizations --- */ 3323 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 3324 { 3325 struct nfp_insn_meta *meta; 3326 3327 list_for_each_entry(meta, &nfp_prog->insns, l) { 3328 struct bpf_insn insn = meta->insn; 3329 3330 /* Programs converted from cBPF start with register xoring */ 3331 if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) && 3332 insn.src_reg == insn.dst_reg) 3333 continue; 3334 3335 /* Programs start with R6 = R1 but we ignore the skb pointer */ 3336 if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) && 3337 insn.src_reg == 1 && insn.dst_reg == 6) 3338 meta->skip = true; 3339 3340 /* Return as soon as something doesn't match */ 3341 if (!meta->skip) 3342 return; 3343 } 3344 } 3345 3346 /* abs(insn.imm) will fit better into unrestricted reg immediate - 3347 * convert add/sub of a negative number into a sub/add of a positive one. 3348 */ 3349 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) 3350 { 3351 struct nfp_insn_meta *meta; 3352 3353 list_for_each_entry(meta, &nfp_prog->insns, l) { 3354 struct bpf_insn insn = meta->insn; 3355 3356 if (meta->skip) 3357 continue; 3358 3359 if (BPF_CLASS(insn.code) != BPF_ALU && 3360 BPF_CLASS(insn.code) != BPF_ALU64 && 3361 BPF_CLASS(insn.code) != BPF_JMP) 3362 continue; 3363 if (BPF_SRC(insn.code) != BPF_K) 3364 continue; 3365 if (insn.imm >= 0) 3366 continue; 3367 3368 if (BPF_CLASS(insn.code) == BPF_JMP) { 3369 switch (BPF_OP(insn.code)) { 3370 case BPF_JGE: 3371 case BPF_JSGE: 3372 case BPF_JLT: 3373 case BPF_JSLT: 3374 meta->jump_neg_op = true; 3375 break; 3376 default: 3377 continue; 3378 } 3379 } else { 3380 if (BPF_OP(insn.code) == BPF_ADD) 3381 insn.code = BPF_CLASS(insn.code) | BPF_SUB; 3382 else if (BPF_OP(insn.code) == BPF_SUB) 3383 insn.code = BPF_CLASS(insn.code) | BPF_ADD; 3384 else 3385 continue; 3386 3387 meta->insn.code = insn.code | BPF_K; 3388 } 3389 3390 meta->insn.imm = -insn.imm; 3391 } 3392 } 3393 3394 /* Remove masking after load since our load guarantees this is not needed */ 3395 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) 3396 { 3397 struct nfp_insn_meta *meta1, *meta2; 3398 const s32 exp_mask[] = { 3399 [BPF_B] = 0x000000ffU, 3400 [BPF_H] = 0x0000ffffU, 3401 [BPF_W] = 0xffffffffU, 3402 }; 3403 3404 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3405 struct bpf_insn insn, next; 3406 3407 insn = meta1->insn; 3408 next = meta2->insn; 3409 3410 if (BPF_CLASS(insn.code) != BPF_LD) 3411 continue; 3412 if (BPF_MODE(insn.code) != BPF_ABS && 3413 BPF_MODE(insn.code) != BPF_IND) 3414 continue; 3415 3416 if (next.code != (BPF_ALU64 | BPF_AND | BPF_K)) 3417 continue; 3418 3419 if (!exp_mask[BPF_SIZE(insn.code)]) 3420 continue; 3421 if (exp_mask[BPF_SIZE(insn.code)] != next.imm) 3422 continue; 3423 3424 if (next.src_reg || next.dst_reg) 3425 continue; 3426 3427 if (meta2->flags & FLAG_INSN_IS_JUMP_DST) 3428 continue; 3429 3430 meta2->skip = true; 3431 } 3432 } 3433 3434 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog) 3435 { 3436 struct nfp_insn_meta *meta1, *meta2, *meta3; 3437 3438 nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) { 3439 struct bpf_insn insn, next1, next2; 3440 3441 insn = meta1->insn; 3442 next1 = meta2->insn; 3443 next2 = meta3->insn; 3444 3445 if (BPF_CLASS(insn.code) != BPF_LD) 3446 continue; 3447 if (BPF_MODE(insn.code) != BPF_ABS && 3448 BPF_MODE(insn.code) != BPF_IND) 3449 continue; 3450 if (BPF_SIZE(insn.code) != BPF_W) 3451 continue; 3452 3453 if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) && 3454 next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) && 3455 !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) && 3456 next2.code == (BPF_LSH | BPF_K | BPF_ALU64))) 3457 continue; 3458 3459 if (next1.src_reg || next1.dst_reg || 3460 next2.src_reg || next2.dst_reg) 3461 continue; 3462 3463 if (next1.imm != 0x20 || next2.imm != 0x20) 3464 continue; 3465 3466 if (meta2->flags & FLAG_INSN_IS_JUMP_DST || 3467 meta3->flags & FLAG_INSN_IS_JUMP_DST) 3468 continue; 3469 3470 meta2->skip = true; 3471 meta3->skip = true; 3472 } 3473 } 3474 3475 /* load/store pair that forms memory copy sould look like the following: 3476 * 3477 * ld_width R, [addr_src + offset_src] 3478 * st_width [addr_dest + offset_dest], R 3479 * 3480 * The destination register of load and source register of store should 3481 * be the same, load and store should also perform at the same width. 3482 * If either of addr_src or addr_dest is stack pointer, we don't do the 3483 * CPP optimization as stack is modelled by registers on NFP. 3484 */ 3485 static bool 3486 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta, 3487 struct nfp_insn_meta *st_meta) 3488 { 3489 struct bpf_insn *ld = &ld_meta->insn; 3490 struct bpf_insn *st = &st_meta->insn; 3491 3492 if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta)) 3493 return false; 3494 3495 if (ld_meta->ptr.type != PTR_TO_PACKET && 3496 ld_meta->ptr.type != PTR_TO_MAP_VALUE) 3497 return false; 3498 3499 if (st_meta->ptr.type != PTR_TO_PACKET) 3500 return false; 3501 3502 if (BPF_SIZE(ld->code) != BPF_SIZE(st->code)) 3503 return false; 3504 3505 if (ld->dst_reg != st->src_reg) 3506 return false; 3507 3508 /* There is jump to the store insn in this pair. */ 3509 if (st_meta->flags & FLAG_INSN_IS_JUMP_DST) 3510 return false; 3511 3512 return true; 3513 } 3514 3515 /* Currently, we only support chaining load/store pairs if: 3516 * 3517 * - Their address base registers are the same. 3518 * - Their address offsets are in the same order. 3519 * - They operate at the same memory width. 3520 * - There is no jump into the middle of them. 3521 */ 3522 static bool 3523 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta, 3524 struct nfp_insn_meta *st_meta, 3525 struct bpf_insn *prev_ld, 3526 struct bpf_insn *prev_st) 3527 { 3528 u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst; 3529 struct bpf_insn *ld = &ld_meta->insn; 3530 struct bpf_insn *st = &st_meta->insn; 3531 s16 prev_ld_off, prev_st_off; 3532 3533 /* This pair is the start pair. */ 3534 if (!prev_ld) 3535 return true; 3536 3537 prev_size = BPF_LDST_BYTES(prev_ld); 3538 curr_size = BPF_LDST_BYTES(ld); 3539 prev_ld_base = prev_ld->src_reg; 3540 prev_st_base = prev_st->dst_reg; 3541 prev_ld_dst = prev_ld->dst_reg; 3542 prev_ld_off = prev_ld->off; 3543 prev_st_off = prev_st->off; 3544 3545 if (ld->dst_reg != prev_ld_dst) 3546 return false; 3547 3548 if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base) 3549 return false; 3550 3551 if (curr_size != prev_size) 3552 return false; 3553 3554 /* There is jump to the head of this pair. */ 3555 if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST) 3556 return false; 3557 3558 /* Both in ascending order. */ 3559 if (prev_ld_off + prev_size == ld->off && 3560 prev_st_off + prev_size == st->off) 3561 return true; 3562 3563 /* Both in descending order. */ 3564 if (ld->off + curr_size == prev_ld_off && 3565 st->off + curr_size == prev_st_off) 3566 return true; 3567 3568 return false; 3569 } 3570 3571 /* Return TRUE if cross memory access happens. Cross memory access means 3572 * store area is overlapping with load area that a later load might load 3573 * the value from previous store, for this case we can't treat the sequence 3574 * as an memory copy. 3575 */ 3576 static bool 3577 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta, 3578 struct nfp_insn_meta *head_st_meta) 3579 { 3580 s16 head_ld_off, head_st_off, ld_off; 3581 3582 /* Different pointer types does not overlap. */ 3583 if (head_ld_meta->ptr.type != head_st_meta->ptr.type) 3584 return false; 3585 3586 /* load and store are both PTR_TO_PACKET, check ID info. */ 3587 if (head_ld_meta->ptr.id != head_st_meta->ptr.id) 3588 return true; 3589 3590 /* Canonicalize the offsets. Turn all of them against the original 3591 * base register. 3592 */ 3593 head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off; 3594 head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off; 3595 ld_off = ld->off + head_ld_meta->ptr.off; 3596 3597 /* Ascending order cross. */ 3598 if (ld_off > head_ld_off && 3599 head_ld_off < head_st_off && ld_off >= head_st_off) 3600 return true; 3601 3602 /* Descending order cross. */ 3603 if (ld_off < head_ld_off && 3604 head_ld_off > head_st_off && ld_off <= head_st_off) 3605 return true; 3606 3607 return false; 3608 } 3609 3610 /* This pass try to identify the following instructoin sequences. 3611 * 3612 * load R, [regA + offA] 3613 * store [regB + offB], R 3614 * load R, [regA + offA + const_imm_A] 3615 * store [regB + offB + const_imm_A], R 3616 * load R, [regA + offA + 2 * const_imm_A] 3617 * store [regB + offB + 2 * const_imm_A], R 3618 * ... 3619 * 3620 * Above sequence is typically generated by compiler when lowering 3621 * memcpy. NFP prefer using CPP instructions to accelerate it. 3622 */ 3623 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) 3624 { 3625 struct nfp_insn_meta *head_ld_meta = NULL; 3626 struct nfp_insn_meta *head_st_meta = NULL; 3627 struct nfp_insn_meta *meta1, *meta2; 3628 struct bpf_insn *prev_ld = NULL; 3629 struct bpf_insn *prev_st = NULL; 3630 u8 count = 0; 3631 3632 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3633 struct bpf_insn *ld = &meta1->insn; 3634 struct bpf_insn *st = &meta2->insn; 3635 3636 /* Reset record status if any of the following if true: 3637 * - The current insn pair is not load/store. 3638 * - The load/store pair doesn't chain with previous one. 3639 * - The chained load/store pair crossed with previous pair. 3640 * - The chained load/store pair has a total size of memory 3641 * copy beyond 128 bytes which is the maximum length a 3642 * single NFP CPP command can transfer. 3643 */ 3644 if (!curr_pair_is_memcpy(meta1, meta2) || 3645 !curr_pair_chain_with_previous(meta1, meta2, prev_ld, 3646 prev_st) || 3647 (head_ld_meta && (cross_mem_access(ld, head_ld_meta, 3648 head_st_meta) || 3649 head_ld_meta->ldst_gather_len >= 128))) { 3650 if (!count) 3651 continue; 3652 3653 if (count > 1) { 3654 s16 prev_ld_off = prev_ld->off; 3655 s16 prev_st_off = prev_st->off; 3656 s16 head_ld_off = head_ld_meta->insn.off; 3657 3658 if (prev_ld_off < head_ld_off) { 3659 head_ld_meta->insn.off = prev_ld_off; 3660 head_st_meta->insn.off = prev_st_off; 3661 head_ld_meta->ldst_gather_len = 3662 -head_ld_meta->ldst_gather_len; 3663 } 3664 3665 head_ld_meta->paired_st = &head_st_meta->insn; 3666 head_st_meta->skip = true; 3667 } else { 3668 head_ld_meta->ldst_gather_len = 0; 3669 } 3670 3671 /* If the chain is ended by an load/store pair then this 3672 * could serve as the new head of the the next chain. 3673 */ 3674 if (curr_pair_is_memcpy(meta1, meta2)) { 3675 head_ld_meta = meta1; 3676 head_st_meta = meta2; 3677 head_ld_meta->ldst_gather_len = 3678 BPF_LDST_BYTES(ld); 3679 meta1 = nfp_meta_next(meta1); 3680 meta2 = nfp_meta_next(meta2); 3681 prev_ld = ld; 3682 prev_st = st; 3683 count = 1; 3684 } else { 3685 head_ld_meta = NULL; 3686 head_st_meta = NULL; 3687 prev_ld = NULL; 3688 prev_st = NULL; 3689 count = 0; 3690 } 3691 3692 continue; 3693 } 3694 3695 if (!head_ld_meta) { 3696 head_ld_meta = meta1; 3697 head_st_meta = meta2; 3698 } else { 3699 meta1->skip = true; 3700 meta2->skip = true; 3701 } 3702 3703 head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld); 3704 meta1 = nfp_meta_next(meta1); 3705 meta2 = nfp_meta_next(meta2); 3706 prev_ld = ld; 3707 prev_st = st; 3708 count++; 3709 } 3710 } 3711 3712 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) 3713 { 3714 struct nfp_insn_meta *meta, *range_node = NULL; 3715 s16 range_start = 0, range_end = 0; 3716 bool cache_avail = false; 3717 struct bpf_insn *insn; 3718 s32 range_ptr_off = 0; 3719 u32 range_ptr_id = 0; 3720 3721 list_for_each_entry(meta, &nfp_prog->insns, l) { 3722 if (meta->flags & FLAG_INSN_IS_JUMP_DST) 3723 cache_avail = false; 3724 3725 if (meta->skip) 3726 continue; 3727 3728 insn = &meta->insn; 3729 3730 if (is_mbpf_store_pkt(meta) || 3731 insn->code == (BPF_JMP | BPF_CALL) || 3732 is_mbpf_classic_store_pkt(meta) || 3733 is_mbpf_classic_load(meta)) { 3734 cache_avail = false; 3735 continue; 3736 } 3737 3738 if (!is_mbpf_load(meta)) 3739 continue; 3740 3741 if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) { 3742 cache_avail = false; 3743 continue; 3744 } 3745 3746 if (!cache_avail) { 3747 cache_avail = true; 3748 if (range_node) 3749 goto end_current_then_start_new; 3750 goto start_new; 3751 } 3752 3753 /* Check ID to make sure two reads share the same 3754 * variable offset against PTR_TO_PACKET, and check OFF 3755 * to make sure they also share the same constant 3756 * offset. 3757 * 3758 * OFFs don't really need to be the same, because they 3759 * are the constant offsets against PTR_TO_PACKET, so 3760 * for different OFFs, we could canonicalize them to 3761 * offsets against original packet pointer. We don't 3762 * support this. 3763 */ 3764 if (meta->ptr.id == range_ptr_id && 3765 meta->ptr.off == range_ptr_off) { 3766 s16 new_start = range_start; 3767 s16 end, off = insn->off; 3768 s16 new_end = range_end; 3769 bool changed = false; 3770 3771 if (off < range_start) { 3772 new_start = off; 3773 changed = true; 3774 } 3775 3776 end = off + BPF_LDST_BYTES(insn); 3777 if (end > range_end) { 3778 new_end = end; 3779 changed = true; 3780 } 3781 3782 if (!changed) 3783 continue; 3784 3785 if (new_end - new_start <= 64) { 3786 /* Install new range. */ 3787 range_start = new_start; 3788 range_end = new_end; 3789 continue; 3790 } 3791 } 3792 3793 end_current_then_start_new: 3794 range_node->pkt_cache.range_start = range_start; 3795 range_node->pkt_cache.range_end = range_end; 3796 start_new: 3797 range_node = meta; 3798 range_node->pkt_cache.do_init = true; 3799 range_ptr_id = range_node->ptr.id; 3800 range_ptr_off = range_node->ptr.off; 3801 range_start = insn->off; 3802 range_end = insn->off + BPF_LDST_BYTES(insn); 3803 } 3804 3805 if (range_node) { 3806 range_node->pkt_cache.range_start = range_start; 3807 range_node->pkt_cache.range_end = range_end; 3808 } 3809 3810 list_for_each_entry(meta, &nfp_prog->insns, l) { 3811 if (meta->skip) 3812 continue; 3813 3814 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { 3815 if (meta->pkt_cache.do_init) { 3816 range_start = meta->pkt_cache.range_start; 3817 range_end = meta->pkt_cache.range_end; 3818 } else { 3819 meta->pkt_cache.range_start = range_start; 3820 meta->pkt_cache.range_end = range_end; 3821 } 3822 } 3823 } 3824 } 3825 3826 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) 3827 { 3828 nfp_bpf_opt_reg_init(nfp_prog); 3829 3830 nfp_bpf_opt_neg_add_sub(nfp_prog); 3831 nfp_bpf_opt_ld_mask(nfp_prog); 3832 nfp_bpf_opt_ld_shift(nfp_prog); 3833 nfp_bpf_opt_ldst_gather(nfp_prog); 3834 nfp_bpf_opt_pkt_cache(nfp_prog); 3835 3836 return 0; 3837 } 3838 3839 static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) 3840 { 3841 struct nfp_insn_meta *meta1, *meta2; 3842 struct nfp_bpf_map *nfp_map; 3843 struct bpf_map *map; 3844 3845 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3846 if (meta1->skip || meta2->skip) 3847 continue; 3848 3849 if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || 3850 meta1->insn.src_reg != BPF_PSEUDO_MAP_FD) 3851 continue; 3852 3853 map = (void *)(unsigned long)((u32)meta1->insn.imm | 3854 (u64)meta2->insn.imm << 32); 3855 if (bpf_map_offload_neutral(map)) 3856 continue; 3857 nfp_map = map_to_offmap(map)->dev_priv; 3858 3859 meta1->insn.imm = nfp_map->tid; 3860 meta2->insn.imm = 0; 3861 } 3862 3863 return 0; 3864 } 3865 3866 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) 3867 { 3868 __le64 *ustore = (__force __le64 *)prog; 3869 int i; 3870 3871 for (i = 0; i < len; i++) { 3872 int err; 3873 3874 err = nfp_ustore_check_valid_no_ecc(prog[i]); 3875 if (err) 3876 return err; 3877 3878 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i])); 3879 } 3880 3881 return 0; 3882 } 3883 3884 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog) 3885 { 3886 void *prog; 3887 3888 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL); 3889 if (!prog) 3890 return; 3891 3892 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64); 3893 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len); 3894 kvfree(nfp_prog->prog); 3895 nfp_prog->prog = prog; 3896 } 3897 3898 int nfp_bpf_jit(struct nfp_prog *nfp_prog) 3899 { 3900 int ret; 3901 3902 ret = nfp_bpf_replace_map_ptrs(nfp_prog); 3903 if (ret) 3904 return ret; 3905 3906 ret = nfp_bpf_optimize(nfp_prog); 3907 if (ret) 3908 return ret; 3909 3910 ret = nfp_translate(nfp_prog); 3911 if (ret) { 3912 pr_err("Translation failed with error %d (translated: %u)\n", 3913 ret, nfp_prog->n_translated); 3914 return -EINVAL; 3915 } 3916 3917 nfp_bpf_prog_trim(nfp_prog); 3918 3919 return ret; 3920 } 3921 3922 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) 3923 { 3924 struct nfp_insn_meta *meta; 3925 3926 /* Another pass to record jump information. */ 3927 list_for_each_entry(meta, &nfp_prog->insns, l) { 3928 u64 code = meta->insn.code; 3929 3930 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && 3931 BPF_OP(code) != BPF_CALL) { 3932 struct nfp_insn_meta *dst_meta; 3933 unsigned short dst_indx; 3934 3935 dst_indx = meta->n + 1 + meta->insn.off; 3936 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, 3937 cnt); 3938 3939 meta->jmp_dst = dst_meta; 3940 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; 3941 } 3942 } 3943 } 3944 3945 bool nfp_bpf_supported_opcode(u8 code) 3946 { 3947 return !!instr_cb[code]; 3948 } 3949 3950 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) 3951 { 3952 unsigned int i; 3953 u64 *prog; 3954 int err; 3955 3956 prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64), 3957 GFP_KERNEL); 3958 if (!prog) 3959 return ERR_PTR(-ENOMEM); 3960 3961 for (i = 0; i < nfp_prog->prog_len; i++) { 3962 enum nfp_relo_type special; 3963 u32 val; 3964 3965 special = FIELD_GET(OP_RELO_TYPE, prog[i]); 3966 switch (special) { 3967 case RELO_NONE: 3968 continue; 3969 case RELO_BR_REL: 3970 br_add_offset(&prog[i], bv->start_off); 3971 break; 3972 case RELO_BR_GO_OUT: 3973 br_set_offset(&prog[i], 3974 nfp_prog->tgt_out + bv->start_off); 3975 break; 3976 case RELO_BR_GO_ABORT: 3977 br_set_offset(&prog[i], 3978 nfp_prog->tgt_abort + bv->start_off); 3979 break; 3980 case RELO_BR_NEXT_PKT: 3981 br_set_offset(&prog[i], bv->tgt_done); 3982 break; 3983 case RELO_BR_HELPER: 3984 val = br_get_offset(prog[i]); 3985 val -= BR_OFF_RELO; 3986 switch (val) { 3987 case BPF_FUNC_map_lookup_elem: 3988 val = nfp_prog->bpf->helpers.map_lookup; 3989 break; 3990 case BPF_FUNC_map_update_elem: 3991 val = nfp_prog->bpf->helpers.map_update; 3992 break; 3993 case BPF_FUNC_map_delete_elem: 3994 val = nfp_prog->bpf->helpers.map_delete; 3995 break; 3996 case BPF_FUNC_perf_event_output: 3997 val = nfp_prog->bpf->helpers.perf_event_output; 3998 break; 3999 default: 4000 pr_err("relocation of unknown helper %d\n", 4001 val); 4002 err = -EINVAL; 4003 goto err_free_prog; 4004 } 4005 br_set_offset(&prog[i], val); 4006 break; 4007 case RELO_IMMED_REL: 4008 immed_add_value(&prog[i], bv->start_off); 4009 break; 4010 } 4011 4012 prog[i] &= ~OP_RELO_TYPE; 4013 } 4014 4015 err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len); 4016 if (err) 4017 goto err_free_prog; 4018 4019 return prog; 4020 4021 err_free_prog: 4022 kfree(prog); 4023 return ERR_PTR(err); 4024 } 4025