/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The foreach "multiple" entries macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the translation
	 * will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}
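
/* Note on the emitter convention used throughout this file: the emit_*()
 * helpers do not return errors.  The first failure is latched in
 * nfp_prog->error, translation simply keeps going, and callers check the
 * flag once at the end (nfp_prog_confirm_current_offset() above also
 * short-circuits to OK when an error has already been recorded).
 */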

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X, we need a right rotate of X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
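
/* Encoding detail worth calling out (illustration, derived from
 * __emit_shf() above): left shifts are expressed through the right-shift
 * datapath, so for SHF_SC_L_SHF the OP_SHF_SHIFT field holds 32 - shift.
 * emit_shf(..., SHF_SC_L_SHF, 5) therefore encodes the value 27 while
 * still performing a left shift by 5.
 */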

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}
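
/* Worked example for the immediate packing above (illustration only):
 * 0x00abcd00 has all of its set bits inside a 16-bit window shifted up by
 * one byte, so pack_immed() yields val = 0xabcd, shift = IMMED_SHIFT_1B
 * and wrp_immed() emits a single immed instruction.  0x12345678 fits no
 * window, straight or inverted, so wrp_immed() falls back to two
 * instructions: one loading the low 16 bits (IMMED_WIDTH_ALL), one
 * overwriting just the upper word (IMMED_WIDTH_WORD, shifted 2 bytes).
 */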

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If the @imm is small enough encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
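
/* addr40_offset() above relies on the 40-bit address convention used in
 * this file: the low 32 bits of the pointer live in GPR N and the high
 * bits in GPR N + 1, so adding a 32-bit offset takes an ALU_OP_ADD on the
 * low half followed by an ALU_OP_ADD_C to propagate the carry into the
 * high half.
 */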

/* The NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
			      &off);

	/* Set up the PREV_ALU fields to override the memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use a single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use a single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use a single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use a single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned
		 * length, then another direct_ref write8 to write the
		 * remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value it held before
	 * this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
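
/* The two load flavours differ in how the bus fills the transfer
 * registers: data_ld() above uses CMD_TGT_READ8, which returns bytes in
 * packet (big-endian) order and is trimmed with a right shift, while
 * data_ld_host_order() below uses CMD_TGT_READ32_SWAP to byte-swap into
 * host order and trims with a byte mask instead.
 */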

static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and
	 * then mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
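
/* An lmem_step callback copies one slice (at most 4 bytes, never crossing
 * a 32-bit boundary on either side) between a GPR and the stack in LMEM.
 * mem_op_stack() below drives it with bookkeeping flags: @first/@last
 * delimit the whole access, @new_gpr is set when the slice starts a new
 * GPR, @lm3 selects LM index 3 over index 0, and @needs_inc requests the
 * post-increment form of the LM pointer.
 */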

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM
		 * word into a new GPR.  IOW we've already looked at that LMEM
		 * word and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
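
/* Example of the slice math above (illustration only): loading 2 bytes
 * from stack byte offset 1 into the low bytes of a GPR gives src_byte = 1,
 * dst_byte = 0, so mask = 0x3, sc = SHF_SC_R_SHF and shf = 8 -- ld_field
 * takes the LMEM word shifted right by one byte and merges only its two
 * low bytes into the destination.
 */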

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations are going to need
		 * RMW, the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}
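
/* mem_op_stack() below picks the cheapest way of addressing the stack:
 * the bottom 64 bytes are reachable directly through LM index 0, anything
 * else goes through LM index 3, which first has to be pointed at the
 * right 32-byte window (offsets are ORed, not added, into *l$index3[off],
 * hence the round_down() alignment).  The wrp_nops() after the CSR write
 * pads out the cycles before the new LM address is usable; one slot can
 * instead be filled by the zeroing of the upper half.
 */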

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK; they all have
		 * the same alignment.  We depend on the low bits of the value
		 * being discarded when written to the LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because the offset is
		 * ORed in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 8 one slot will be filled by the zeroing of the
		 * upper half.
		 */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}
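
/* eBPF registers are 64-bit while NFP GPRs are 32-bit, so each BPF
 * register is modelled as a pair: GPR 2n holds the low word and
 * GPR 2n + 1 the high word (hence the dst_reg * 2 / src_reg * 2
 * arithmetic throughout).  64-bit ALU ops therefore run on the low halves
 * first and use the carry variants (e.g. ALU_OP_ADD_C, ALU_OP_SUB_C) on
 * the high halves.
 */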

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in the jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}

static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), carry_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, carry_op, reg_a(reg + 1));

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
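
/* Trace of wrp_end32() (illustration only), with reg_in = 0xaabbccdd:
 * the first ld_field writes all four bytes of the input rotated right
 * by 8 (0xddaabbcc); the second merges bytes 0 and 2 (mask 0x5) of that
 * intermediate value rotated right by 16, producing 0xddccbbaa -- a
 * 32-bit byte swap in two instructions.
 */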

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
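
/* Calls into firmware helpers below follow one pattern: emit_br_relo()
 * with RELO_BR_HELPER branches to the helper with two defer slots, the
 * slots load the helper's arguments (e.g. the map ID into A0) and the
 * return address into B0 via RELO_IMMED_REL, and ret_tgt is then
 * confirmed to be exactly the instruction the helper will return to.
 */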

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}
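
/* nfp_queue_select() below leans on the same defer-slot trick: the branch
 * is emitted with defer 2, so the two following instructions execute
 * whether or not the branch is taken -- they set the 'queue selected' bit
 * and copy the queue value.  The instruction after the slots is only
 * reached when the id does not fit the field and clamps the value to
 * NFP_NET_RXR_MAX.
 */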

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into the FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here; we will jump over the next instruction if the
	 * queue value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}
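
/* Note on the imm64 callbacks: insn->imm is an s32 and the u64 assignment
 * in add_imm64() above and sub_imm64() below deliberately sign extends it,
 * matching eBPF semantics for 64-bit immediate ALU ops.  The low word then
 * feeds the plain ALU op and imm >> 32 feeds the carry-propagating one.
 */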

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));

	return 0;
}

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_high = dst_low << shift_amt[4:0]
 *     dst_low = 0
 *   else
 *     dst_high = (dst_high, dst_low) >> (32 - shift_amt)
 *     dst_low = dst_low << shift_amt
 *
 * The indirect shift will use the same logic at runtime.
 */
static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
	if (shift_amt < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
			 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
			 32 - shift_amt);
		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_L_SHF, shift_amt);
	} else if (shift_amt == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (shift_amt > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	return __shl_imm64(nfp_prog, dst, insn->imm);
}

static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB,
		 reg_b(src));
	emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_R_DSHF);
}
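
/* The emit_alu(..., reg_none(), ...) instructions that look like dead code
 * around emit_shf_indir() are not: an indirect shift takes its operand
 * (the shift amount, and for ashr the signedness bit) from the result of
 * the immediately preceding ALU instruction, so the OR-with-0 ops and the
 * 32 - src computation above exist purely to stage that value.
 */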

/* NOTE: for indirect left shift, the HIGH part should be calculated first. */
static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_L_SHF);
}

static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	shl_reg64_lt32_high(nfp_prog, dst, src);
	shl_reg64_lt32_low(nfp_prog, dst, src);
}

static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_L_SHF);
	wrp_immed(nfp_prog, reg_both(dst), 0);
}

static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 umin, umax;
	u8 dst, src;

	dst = insn->dst_reg * 2;
	umin = meta->umin;
	umax = meta->umax;
	if (umin == umax)
		return __shl_imm64(nfp_prog, dst, umin);

	src = insn->src_reg * 2;
	if (umax < 32) {
		shl_reg64_lt32(nfp_prog, dst, src);
	} else if (umin >= 32) {
		shl_reg64_ge32(nfp_prog, dst, src);
	} else {
		/* Generate different instruction sequences depending on the
		 * runtime value of the shift amount.
		 */
		u16 label_ge32, label_end;

		label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);

		shl_reg64_lt32_high(nfp_prog, dst, src);
		label_end = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br(nfp_prog, BR_UNC, label_end, 2);
		/* shl_reg64_lt32_low packed in delay slot. */
		shl_reg64_lt32_low(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
			return -EINVAL;
		shl_reg64_ge32(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
			return -EINVAL;
	}

	return 0;
}

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_high = 0
 *     dst_low = dst_high >> shift_amt[4:0]
 *   else
 *     dst_high = dst_high >> shift_amt
 *     dst_low = (dst_high, dst_low) >> shift_amt
 *
 * The indirect shift will use the same logic at runtime.
 */
static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
	if (shift_amt < 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
	} else if (shift_amt == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (shift_amt > 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	return __shr_imm64(nfp_prog, dst, insn->imm);
}

/* NOTE: for indirect right shift, the LOW part should be calculated first. */
static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
		       reg_b(dst + 1), SHF_SC_R_SHF);
}

static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_R_DSHF);
}

static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	shr_reg64_lt32_low(nfp_prog, dst, src);
	shr_reg64_lt32_high(nfp_prog, dst, src);
}

static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
		       reg_b(dst + 1), SHF_SC_R_SHF);
	wrp_immed(nfp_prog, reg_both(dst + 1), 0);
}

static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 umin, umax;
	u8 dst, src;

	dst = insn->dst_reg * 2;
	umin = meta->umin;
	umax = meta->umax;
	if (umin == umax)
		return __shr_imm64(nfp_prog, dst, umin);

	src = insn->src_reg * 2;
	if (umax < 32) {
		shr_reg64_lt32(nfp_prog, dst, src);
	} else if (umin >= 32) {
		shr_reg64_ge32(nfp_prog, dst, src);
	} else {
		/* Generate different instruction sequences depending on the
		 * runtime value of the shift amount.
		 */
		u16 label_ge32, label_end;

		label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
		shr_reg64_lt32_low(nfp_prog, dst, src);
		label_end = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br(nfp_prog, BR_UNC, label_end, 2);
		/* shr_reg64_lt32_high packed in delay slot. */
		shr_reg64_lt32_high(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
			return -EINVAL;
		shr_reg64_ge32(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
			return -EINVAL;
	}

	return 0;
}

/* Code logic is the same as __shr_imm64 except that ashr requires the
 * signedness bit to be supplied through the PREV_ALU result.
 */
static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_R_DSHF, insn->imm);
		/* Set signedness bit. */
		emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
			 reg_imm(0));
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
			 reg_b(dst + 1), SHF_SC_R_SHF, insn->imm);
	} else if (insn->imm == 32) {
		/* NOTE: this also helps set the signedness bit. */
*/ 1937 wrp_reg_mov(nfp_prog, dst, dst + 1); 1938 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 1939 reg_b(dst + 1), SHF_SC_R_SHF, 31); 1940 } else if (insn->imm > 32) { 1941 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 1942 reg_imm(0)); 1943 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 1944 reg_b(dst + 1), SHF_SC_R_SHF, insn->imm - 32); 1945 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 1946 reg_b(dst + 1), SHF_SC_R_SHF, 31); 1947 } 1948 1949 return 0; 1950 } 1951 1952 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1953 { 1954 const struct bpf_insn *insn = &meta->insn; 1955 1956 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 1957 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1958 1959 return 0; 1960 } 1961 1962 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1963 { 1964 const struct bpf_insn *insn = &meta->insn; 1965 1966 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 1967 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1968 1969 return 0; 1970 } 1971 1972 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1973 { 1974 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 1975 } 1976 1977 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1978 { 1979 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 1980 } 1981 1982 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1983 { 1984 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 1985 } 1986 1987 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1988 { 1989 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 1990 } 1991 1992 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1993 { 1994 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 1995 } 1996 1997 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1998 { 1999 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2000 } 2001 2002 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2003 { 2004 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 2005 } 2006 2007 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2008 { 2009 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2010 } 2011 2012 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2013 { 2014 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 2015 } 2016 2017 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2018 { 2019 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2020 } 2021 2022 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2023 { 2024 u8 dst = meta->insn.dst_reg * 2; 2025 2026 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2027 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2028 2029 return 0; 2030 } 2031 2032 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2033 { 2034 const struct bpf_insn *insn = &meta->insn; 2035 2036 if (!insn->imm) 2037 return 1; /* TODO: zero shift means indirect */ 2038 2039 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 2040 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 2041 SHF_SC_L_SHF, insn->imm); 2042 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2043 2044 return 0; 2045 } 2046 2047 static int end_reg32(struct nfp_prog *nfp_prog, 
struct nfp_insn_meta *meta) 2048 { 2049 const struct bpf_insn *insn = &meta->insn; 2050 u8 gpr = insn->dst_reg * 2; 2051 2052 switch (insn->imm) { 2053 case 16: 2054 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2055 SHF_SC_R_ROT, 8); 2056 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2057 SHF_SC_R_SHF, 16); 2058 2059 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2060 break; 2061 case 32: 2062 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2063 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2064 break; 2065 case 64: 2066 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2067 2068 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2069 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2070 break; 2071 } 2072 2073 return 0; 2074 } 2075 2076 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2077 { 2078 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2079 u32 imm_lo, imm_hi; 2080 u8 dst; 2081 2082 dst = prev->insn.dst_reg * 2; 2083 imm_lo = prev->insn.imm; 2084 imm_hi = meta->insn.imm; 2085 2086 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2087 2088 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2089 if (imm_hi == imm_lo) 2090 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2091 else 2092 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2093 2094 return 0; 2095 } 2096 2097 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2098 { 2099 meta->double_cb = imm_ld8_part2; 2100 return 0; 2101 } 2102 2103 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2104 { 2105 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2106 } 2107 2108 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2109 { 2110 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2111 } 2112 2113 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2114 { 2115 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2116 } 2117 2118 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2119 { 2120 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2121 meta->insn.src_reg * 2, 1); 2122 } 2123 2124 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2125 { 2126 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2127 meta->insn.src_reg * 2, 2); 2128 } 2129 2130 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2131 { 2132 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2133 meta->insn.src_reg * 2, 4); 2134 } 2135 2136 static int 2137 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2138 unsigned int size, unsigned int ptr_off) 2139 { 2140 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2141 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2142 true, wrp_lmem_load); 2143 } 2144 2145 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2146 u8 size) 2147 { 2148 swreg dst = reg_both(meta->insn.dst_reg * 2); 2149 2150 switch (meta->insn.off) { 2151 case offsetof(struct __sk_buff, len): 2152 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2153 return -EOPNOTSUPP; 2154 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2155 break; 2156 case offsetof(struct __sk_buff, data): 2157 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2158 return -EOPNOTSUPP; 2159 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2160 break; 2161 case offsetof(struct __sk_buff, data_end): 2162 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2163 return -EOPNOTSUPP; 2164 emit_alu(nfp_prog, dst, 
2165 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2166 break; 2167 default: 2168 return -EOPNOTSUPP; 2169 } 2170 2171 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2172 2173 return 0; 2174 } 2175 2176 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2177 u8 size) 2178 { 2179 swreg dst = reg_both(meta->insn.dst_reg * 2); 2180 2181 switch (meta->insn.off) { 2182 case offsetof(struct xdp_md, data): 2183 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2184 return -EOPNOTSUPP; 2185 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2186 break; 2187 case offsetof(struct xdp_md, data_end): 2188 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2189 return -EOPNOTSUPP; 2190 emit_alu(nfp_prog, dst, 2191 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2192 break; 2193 default: 2194 return -EOPNOTSUPP; 2195 } 2196 2197 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2198 2199 return 0; 2200 } 2201 2202 static int 2203 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2204 unsigned int size) 2205 { 2206 swreg tmp_reg; 2207 2208 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2209 2210 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2211 tmp_reg, meta->insn.dst_reg * 2, size); 2212 } 2213 2214 static int 2215 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2216 unsigned int size) 2217 { 2218 swreg tmp_reg; 2219 2220 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2221 2222 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2223 tmp_reg, meta->insn.dst_reg * 2, size); 2224 } 2225 2226 static void 2227 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2228 struct nfp_insn_meta *meta) 2229 { 2230 s16 range_start = meta->pkt_cache.range_start; 2231 s16 range_end = meta->pkt_cache.range_end; 2232 swreg src_base, off; 2233 u8 xfer_num, len; 2234 bool indir; 2235 2236 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2237 src_base = reg_a(meta->insn.src_reg * 2); 2238 len = range_end - range_start; 2239 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2240 2241 indir = len > 8 * REG_WIDTH; 2242 /* Setup PREV_ALU for indirect mode. */ 2243 if (indir) 2244 wrp_immed(nfp_prog, reg_none(), 2245 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2246 2247 /* Cache memory into transfer-in registers. */ 2248 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2249 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2250 } 2251 2252 static int 2253 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2254 struct nfp_insn_meta *meta, 2255 unsigned int size) 2256 { 2257 s16 range_start = meta->pkt_cache.range_start; 2258 s16 insn_off = meta->insn.off - range_start; 2259 swreg dst_lo, dst_hi, src_lo, src_mid; 2260 u8 dst_gpr = meta->insn.dst_reg * 2; 2261 u8 len_lo = size, len_mid = 0; 2262 u8 idx = insn_off / REG_WIDTH; 2263 u8 off = insn_off % REG_WIDTH; 2264 2265 dst_hi = reg_both(dst_gpr + 1); 2266 dst_lo = reg_both(dst_gpr); 2267 src_lo = reg_xfer(idx); 2268 2269 /* The read length could involve as many as three registers. */ 2270 if (size > REG_WIDTH - off) { 2271 /* Calculate the part in the second register. */ 2272 len_lo = REG_WIDTH - off; 2273 len_mid = size - len_lo; 2274 2275 /* Calculate the part in the third register. 
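		 *
		 * For example, assuming REG_WIDTH is 4 (32-bit transfer
		 * registers), a read with off == 3 and size == 8 takes
		 * 1 byte (len_lo) from the first register, a full 4-byte
		 * word (len_mid) from the second (split across dst_lo and
		 * dst_hi), and the remaining 3 bytes from the third.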
*/ 2276 if (size > 2 * REG_WIDTH - off) 2277 len_mid = REG_WIDTH; 2278 } 2279 2280 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2281 2282 if (!len_mid) { 2283 wrp_immed(nfp_prog, dst_hi, 0); 2284 return 0; 2285 } 2286 2287 src_mid = reg_xfer(idx + 1); 2288 2289 if (size <= REG_WIDTH) { 2290 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2291 wrp_immed(nfp_prog, dst_hi, 0); 2292 } else { 2293 swreg src_hi = reg_xfer(idx + 2); 2294 2295 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2296 REG_WIDTH - len_lo, len_lo); 2297 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2298 REG_WIDTH - len_lo); 2299 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2300 len_lo); 2301 } 2302 2303 return 0; 2304 } 2305 2306 static int 2307 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2308 struct nfp_insn_meta *meta, 2309 unsigned int size) 2310 { 2311 swreg dst_lo, dst_hi, src_lo; 2312 u8 dst_gpr, idx; 2313 2314 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2315 dst_gpr = meta->insn.dst_reg * 2; 2316 dst_hi = reg_both(dst_gpr + 1); 2317 dst_lo = reg_both(dst_gpr); 2318 src_lo = reg_xfer(idx); 2319 2320 if (size < REG_WIDTH) { 2321 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2322 wrp_immed(nfp_prog, dst_hi, 0); 2323 } else if (size == REG_WIDTH) { 2324 wrp_mov(nfp_prog, dst_lo, src_lo); 2325 wrp_immed(nfp_prog, dst_hi, 0); 2326 } else { 2327 swreg src_hi = reg_xfer(idx + 1); 2328 2329 wrp_mov(nfp_prog, dst_lo, src_lo); 2330 wrp_mov(nfp_prog, dst_hi, src_hi); 2331 } 2332 2333 return 0; 2334 } 2335 2336 static int 2337 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2338 struct nfp_insn_meta *meta, unsigned int size) 2339 { 2340 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2341 2342 if (IS_ALIGNED(off, REG_WIDTH)) 2343 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2344 2345 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2346 } 2347 2348 static int 2349 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2350 unsigned int size) 2351 { 2352 if (meta->ldst_gather_len) 2353 return nfp_cpp_memcpy(nfp_prog, meta); 2354 2355 if (meta->ptr.type == PTR_TO_CTX) { 2356 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2357 return mem_ldx_xdp(nfp_prog, meta, size); 2358 else 2359 return mem_ldx_skb(nfp_prog, meta, size); 2360 } 2361 2362 if (meta->ptr.type == PTR_TO_PACKET) { 2363 if (meta->pkt_cache.range_end) { 2364 if (meta->pkt_cache.do_init) 2365 mem_ldx_data_init_pktcache(nfp_prog, meta); 2366 2367 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2368 } else { 2369 return mem_ldx_data(nfp_prog, meta, size); 2370 } 2371 } 2372 2373 if (meta->ptr.type == PTR_TO_STACK) 2374 return mem_ldx_stack(nfp_prog, meta, size, 2375 meta->ptr.off + meta->ptr.var_off.value); 2376 2377 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2378 return mem_ldx_emem(nfp_prog, meta, size); 2379 2380 return -EOPNOTSUPP; 2381 } 2382 2383 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2384 { 2385 return mem_ldx(nfp_prog, meta, 1); 2386 } 2387 2388 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2389 { 2390 return mem_ldx(nfp_prog, meta, 2); 2391 } 2392 2393 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2394 { 2395 return mem_ldx(nfp_prog, meta, 4); 2396 } 2397 2398 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2399 { 2400 return mem_ldx(nfp_prog, meta, 8); 2401 } 2402 2403 static int 2404 
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2405 unsigned int size) 2406 { 2407 u64 imm = meta->insn.imm; /* sign extend */ 2408 swreg off_reg; 2409 2410 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2411 2412 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2413 imm, size); 2414 } 2415 2416 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2417 unsigned int size) 2418 { 2419 if (meta->ptr.type == PTR_TO_PACKET) 2420 return mem_st_data(nfp_prog, meta, size); 2421 2422 return -EOPNOTSUPP; 2423 } 2424 2425 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2426 { 2427 return mem_st(nfp_prog, meta, 1); 2428 } 2429 2430 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2431 { 2432 return mem_st(nfp_prog, meta, 2); 2433 } 2434 2435 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2436 { 2437 return mem_st(nfp_prog, meta, 4); 2438 } 2439 2440 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2441 { 2442 return mem_st(nfp_prog, meta, 8); 2443 } 2444 2445 static int 2446 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2447 unsigned int size) 2448 { 2449 swreg off_reg; 2450 2451 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2452 2453 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2454 meta->insn.src_reg * 2, size); 2455 } 2456 2457 static int 2458 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2459 unsigned int size, unsigned int ptr_off) 2460 { 2461 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2462 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2463 false, wrp_lmem_store); 2464 } 2465 2466 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2467 { 2468 switch (meta->insn.off) { 2469 case offsetof(struct xdp_md, rx_queue_index): 2470 return nfp_queue_select(nfp_prog, meta); 2471 } 2472 2473 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2474 return -EOPNOTSUPP; 2475 } 2476 2477 static int 2478 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2479 unsigned int size) 2480 { 2481 if (meta->ptr.type == PTR_TO_PACKET) 2482 return mem_stx_data(nfp_prog, meta, size); 2483 2484 if (meta->ptr.type == PTR_TO_STACK) 2485 return mem_stx_stack(nfp_prog, meta, size, 2486 meta->ptr.off + meta->ptr.var_off.value); 2487 2488 return -EOPNOTSUPP; 2489 } 2490 2491 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2492 { 2493 return mem_stx(nfp_prog, meta, 1); 2494 } 2495 2496 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2497 { 2498 return mem_stx(nfp_prog, meta, 2); 2499 } 2500 2501 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2502 { 2503 if (meta->ptr.type == PTR_TO_CTX) 2504 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2505 return mem_stx_xdp(nfp_prog, meta); 2506 return mem_stx(nfp_prog, meta, 4); 2507 } 2508 2509 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2510 { 2511 return mem_stx(nfp_prog, meta, 8); 2512 } 2513 2514 static int 2515 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2516 { 2517 u8 dst_gpr = meta->insn.dst_reg * 2; 2518 u8 src_gpr = meta->insn.src_reg * 2; 2519 unsigned int full_add, out; 2520 swreg addra, addrb, off; 2521 2522 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2523 2524 /* We can fit 16 bits into 
command immediate.  If we know the immediate
	 * is guaranteed to either always or never fit into 16 bits, we only
	 * generate code to handle that particular case; otherwise we
	 * generate code for both.
	 */
	out = nfp_prog_current_offset(nfp_prog);
	full_add = nfp_prog_current_offset(nfp_prog);

	if (meta->insn.off) {
		out += 2;
		full_add += 2;
	}
	if (meta->xadd_maybe_16bit) {
		out += 3;
		full_add += 3;
	}
	if (meta->xadd_over_16bit)
		out += 2 + is64;
	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
		out += 5;
		full_add += 5;
	}

	/* Generate the branch for choosing add_imm vs add */
	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
		swreg max_imm = imm_a(nfp_prog);

		wrp_immed(nfp_prog, max_imm, 0xffff);
		emit_alu(nfp_prog, reg_none(),
			 max_imm, ALU_OP_SUB, reg_b(src_gpr));
		emit_alu(nfp_prog, reg_none(),
			 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
		emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
		/* defer for add */
	}

	/* If the insn has an offset, add it to the address. */
	if (!meta->insn.off) {
		addra = reg_a(dst_gpr);
		addrb = reg_b(dst_gpr + 1);
	} else {
		emit_alu(nfp_prog, imma_a(nfp_prog),
			 reg_a(dst_gpr), ALU_OP_ADD, off);
		emit_alu(nfp_prog, imma_b(nfp_prog),
			 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
		addra = imma_a(nfp_prog);
		addrb = imma_b(nfp_prog);
	}

	/* Generate the add_imm if 16 bits are possible */
	if (meta->xadd_maybe_16bit) {
		swreg prev_alu = imm_a(nfp_prog);

		wrp_immed(nfp_prog, prev_alu,
			  FIELD_PREP(CMD_OVE_DATA, 2) |
			  CMD_OVE_LEN |
			  FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
		wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
		emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
			       addra, addrb, 0, CMD_CTX_NO_SWAP);

		if (meta->xadd_over_16bit)
			emit_br(nfp_prog, BR_UNC, out, 0);
	}

	if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
		return -EINVAL;

	/* Generate the add if 16 bits are not guaranteed */
	if (meta->xadd_over_16bit) {
		emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
			 addra, addrb, is64 << 2,
			 is64 ?
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2597 2598 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2599 if (is64) 2600 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 2601 } 2602 2603 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 2604 return -EINVAL; 2605 2606 return 0; 2607 } 2608 2609 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2610 { 2611 return mem_xadd(nfp_prog, meta, false); 2612 } 2613 2614 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2615 { 2616 return mem_xadd(nfp_prog, meta, true); 2617 } 2618 2619 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2620 { 2621 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 2622 2623 return 0; 2624 } 2625 2626 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2627 { 2628 const struct bpf_insn *insn = &meta->insn; 2629 u64 imm = insn->imm; /* sign extend */ 2630 swreg or1, or2, tmp_reg; 2631 2632 or1 = reg_a(insn->dst_reg * 2); 2633 or2 = reg_b(insn->dst_reg * 2 + 1); 2634 2635 if (imm & ~0U) { 2636 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2637 emit_alu(nfp_prog, imm_a(nfp_prog), 2638 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2639 or1 = imm_a(nfp_prog); 2640 } 2641 2642 if (imm >> 32) { 2643 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2644 emit_alu(nfp_prog, imm_b(nfp_prog), 2645 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2646 or2 = imm_b(nfp_prog); 2647 } 2648 2649 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 2650 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2651 2652 return 0; 2653 } 2654 2655 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2656 { 2657 const struct bpf_insn *insn = &meta->insn; 2658 u64 imm = insn->imm; /* sign extend */ 2659 swreg tmp_reg; 2660 2661 if (!imm) { 2662 meta->skip = true; 2663 return 0; 2664 } 2665 2666 if (imm & ~0U) { 2667 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2668 emit_alu(nfp_prog, reg_none(), 2669 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg); 2670 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2671 } 2672 2673 if (imm >> 32) { 2674 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2675 emit_alu(nfp_prog, reg_none(), 2676 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); 2677 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2678 } 2679 2680 return 0; 2681 } 2682 2683 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2684 { 2685 const struct bpf_insn *insn = &meta->insn; 2686 u64 imm = insn->imm; /* sign extend */ 2687 swreg tmp_reg; 2688 2689 if (!imm) { 2690 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 2691 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 2692 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2693 return 0; 2694 } 2695 2696 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2697 emit_alu(nfp_prog, reg_none(), 2698 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2699 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2700 2701 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2702 emit_alu(nfp_prog, reg_none(), 2703 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2704 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2705 2706 return 0; 2707 } 2708 2709 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2710 { 2711 const struct bpf_insn *insn = &meta->insn; 2712 2713 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 2714 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 2715 emit_alu(nfp_prog, 
imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 2716 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 2717 emit_alu(nfp_prog, reg_none(), 2718 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 2719 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2720 2721 return 0; 2722 } 2723 2724 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2725 { 2726 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 2727 } 2728 2729 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2730 { 2731 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 2732 } 2733 2734 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2735 { 2736 switch (meta->insn.imm) { 2737 case BPF_FUNC_xdp_adjust_head: 2738 return adjust_head(nfp_prog, meta); 2739 case BPF_FUNC_map_lookup_elem: 2740 case BPF_FUNC_map_update_elem: 2741 case BPF_FUNC_map_delete_elem: 2742 return map_call_stack_common(nfp_prog, meta); 2743 case BPF_FUNC_get_prandom_u32: 2744 return nfp_get_prandom_u32(nfp_prog, meta); 2745 case BPF_FUNC_perf_event_output: 2746 return nfp_perf_event_output(nfp_prog, meta); 2747 default: 2748 WARN_ONCE(1, "verifier allowed unsupported function\n"); 2749 return -EOPNOTSUPP; 2750 } 2751 } 2752 2753 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2754 { 2755 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 2756 2757 return 0; 2758 } 2759 2760 static const instr_cb_t instr_cb[256] = { 2761 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 2762 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 2763 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 2764 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 2765 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 2766 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 2767 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 2768 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 2769 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 2770 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 2771 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 2772 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 2773 [BPF_ALU64 | BPF_NEG] = neg_reg64, 2774 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, 2775 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 2776 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, 2777 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 2778 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, 2779 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 2780 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 2781 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 2782 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 2783 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 2784 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 2785 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 2786 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 2787 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 2788 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 2789 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 2790 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 2791 [BPF_ALU | BPF_NEG] = neg_reg, 2792 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 2793 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 2794 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 2795 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 2796 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 2797 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 2798 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 2799 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 2800 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 2801 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 2802 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 2803 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 2804 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 2805 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 2806 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 
2807 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 2808 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 2809 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 2810 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, 2811 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 2812 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 2813 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 2814 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 2815 [BPF_JMP | BPF_JA | BPF_K] = jump, 2816 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 2817 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, 2818 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, 2819 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, 2820 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, 2821 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, 2822 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, 2823 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, 2824 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, 2825 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 2826 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 2827 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 2828 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, 2829 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, 2830 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, 2831 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, 2832 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, 2833 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, 2834 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, 2835 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, 2836 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 2837 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 2838 [BPF_JMP | BPF_CALL] = call, 2839 [BPF_JMP | BPF_EXIT] = goto_out, 2840 }; 2841 2842 /* --- Assembler logic --- */ 2843 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 2844 { 2845 struct nfp_insn_meta *meta, *jmp_dst; 2846 u32 idx, br_idx; 2847 2848 list_for_each_entry(meta, &nfp_prog->insns, l) { 2849 if (meta->skip) 2850 continue; 2851 if (meta->insn.code == (BPF_JMP | BPF_CALL)) 2852 continue; 2853 if (BPF_CLASS(meta->insn.code) != BPF_JMP) 2854 continue; 2855 2856 if (list_is_last(&meta->l, &nfp_prog->insns)) 2857 br_idx = nfp_prog->last_bpf_off; 2858 else 2859 br_idx = list_next_entry(meta, l)->off - 1; 2860 2861 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 2862 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 2863 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 2864 return -ELOOP; 2865 } 2866 /* Leave special branches for later */ 2867 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 2868 RELO_BR_REL) 2869 continue; 2870 2871 if (!meta->jmp_dst) { 2872 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 2873 return -ELOOP; 2874 } 2875 2876 jmp_dst = meta->jmp_dst; 2877 2878 if (jmp_dst->skip) { 2879 pr_err("Branch landing on removed instruction!!\n"); 2880 return -ELOOP; 2881 } 2882 2883 for (idx = meta->off; idx <= br_idx; idx++) { 2884 if (!nfp_is_br(nfp_prog->prog[idx])) 2885 continue; 2886 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 2887 } 2888 } 2889 2890 return 0; 2891 } 2892 2893 static void nfp_intro(struct nfp_prog *nfp_prog) 2894 { 2895 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 2896 emit_alu(nfp_prog, plen_reg(nfp_prog), 2897 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 2898 } 2899 2900 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 2901 { 2902 /* TC direct-action mode: 2903 * 0,1 ok NOT SUPPORTED[1] 2904 * 2 drop 0x22 -> drop, count as stat1 2905 * 4,5 nuke 0x02 -> drop 2906 * 7 redir 0x44 -> redir, count as stat2 2907 * * unspec 0x11 -> pass, count as stat0 2908 * 2909 * [1] We can't support OK and RECLASSIFY because we can't tell TC 2910 * the exact decision made. 
We are forced to support UNSPEC 2911 * to handle aborts so that's the only one we handle for passing 2912 * packets up the stack. 2913 */ 2914 /* Target for aborts */ 2915 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2916 2917 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2918 2919 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2920 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 2921 2922 /* Target for normal exits */ 2923 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2924 2925 /* if R0 > 7 jump to abort */ 2926 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 2927 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2928 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2929 2930 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 2931 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 2932 2933 emit_shf(nfp_prog, reg_a(1), 2934 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 2935 2936 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2937 emit_shf(nfp_prog, reg_a(2), 2938 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2939 2940 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2941 emit_shf(nfp_prog, reg_b(2), 2942 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 2943 2944 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2945 2946 emit_shf(nfp_prog, reg_b(2), 2947 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 2948 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2949 } 2950 2951 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 2952 { 2953 /* XDP return codes: 2954 * 0 aborted 0x82 -> drop, count as stat3 2955 * 1 drop 0x22 -> drop, count as stat1 2956 * 2 pass 0x11 -> pass, count as stat0 2957 * 3 tx 0x44 -> redir, count as stat2 2958 * * unknown 0x82 -> drop, count as stat3 2959 */ 2960 /* Target for aborts */ 2961 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2962 2963 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2964 2965 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2966 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 2967 2968 /* Target for normal exits */ 2969 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2970 2971 /* if R0 > 3 jump to abort */ 2972 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 2973 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2974 2975 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 2976 2977 emit_shf(nfp_prog, reg_a(1), 2978 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 2979 2980 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2981 emit_shf(nfp_prog, reg_b(2), 2982 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2983 2984 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2985 2986 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2987 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2988 } 2989 2990 static void nfp_outro(struct nfp_prog *nfp_prog) 2991 { 2992 switch (nfp_prog->type) { 2993 case BPF_PROG_TYPE_SCHED_CLS: 2994 nfp_outro_tc_da(nfp_prog); 2995 break; 2996 case BPF_PROG_TYPE_XDP: 2997 nfp_outro_xdp(nfp_prog); 2998 break; 2999 default: 3000 WARN_ON(1); 3001 } 3002 } 3003 3004 static int nfp_translate(struct nfp_prog *nfp_prog) 3005 { 3006 struct nfp_insn_meta *meta; 3007 int err; 3008 3009 nfp_intro(nfp_prog); 3010 if (nfp_prog->error) 3011 return nfp_prog->error; 3012 3013 list_for_each_entry(meta, &nfp_prog->insns, l) { 3014 instr_cb_t cb = 
instr_cb[meta->insn.code]; 3015 3016 meta->off = nfp_prog_current_offset(nfp_prog); 3017 3018 if (meta->skip) { 3019 nfp_prog->n_translated++; 3020 continue; 3021 } 3022 3023 if (nfp_meta_has_prev(nfp_prog, meta) && 3024 nfp_meta_prev(meta)->double_cb) 3025 cb = nfp_meta_prev(meta)->double_cb; 3026 if (!cb) 3027 return -ENOENT; 3028 err = cb(nfp_prog, meta); 3029 if (err) 3030 return err; 3031 if (nfp_prog->error) 3032 return nfp_prog->error; 3033 3034 nfp_prog->n_translated++; 3035 } 3036 3037 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 3038 3039 nfp_outro(nfp_prog); 3040 if (nfp_prog->error) 3041 return nfp_prog->error; 3042 3043 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 3044 if (nfp_prog->error) 3045 return nfp_prog->error; 3046 3047 return nfp_fixup_branches(nfp_prog); 3048 } 3049 3050 /* --- Optimizations --- */ 3051 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 3052 { 3053 struct nfp_insn_meta *meta; 3054 3055 list_for_each_entry(meta, &nfp_prog->insns, l) { 3056 struct bpf_insn insn = meta->insn; 3057 3058 /* Programs converted from cBPF start with register xoring */ 3059 if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) && 3060 insn.src_reg == insn.dst_reg) 3061 continue; 3062 3063 /* Programs start with R6 = R1 but we ignore the skb pointer */ 3064 if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) && 3065 insn.src_reg == 1 && insn.dst_reg == 6) 3066 meta->skip = true; 3067 3068 /* Return as soon as something doesn't match */ 3069 if (!meta->skip) 3070 return; 3071 } 3072 } 3073 3074 /* abs(insn.imm) will fit better into unrestricted reg immediate - 3075 * convert add/sub of a negative number into a sub/add of a positive one. 3076 */ 3077 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) 3078 { 3079 struct nfp_insn_meta *meta; 3080 3081 list_for_each_entry(meta, &nfp_prog->insns, l) { 3082 struct bpf_insn insn = meta->insn; 3083 3084 if (meta->skip) 3085 continue; 3086 3087 if (BPF_CLASS(insn.code) != BPF_ALU && 3088 BPF_CLASS(insn.code) != BPF_ALU64 && 3089 BPF_CLASS(insn.code) != BPF_JMP) 3090 continue; 3091 if (BPF_SRC(insn.code) != BPF_K) 3092 continue; 3093 if (insn.imm >= 0) 3094 continue; 3095 3096 if (BPF_CLASS(insn.code) == BPF_JMP) { 3097 switch (BPF_OP(insn.code)) { 3098 case BPF_JGE: 3099 case BPF_JSGE: 3100 case BPF_JLT: 3101 case BPF_JSLT: 3102 meta->jump_neg_op = true; 3103 break; 3104 default: 3105 continue; 3106 } 3107 } else { 3108 if (BPF_OP(insn.code) == BPF_ADD) 3109 insn.code = BPF_CLASS(insn.code) | BPF_SUB; 3110 else if (BPF_OP(insn.code) == BPF_SUB) 3111 insn.code = BPF_CLASS(insn.code) | BPF_ADD; 3112 else 3113 continue; 3114 3115 meta->insn.code = insn.code | BPF_K; 3116 } 3117 3118 meta->insn.imm = -insn.imm; 3119 } 3120 } 3121 3122 /* Remove masking after load since our load guarantees this is not needed */ 3123 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) 3124 { 3125 struct nfp_insn_meta *meta1, *meta2; 3126 const s32 exp_mask[] = { 3127 [BPF_B] = 0x000000ffU, 3128 [BPF_H] = 0x0000ffffU, 3129 [BPF_W] = 0xffffffffU, 3130 }; 3131 3132 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3133 struct bpf_insn insn, next; 3134 3135 insn = meta1->insn; 3136 next = meta2->insn; 3137 3138 if (BPF_CLASS(insn.code) != BPF_LD) 3139 continue; 3140 if (BPF_MODE(insn.code) != BPF_ABS && 3141 BPF_MODE(insn.code) != BPF_IND) 3142 continue; 3143 3144 if (next.code != (BPF_ALU64 | BPF_AND | BPF_K)) 3145 continue; 3146 3147 if (!exp_mask[BPF_SIZE(insn.code)]) 3148 continue; 3149 if (exp_mask[BPF_SIZE(insn.code)] 
!= next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the store
 * must be the same, and the load and store must also operate at the same
 * width.  If either addr_src or addr_dest is the stack pointer, we don't do
 * the CPP optimization, as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair.
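	 * No previous pair has been recorded yet, so it trivially starts a
	 * new chain.  As an illustration of the order checks below: 4-byte
	 * pairs whose load offsets run 0, 4, 8 pass the ascending-order
	 * test, and 8, 4, 0 the descending one.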
	 */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return TRUE if a cross memory access happens.  A cross memory access means
 * the store area overlaps the load area such that a later load might read a
 * value written by a previous store; in that case we can't treat the
 * sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets.  Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}

/* This pass tries to identify the following instruction sequences:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when lowering
 * memcpy; the NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset the record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crosses the previous pair.
		 * - The chained load/store pair grows the total memory copy
		 *   beyond 128 bytes, the maximum length a single NFP CPP
		 *   command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then it
			 * could serve as the new head of the next chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}

static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->skip)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}

		/* Check ID to make sure two reads share the same
		 * variable offset against PTR_TO_PACKET, and check OFF
		 * to make sure they also share the same constant
		 * offset.
		 *
		 * OFFs don't really need to be the same, because they
		 * are the constant offsets against PTR_TO_PACKET, so
		 * for different OFFs we could canonicalize them to
		 * offsets against the original packet pointer.  We
		 * don't support this.
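		 *
		 * For example, two 4-byte loads at insn->off 2 and 10
		 * against the same pointer merge into the cached range
		 * [2, 14).  A load that would grow the window beyond
		 * 64 bytes instead falls through below, ending the
		 * current range and starting a new one.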
3490 */ 3491 if (meta->ptr.id == range_ptr_id && 3492 meta->ptr.off == range_ptr_off) { 3493 s16 new_start = range_start; 3494 s16 end, off = insn->off; 3495 s16 new_end = range_end; 3496 bool changed = false; 3497 3498 if (off < range_start) { 3499 new_start = off; 3500 changed = true; 3501 } 3502 3503 end = off + BPF_LDST_BYTES(insn); 3504 if (end > range_end) { 3505 new_end = end; 3506 changed = true; 3507 } 3508 3509 if (!changed) 3510 continue; 3511 3512 if (new_end - new_start <= 64) { 3513 /* Install new range. */ 3514 range_start = new_start; 3515 range_end = new_end; 3516 continue; 3517 } 3518 } 3519 3520 end_current_then_start_new: 3521 range_node->pkt_cache.range_start = range_start; 3522 range_node->pkt_cache.range_end = range_end; 3523 start_new: 3524 range_node = meta; 3525 range_node->pkt_cache.do_init = true; 3526 range_ptr_id = range_node->ptr.id; 3527 range_ptr_off = range_node->ptr.off; 3528 range_start = insn->off; 3529 range_end = insn->off + BPF_LDST_BYTES(insn); 3530 } 3531 3532 if (range_node) { 3533 range_node->pkt_cache.range_start = range_start; 3534 range_node->pkt_cache.range_end = range_end; 3535 } 3536 3537 list_for_each_entry(meta, &nfp_prog->insns, l) { 3538 if (meta->skip) 3539 continue; 3540 3541 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { 3542 if (meta->pkt_cache.do_init) { 3543 range_start = meta->pkt_cache.range_start; 3544 range_end = meta->pkt_cache.range_end; 3545 } else { 3546 meta->pkt_cache.range_start = range_start; 3547 meta->pkt_cache.range_end = range_end; 3548 } 3549 } 3550 } 3551 } 3552 3553 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) 3554 { 3555 nfp_bpf_opt_reg_init(nfp_prog); 3556 3557 nfp_bpf_opt_neg_add_sub(nfp_prog); 3558 nfp_bpf_opt_ld_mask(nfp_prog); 3559 nfp_bpf_opt_ld_shift(nfp_prog); 3560 nfp_bpf_opt_ldst_gather(nfp_prog); 3561 nfp_bpf_opt_pkt_cache(nfp_prog); 3562 3563 return 0; 3564 } 3565 3566 static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) 3567 { 3568 struct nfp_insn_meta *meta1, *meta2; 3569 struct nfp_bpf_map *nfp_map; 3570 struct bpf_map *map; 3571 3572 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { 3573 if (meta1->skip || meta2->skip) 3574 continue; 3575 3576 if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || 3577 meta1->insn.src_reg != BPF_PSEUDO_MAP_FD) 3578 continue; 3579 3580 map = (void *)(unsigned long)((u32)meta1->insn.imm | 3581 (u64)meta2->insn.imm << 32); 3582 if (bpf_map_offload_neutral(map)) 3583 continue; 3584 nfp_map = map_to_offmap(map)->dev_priv; 3585 3586 meta1->insn.imm = nfp_map->tid; 3587 meta2->insn.imm = 0; 3588 } 3589 3590 return 0; 3591 } 3592 3593 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) 3594 { 3595 __le64 *ustore = (__force __le64 *)prog; 3596 int i; 3597 3598 for (i = 0; i < len; i++) { 3599 int err; 3600 3601 err = nfp_ustore_check_valid_no_ecc(prog[i]); 3602 if (err) 3603 return err; 3604 3605 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i])); 3606 } 3607 3608 return 0; 3609 } 3610 3611 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog) 3612 { 3613 void *prog; 3614 3615 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL); 3616 if (!prog) 3617 return; 3618 3619 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64); 3620 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len); 3621 kvfree(nfp_prog->prog); 3622 nfp_prog->prog = prog; 3623 } 3624 3625 int nfp_bpf_jit(struct nfp_prog *nfp_prog) 3626 { 3627 int ret; 3628 3629 ret = nfp_bpf_replace_map_ptrs(nfp_prog); 3630 if (ret) 3631 return ret; 
3632 3633 ret = nfp_bpf_optimize(nfp_prog); 3634 if (ret) 3635 return ret; 3636 3637 ret = nfp_translate(nfp_prog); 3638 if (ret) { 3639 pr_err("Translation failed with error %d (translated: %u)\n", 3640 ret, nfp_prog->n_translated); 3641 return -EINVAL; 3642 } 3643 3644 nfp_bpf_prog_trim(nfp_prog); 3645 3646 return ret; 3647 } 3648 3649 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) 3650 { 3651 struct nfp_insn_meta *meta; 3652 3653 /* Another pass to record jump information. */ 3654 list_for_each_entry(meta, &nfp_prog->insns, l) { 3655 u64 code = meta->insn.code; 3656 3657 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && 3658 BPF_OP(code) != BPF_CALL) { 3659 struct nfp_insn_meta *dst_meta; 3660 unsigned short dst_indx; 3661 3662 dst_indx = meta->n + 1 + meta->insn.off; 3663 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, 3664 cnt); 3665 3666 meta->jmp_dst = dst_meta; 3667 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; 3668 } 3669 } 3670 } 3671 3672 bool nfp_bpf_supported_opcode(u8 code) 3673 { 3674 return !!instr_cb[code]; 3675 } 3676 3677 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) 3678 { 3679 unsigned int i; 3680 u64 *prog; 3681 int err; 3682 3683 prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64), 3684 GFP_KERNEL); 3685 if (!prog) 3686 return ERR_PTR(-ENOMEM); 3687 3688 for (i = 0; i < nfp_prog->prog_len; i++) { 3689 enum nfp_relo_type special; 3690 u32 val; 3691 3692 special = FIELD_GET(OP_RELO_TYPE, prog[i]); 3693 switch (special) { 3694 case RELO_NONE: 3695 continue; 3696 case RELO_BR_REL: 3697 br_add_offset(&prog[i], bv->start_off); 3698 break; 3699 case RELO_BR_GO_OUT: 3700 br_set_offset(&prog[i], 3701 nfp_prog->tgt_out + bv->start_off); 3702 break; 3703 case RELO_BR_GO_ABORT: 3704 br_set_offset(&prog[i], 3705 nfp_prog->tgt_abort + bv->start_off); 3706 break; 3707 case RELO_BR_NEXT_PKT: 3708 br_set_offset(&prog[i], bv->tgt_done); 3709 break; 3710 case RELO_BR_HELPER: 3711 val = br_get_offset(prog[i]); 3712 val -= BR_OFF_RELO; 3713 switch (val) { 3714 case BPF_FUNC_map_lookup_elem: 3715 val = nfp_prog->bpf->helpers.map_lookup; 3716 break; 3717 case BPF_FUNC_map_update_elem: 3718 val = nfp_prog->bpf->helpers.map_update; 3719 break; 3720 case BPF_FUNC_map_delete_elem: 3721 val = nfp_prog->bpf->helpers.map_delete; 3722 break; 3723 case BPF_FUNC_perf_event_output: 3724 val = nfp_prog->bpf->helpers.perf_event_output; 3725 break; 3726 default: 3727 pr_err("relocation of unknown helper %d\n", 3728 val); 3729 err = -EINVAL; 3730 goto err_free_prog; 3731 } 3732 br_set_offset(&prog[i], val); 3733 break; 3734 case RELO_IMMED_REL: 3735 immed_add_value(&prog[i], bv->start_off); 3736 break; 3737 } 3738 3739 prog[i] &= ~OP_RELO_TYPE; 3740 } 3741 3742 err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len); 3743 if (err) 3744 goto err_free_prog; 3745 3746 return prog; 3747 3748 err_free_prog: 3749 kfree(prog); 3750 return ERR_PTR(err); 3751 } 3752