/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"

/* --- NFP prog --- */
/* The "multiple entries" for-each macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))
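
/* Illustrative use of the walk macros above (a hypothetical peephole pass;
 * can_pair() and combine() are made-up helpers, not part of this file):
 *
 *	struct nfp_insn_meta *meta1, *meta2;
 *
 *	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2)
 *		if (can_pair(meta1, meta2))
 *			combine(meta1, meta2);	// may unlink meta2
 *
 * Unlinking the next<n> entries during the walk is fine; unlinking pos is not.
 */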

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the
	 * translation will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}
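
/* Note on error handling: the emitters below return void.  Failures are
 * recorded in nfp_prog->error instead, so a caller can emit a whole
 * instruction sequence and check the error field once at the end rather
 * than after every single emit.
 */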

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}
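
/* Every branch is tagged with a relocation type stashed in otherwise
 * unused bits of the instruction word (OP_RELO_TYPE above).  The intent,
 * as the RELO_* names suggest, is that branch targets get patched to
 * their final addresses later, once the program's placement in the
 * instruction store is known.
 */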

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}
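
/* How ld_field is used throughout this file: bit n of bmask selects byte n
 * of the destination for writing (bit 0 = least significant byte); the
 * selected bytes are taken from the shifted source and the remaining bytes
 * of dst are preserved (or zeroed when the "zero" flag is set).  E.g.
 * emit_ld_field(.., dst, 0x3, src, SHF_SC_R_SHF, 16) replaces the two low
 * bytes of dst with bytes 2-3 of src.
 */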

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
		    false, reg.src_lmextn);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}
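
/* Example of the helper above: wrp_reg_subpart(nfp_prog, dst, src, 2, 1)
 * emits ld_field with bmask 0x3 and a right shift of 8, i.e. it copies
 * bytes 1-2 of @src into bytes 0-1 of @dst and zeroes the upper bytes.
 */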

/* The NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	/* Set up PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
		     off, xfer_num - 1, true, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
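
	/* Pick the cheapest write strategy for the store half (a summary of
	 * the branches below): len <= 8 takes a single direct write8; a
	 * 4-byte-aligned len <= 32 a single direct write32; other len <= 32
	 * a single indirect write8; larger aligned lengths a single indirect
	 * write32; everything else pairs a 32-bit write with a trailing
	 * write8 for the odd bytes.
	 */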
	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 true);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 true);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, true);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, true);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 true);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 true);
	} else {
		/* Use one indirect_ref write32 to write a 4-byte-aligned
		 * length, then another direct_ref write8 to write the
		 * remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, true);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, true);
	}

	/* TODO: The following extra load is to make sure data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before this
	 * transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, true);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
		   u8 dst_gpr, int size)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in @offset and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
		 reg_a(src_gpr), offset, sz / 4 - 1, true);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, 0, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, 0, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
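
/* The flags passed to an lmem_step callback describe where the current
 * slice sits in the overall access: @first/@last bracket the sequence,
 * @new_gpr is set when the slice starts a fresh GPR, @lm3 selects the
 * LMaddr3 index register over LMaddr0, and @needs_inc asks for the
 * post-increment form of the LM pointer (see mem_op_stack() below).
 */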

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM
		 * word into a new GPR.  IOW we've already looked at that LMEM
		 * word and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
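
/* Worked example for the ld_field math above: a 2-byte load with
 * off % 4 == 2 into dst_byte 0 gives mask = 0x3 and src_byte = 2, so the
 * LMEM word is shifted right by 16 and only the two low bytes of the
 * destination GPR are written.
 */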

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations are going to need
		 * RMW, the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}
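
/* mem_op_stack() below picks one of four LMEM addressing strategies:
 * a variable pointer always goes through LMaddr3 with post-increment;
 * a constant offset within the bottom 64 bytes can use LMaddr0 directly;
 * a constant access contained in one 32-byte window sets LMaddr3 once
 * (offsets are ORed, not added, hence the alignment requirement); and
 * anything else sets LMaddr3 to the access base and increments as it goes.
 */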

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg, breg;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
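
/* The two ld_field rotations above implement a 32-bit byte swap: the first
 * writes the input rotated right by 8 (b0 b3 b2 b1, MSB first) into all
 * four bytes, the second rotates that by 16 and patches bytes 0 and 2
 * (mask 0x5), leaving b0 b1 b2 b3.
 */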

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
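
/* Register convention used by all callbacks below: BPF register N lives in
 * the NFP GPR pair 2N (low 32 bits) and 2N + 1 (high 32 bits), which is
 * why dst_reg and src_reg are always scaled by two.
 */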

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));

	return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, 32 - insn->imm);
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}
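
/* In the imm < 32 case above the new high word is built with a double
 * shift: SHF_SC_R_DSHF right-shifts the 64-bit concatenation [hi:lo] by
 * (32 - imm), which equals (hi << imm) | (lo >> (32 - imm)); the low word
 * is then a plain left shift.
 */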

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, insn->imm);
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}

static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u8 dst = meta->insn.dst_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}

static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
	u32 imm_lo, imm_hi;
	u8 dst;

	dst = prev->insn.dst_reg * 2;
	imm_lo = prev->insn.imm;
	imm_hi = meta->insn.imm;

	wrp_immed(nfp_prog, reg_both(dst), imm_lo);

	/* mov is always 1 insn, load imm may be two, so try to use mov */
	if (imm_hi == imm_lo)
		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
	else
		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);

	return 0;
}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	meta->double_cb = imm_ld8_part2;
	return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}

static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
			    true, wrp_lmem_load);
}

static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct __sk_buff, len):
		if (size != FIELD_SIZEOF(struct __sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data):
		if (size != FIELD_SIZEOF(struct __sk_buff, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data_end):
		if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct xdp_md, data):
		if (size != FIELD_SIZEOF(struct xdp_md, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_md, data_end):
		if (size != FIELD_SIZEOF(struct xdp_md, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
				  meta->insn.dst_reg * 2, size);
}

static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ldst_gather_len)
		return nfp_cpp_memcpy(nfp_prog, meta);

	if (meta->ptr.type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
			return mem_ldx_xdp(nfp_prog, meta, size);
		else
			return mem_ldx_skb(nfp_prog, meta, size);
	}

	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_ldx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_ldx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}

static int
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    unsigned int size)
{
	u64 imm = meta->insn.imm; /* sign extend */
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				  imm, size);
}

static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_st_data(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 1);
}

static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 2);
}

static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 4);
}

static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 8);
}

static int
mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				   meta->insn.src_reg * 2, size);
}

static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
			    false, wrp_lmem_store);
}

static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_stx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_stx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 1);
}

static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 2);
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 4);
}

static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 8);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}

static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}
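
/* 64-bit equality above is tested without a wide compare: each half is
 * XORed with the corresponding immediate half (skipped when that half is
 * zero), the two results are ORed, and BR_BEQ fires only if the combined
 * value is zero, i.e. both halves matched.
 */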
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}
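/* Unlike JEQ, JSET and JNE against a 64-bit immediate are compiled as
 * two independent word tests, each followed by its own conditional
 * branch to the same target: either 32-bit half on its own can make
 * the condition true, so no combining OR is needed.
 */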
static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}

static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	switch (meta->insn.imm) {
	case BPF_FUNC_xdp_adjust_head:
		return adjust_head(nfp_prog, meta);
	default:
		WARN_ONCE(1, "verifier allowed unsupported function\n");
		return -EOPNOTSUPP;
	}
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br_relo(nfp_prog, BR_UNC, 0, 0, RELO_BR_GO_OUT);

	return 0;
}

static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
	[BPF_ALU64 | BPF_NEG] =		neg_reg64,
	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
	[BPF_ALU | BPF_NEG] =		neg_reg,
	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
	[BPF_STX | BPF_MEM | BPF_B] =	mem_stx1,
	[BPF_STX | BPF_MEM | BPF_H] =	mem_stx2,
	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
	[BPF_STX | BPF_MEM | BPF_DW] =	mem_stx8,
	[BPF_ST | BPF_MEM | BPF_B] =	mem_st1,
	[BPF_ST | BPF_MEM | BPF_H] =	mem_st2,
	[BPF_ST | BPF_MEM | BPF_W] =	mem_st4,
	[BPF_ST | BPF_MEM | BPF_DW] =	mem_st8,
	[BPF_JMP | BPF_JA | BPF_K] =	jump,
	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
	[BPF_JMP | BPF_CALL] =		call,
	[BPF_JMP | BPF_EXIT] =		goto_out,
};
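/* The table above is indexed directly by the 8-bit BPF opcode, making
 * dispatch a single array lookup; e.g. BPF_ALU64 | BPF_ADD | BPF_X is
 * opcode 0x0f and resolves to add_reg64().
 */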
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *jmp_dst;
	u32 idx, br_idx;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;
		if (meta->insn.code == (BPF_JMP | BPF_CALL))
			continue;
		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
			continue;

		if (list_is_last(&meta->l, &nfp_prog->insns))
			br_idx = nfp_prog->last_bpf_off;
		else
			br_idx = list_next_entry(meta, l)->off - 1;

		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
			return -ELOOP;
		}
		/* Leave special branches for later */
		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
		    RELO_BR_REL)
			continue;

		if (!meta->jmp_dst) {
			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
			return -ELOOP;
		}

		jmp_dst = meta->jmp_dst;

		if (jmp_dst->skip) {
			pr_err("Branch landing on removed instruction!!\n");
			return -ELOOP;
		}

		for (idx = meta->off; idx <= br_idx; idx++) {
			if (!nfp_is_br(nfp_prog->prog[idx]))
				continue;
			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
		}
	}

	return 0;
}

static void nfp_intro(struct nfp_prog *nfp_prog)
{
	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}

static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1    ok         NOT SUPPORTED[1]
	 *   2      drop  0x22 -> drop,  count as stat1
	 *   4,5    nuke  0x02 -> drop
	 *   7      redir 0x44 -> redir, count as stat2
	 *   *      unspec 0x11 -> pass, count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts so that's the only one we handle for passing
	 *     packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, 0, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, 0, 2, RELO_BR_NEXT_PKT);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
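/* The two immediates loaded above act as nibble-wide lookup tables
 * indexed by the return value in R0; the emitted code computes
 *
 *   (((0x41001211 >> (R0 * 4)) & 0xf) << 4) |
 *    ((0x41221211 >> (R0 * 4)) & 0xf)
 *
 * e.g. R0 == 2 (drop) yields 0x22 and R0 == 7 (redirect) yields 0x44,
 * matching the table in the comment at the top of the function.
 */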
static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0 aborted  0x82 -> drop,  count as stat3
	 *   1    drop  0x22 -> drop,  count as stat1
	 *   2    pass  0x11 -> pass,  count as stat0
	 *   3      tx  0x44 -> redir, count as stat2
	 *   * unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, 0, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, 0, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
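/* Same table trick as the TC outro, but with byte-wide entries in a
 * single immediate: the action byte is (0x44112282 >> (R0 * 8)) & 0xff,
 * e.g. R0 == 3 (XDP_TX) selects 0x44 (redirect).
 */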
static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		nfp_outro_tc_da(nfp_prog);
		break;
	case BPF_PROG_TYPE_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	default:
		WARN_ON(1);
	}
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}
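/* A typical sequence removed by the pass above, as generated from
 * cBPF:
 *
 *   r0 = *(u16 *)skb[off]   (BPF_LD | BPF_ABS | BPF_H)
 *   r0 &= 0xffff            (BPF_ALU64 | BPF_AND | BPF_K)
 *
 * The load already zero-extends the value to 64 bits, so the masking
 * instruction is redundant and gets skipped.
 */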
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

/* A load/store pair that forms a memory copy should look like the
 * following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the
 * store must be the same, and the load and store must use the same
 * width.  If either addr_src or addr_dest is the stack pointer, we
 * don't do the CPP optimization as the stack is modelled by registers
 * on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn of this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair; there is nothing to chain against. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return TRUE if a cross memory access happens.  A cross memory access
 * means the store area overlaps with the load area, so a later load
 * might read back a value written by a previous store; in that case we
 * can't treat the sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET; if the ID info differs
	 * we can't rule out overlap.
	 */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets: express all of them relative to the
	 * original base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}
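/* Worked example of an ascending-order cross, with all offsets already
 * canonicalized against the same base:
 *
 *   head load  @ off 0
 *   head store @ off 2
 *        load  @ off 2   <- reads bytes the head store wrote
 *
 * Here ld_off (2) > head_ld_off (0), head_ld_off < head_st_off (2)
 * and ld_off >= head_st_off, so cross_mem_access() returns true.
 */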
/* This pass tries to identify the following instruction sequence:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * Such a sequence is typically generated by the compiler when lowering
 * memcpy; on the NFP we prefer to accelerate it with CPP instructions.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not a load/store pair.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crosses the previous pair.
		 * - The chained load/store pair has a total size of memory
		 *   copy beyond 128 bytes, which is the maximum length a
		 *   single NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain was ended by a load/store pair then
			 * that pair could serve as the head of the next
			 * chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);

	return 0;
}

static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	return ret;
}
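/* BPF jump offsets are relative to the instruction that follows the
 * jump, hence the "meta->n + 1 + meta->insn.off" destination index
 * computed in nfp_bpf_jit_prepare() below.
 */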
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		}
	}
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}