/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"

/* --- NFP prog --- */
/* The "for each multiple entries" macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}
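
/* nfp_prog_current_offset() above returns the number of instruction words
 * emitted so far.  Sequences which pre-compute a branch target as
 * "current offset + N" (see e.g. adjust_head()) use
 * nfp_prog_confirm_current_offset() below to verify that exactly N words
 * were actually emitted in between.
 */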
static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the
	 * translation will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}
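
/* Note on __emit_br() above: only the low bits of the target fit into
 * OP_BR_ADDR_LO; OP_BR_ADDR_HI is set simply when @addr has any bits above
 * that field (addr != addr_lo).  Final branch targets are typically fixed
 * up later via the relocation type which emit_br_relo() below stamps into
 * the just-emitted instruction word.
 */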
static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
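
/* Note on __emit_shf() above: the hardware encodes a left shift as a right
 * shift by the complement, so SHF_SC_L_SHF stores (32 - shift) in the
 * shift field.  E.g. a left shift by 8 is emitted with a shift field of 24.
 */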
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}
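
/* ld_field above is effectively a read-modify-write of @dst: @bmask is a
 * byte enable, so e.g. bmask 0x3 replaces only bytes 0 and 1 with the
 * (shifted) source.  With @zero set, bytes not selected by the mask are
 * cleared instead of preserved, which is how subfields get extracted
 * (see wrp_reg_subpart() below).
 */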
static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
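
/* Worked example for wrp_immed() above: 0x00abcd00 has no bits in
 * 0xff0000ff, so it packs as val 0xabcd with a one byte shift and costs a
 * single immed instruction.  0xdeadbeef packs neither directly nor
 * inverted, so it is emitted as two instructions: an immed of the low
 * 16 bits followed by a word-width write of the high 16 bits
 * (IMMED_WIDTH_WORD, IMMED_SHIFT_2B).
 */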
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough encode it directly in operand and return
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If the @imm is small enough encode it directly in operand and return
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from low end of @src, OR the
 * result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
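
/* 40-bit addresses are held in a GPR pair: the low 32 bits in @src_gpr and
 * the high bits in @src_gpr + 1.  addr40_offset() above adds @offset to the
 * low word with ALU_OP_ADD and propagates the carry into the high word
 * with ALU_OP_ADD_C.
 */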
/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned
		 * part, then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before this
	 * transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}
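
/* Data moves through the 32-bit transfer registers above:
 * xfer_num = round_up(len, 4) / 4 is the number of registers used, so e.g.
 * len = 30 occupies 8 of them.  Lengths above 32 bytes cannot be encoded in
 * the command directly, hence the indirect_ref variants with the length
 * override staged via PREV_ALU (CMD_OVE_LEN).
 */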
static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
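
/* data_ld() above reads with CMD_TGT_READ8, which returns big endian data,
 * so a short read lands in the top bytes of the transfer register.  E.g.
 * for size 2 the value is recovered by shifting right by
 * (4 - 2) * 8 = 16 bits.
 */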
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
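
/* An lmem_step callback is invoked by mem_op_stack() once per slice of an
 * access: @gpr/@gpr_byte locate the slice within the GPR pair, @off is the
 * offset into the stack, and @first/@new_gpr/@last describe where the slice
 * sits within the whole access, so the callback can decide when a
 * read-modify-write or an LM pointer increment is needed.
 */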
static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM
		 * word into a new GPR.  IOW we've already looked at that LMEM
		 * word and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
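
/* Alignment fix-up in wrp_lmem_load() above: with src_byte 2 and dst_byte 0
 * the field is moved down with a right shift of 16; with src_byte 0 and
 * dst_byte 2 it is moved up, which the hardware encodes as SHF_SC_L_SHF
 * with shf = 32 - 16.
 */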
993 */ 994 if (first || last) 995 wrp_mov(nfp_prog, reg, reg_lm(0, idx)); 996 } 997 998 emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf); 999 1000 if (new_gpr || last) { 1001 if (idx > RE_REG_LM_IDX_MAX) 1002 wrp_mov(nfp_prog, reg_lm(0, idx), reg); 1003 if (should_inc) 1004 wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3)); 1005 } 1006 1007 return 0; 1008 } 1009 1010 static int 1011 mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1012 unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr, 1013 bool clr_gpr, lmem_step step) 1014 { 1015 s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; 1016 bool first = true, last; 1017 bool needs_inc = false; 1018 swreg stack_off_reg; 1019 u8 prev_gpr = 255; 1020 u32 gpr_byte = 0; 1021 bool lm3 = true; 1022 int ret; 1023 1024 if (meta->ptr_not_const) { 1025 /* Use of the last encountered ptr_off is OK, they all have 1026 * the same alignment. Depend on low bits of value being 1027 * discarded when written to LMaddr register. 1028 */ 1029 stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off, 1030 stack_imm(nfp_prog)); 1031 1032 emit_alu(nfp_prog, imm_b(nfp_prog), 1033 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg); 1034 1035 needs_inc = true; 1036 } else if (off + size <= 64) { 1037 /* We can reach bottom 64B with LMaddr0 */ 1038 lm3 = false; 1039 } else if (round_down(off, 32) == round_down(off + size - 1, 32)) { 1040 /* We have to set up a new pointer. If we know the offset 1041 * and the entire access falls into a single 32 byte aligned 1042 * window we won't have to increment the LM pointer. 1043 * The 32 byte alignment is imporant because offset is ORed in 1044 * not added when doing *l$indexN[off]. 1045 */ 1046 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32), 1047 stack_imm(nfp_prog)); 1048 emit_alu(nfp_prog, imm_b(nfp_prog), 1049 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); 1050 1051 off %= 32; 1052 } else { 1053 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4), 1054 stack_imm(nfp_prog)); 1055 1056 emit_alu(nfp_prog, imm_b(nfp_prog), 1057 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); 1058 1059 needs_inc = true; 1060 } 1061 if (lm3) { 1062 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); 1063 /* For size < 4 one slot will be filled by zeroing of upper. */ 1064 wrp_nops(nfp_prog, clr_gpr && size < 8 ? 
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}
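
/* For the 64-bit immediate ALU helpers below the 32-bit BPF immediate is
 * sign extended, e.g. imm = -1 becomes 0xffffffffffffffff; the low word
 * (imm & ~0U) is applied to dst and the high word (imm >> 32) to dst + 1.
 */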
static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg, breg;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
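
/* wrp_end32() above byte swaps a word with two ld_field rotates: for input
 * bytes A B C D (MSB to LSB) the first rotate right by 8 yields D A B C,
 * then merging bytes 0 and 2 (mask 0x5) of that value rotated right by 16
 * (B C D A) produces D C B A.
 */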
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
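
/* A branch with @defer N executes the N following instructions in its delay
 * slots before the jump takes effect.  adjust_head() above uses "defer 2"
 * to perform the length updates while skipping the -EINVAL return path, and
 * map_call_stack_common() below relies on the same trick to load the map ID
 * and return address after the branch has been issued.
 */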
1381 */ 1382 tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog)); 1383 1384 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 1385 2, RELO_BR_HELPER); 1386 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; 1387 1388 /* Load map ID into A0 */ 1389 wrp_mov(nfp_prog, reg_a(0), tid); 1390 1391 /* Load the return address into B0 */ 1392 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); 1393 1394 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 1395 return -EINVAL; 1396 1397 /* Reset the LM0 pointer */ 1398 if (!load_lm_ptr) 1399 return 0; 1400 1401 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); 1402 wrp_nops(nfp_prog, 3); 1403 1404 return 0; 1405 } 1406 1407 static int 1408 nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1409 { 1410 __emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM); 1411 /* CSR value is read in following immed[gpr, 0] */ 1412 emit_immed(nfp_prog, reg_both(0), 0, 1413 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1414 emit_immed(nfp_prog, reg_both(1), 0, 1415 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); 1416 return 0; 1417 } 1418 1419 /* --- Callbacks --- */ 1420 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1421 { 1422 const struct bpf_insn *insn = &meta->insn; 1423 u8 dst = insn->dst_reg * 2; 1424 u8 src = insn->src_reg * 2; 1425 1426 if (insn->src_reg == BPF_REG_10) { 1427 swreg stack_depth_reg; 1428 1429 stack_depth_reg = ur_load_imm_any(nfp_prog, 1430 nfp_prog->stack_depth, 1431 stack_imm(nfp_prog)); 1432 emit_alu(nfp_prog, reg_both(dst), 1433 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg); 1434 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1435 } else { 1436 wrp_reg_mov(nfp_prog, dst, src); 1437 wrp_reg_mov(nfp_prog, dst + 1, src + 1); 1438 } 1439 1440 return 0; 1441 } 1442 1443 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1444 { 1445 u64 imm = meta->insn.imm; /* sign extend */ 1446 1447 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U); 1448 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32); 1449 1450 return 0; 1451 } 1452 1453 static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1454 { 1455 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR); 1456 } 1457 1458 static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1459 { 1460 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm); 1461 } 1462 1463 static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1464 { 1465 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND); 1466 } 1467 1468 static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1469 { 1470 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 1471 } 1472 1473 static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1474 { 1475 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR); 1476 } 1477 1478 static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1479 { 1480 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 1481 } 1482 1483 static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1484 { 1485 const struct bpf_insn *insn = &meta->insn; 1486 1487 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1488 reg_a(insn->dst_reg * 2), ALU_OP_ADD, 1489 reg_b(insn->src_reg * 2)); 1490 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1491 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C, 1492 reg_b(insn->src_reg * 2 + 1)); 1493 1494 return 0; 
1495 } 1496 1497 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1498 { 1499 const struct bpf_insn *insn = &meta->insn; 1500 u64 imm = insn->imm; /* sign extend */ 1501 1502 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1503 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1504 1505 return 0; 1506 } 1507 1508 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1509 { 1510 const struct bpf_insn *insn = &meta->insn; 1511 1512 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1513 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1514 reg_b(insn->src_reg * 2)); 1515 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1516 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1517 reg_b(insn->src_reg * 2 + 1)); 1518 1519 return 0; 1520 } 1521 1522 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1523 { 1524 const struct bpf_insn *insn = &meta->insn; 1525 u64 imm = insn->imm; /* sign extend */ 1526 1527 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1528 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1529 1530 return 0; 1531 } 1532 1533 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1534 { 1535 const struct bpf_insn *insn = &meta->insn; 1536 1537 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1538 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1539 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1540 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1541 1542 return 0; 1543 } 1544 1545 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1546 { 1547 const struct bpf_insn *insn = &meta->insn; 1548 u8 dst = insn->dst_reg * 2; 1549 1550 if (insn->imm < 32) { 1551 emit_shf(nfp_prog, reg_both(dst + 1), 1552 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), 1553 SHF_SC_R_DSHF, 32 - insn->imm); 1554 emit_shf(nfp_prog, reg_both(dst), 1555 reg_none(), SHF_OP_NONE, reg_b(dst), 1556 SHF_SC_L_SHF, insn->imm); 1557 } else if (insn->imm == 32) { 1558 wrp_reg_mov(nfp_prog, dst + 1, dst); 1559 wrp_immed(nfp_prog, reg_both(dst), 0); 1560 } else if (insn->imm > 32) { 1561 emit_shf(nfp_prog, reg_both(dst + 1), 1562 reg_none(), SHF_OP_NONE, reg_b(dst), 1563 SHF_SC_L_SHF, insn->imm - 32); 1564 wrp_immed(nfp_prog, reg_both(dst), 0); 1565 } 1566 1567 return 0; 1568 } 1569 1570 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1571 { 1572 const struct bpf_insn *insn = &meta->insn; 1573 u8 dst = insn->dst_reg * 2; 1574 1575 if (insn->imm < 32) { 1576 emit_shf(nfp_prog, reg_both(dst), 1577 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), 1578 SHF_SC_R_DSHF, insn->imm); 1579 emit_shf(nfp_prog, reg_both(dst + 1), 1580 reg_none(), SHF_OP_NONE, reg_b(dst + 1), 1581 SHF_SC_R_SHF, insn->imm); 1582 } else if (insn->imm == 32) { 1583 wrp_reg_mov(nfp_prog, dst, dst + 1); 1584 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1585 } else if (insn->imm > 32) { 1586 emit_shf(nfp_prog, reg_both(dst), 1587 reg_none(), SHF_OP_NONE, reg_b(dst + 1), 1588 SHF_SC_R_SHF, insn->imm - 32); 1589 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 1590 } 1591 1592 return 0; 1593 } 1594 1595 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1596 { 1597 const struct bpf_insn *insn = &meta->insn; 1598 1599 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 1600 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1601 1602 return 0; 1603 } 1604 1605 static int mov_imm(struct nfp_prog *nfp_prog, struct 
static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u8 dst = meta->insn.dst_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}
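
/* BPF_LD | BPF_IMM | BPF_DW occupies two BPF instruction slots: the low
 * 32 bits of the immediate live in the first one and the high 32 bits in
 * the second.  imm_ld8() below therefore defers to imm_ld8_part2() via
 * double_cb so that both halves are known when code is emitted.
 */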
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
	u32 imm_lo, imm_hi;
	u8 dst;

	dst = prev->insn.dst_reg * 2;
	imm_lo = prev->insn.imm;
	imm_hi = meta->insn.imm;

	wrp_immed(nfp_prog, reg_both(dst), imm_lo);

	/* mov is always 1 insn, load imm may be two, so try to use mov */
	if (imm_hi == imm_lo)
		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
	else
		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);

	return 0;
}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	meta->double_cb = imm_ld8_part2;
	return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}

static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
			    true, wrp_lmem_load);
}

static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct __sk_buff, len):
		if (size != FIELD_SIZEOF(struct __sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data):
		if (size != FIELD_SIZEOF(struct __sk_buff, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data_end):
		if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct xdp_md, data):
		if (size != FIELD_SIZEOF(struct xdp_md, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_md, data_end):
		if (size != FIELD_SIZEOF(struct xdp_md, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static int
mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}
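
/* Packet cache: when a group of packet loads has been marked as covered by
 * one pkt_cache range (computed earlier during offload analysis), the range
 * is read into the transfer registers once (do_init) and the individual
 * loads are then served straight from reg_xfer() instead of issuing further
 * memory reads.
 */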
static void
mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta)
{
	s16 range_start = meta->pkt_cache.range_start;
	s16 range_end = meta->pkt_cache.range_end;
	swreg src_base, off;
	u8 xfer_num, len;
	bool indir;

	off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
	src_base = reg_a(meta->insn.src_reg * 2);
	len = range_end - range_start;
	xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;

	indir = len > 8 * REG_WIDTH;
	/* Setup PREV_ALU for indirect mode. */
	if (indir)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Cache memory into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
		     off, xfer_num - 1, CMD_CTX_SWAP, indir);
}
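
/* An unaligned cached load can span up to three transfer registers.  With
 * REG_WIDTH == 4, off == 3 and size == 8 the pieces handled below are:
 * len_lo = 1 byte from the first register, a full 4 byte middle register
 * (len_mid clamped to REG_WIDTH), and the remaining 3 bytes from the third,
 * reassembled with wrp_reg_subpart()/wrp_reg_or_subpart().
 */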
static void
mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta)
{
	s16 range_start = meta->pkt_cache.range_start;
	s16 range_end = meta->pkt_cache.range_end;
	swreg src_base, off;
	u8 xfer_num, len;
	bool indir;

	off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
	src_base = reg_a(meta->insn.src_reg * 2);
	len = range_end - range_start;
	xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;

	indir = len > 8 * REG_WIDTH;
	/* Setup PREV_ALU for indirect mode. */
	if (indir)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Cache memory into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
		     off, xfer_num - 1, CMD_CTX_SWAP, indir);
}

static int
mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
				     struct nfp_insn_meta *meta,
				     unsigned int size)
{
	s16 range_start = meta->pkt_cache.range_start;
	s16 insn_off = meta->insn.off - range_start;
	swreg dst_lo, dst_hi, src_lo, src_mid;
	u8 dst_gpr = meta->insn.dst_reg * 2;
	u8 len_lo = size, len_mid = 0;
	u8 idx = insn_off / REG_WIDTH;
	u8 off = insn_off % REG_WIDTH;

	dst_hi = reg_both(dst_gpr + 1);
	dst_lo = reg_both(dst_gpr);
	src_lo = reg_xfer(idx);

	/* The read length could involve as many as three registers. */
	if (size > REG_WIDTH - off) {
		/* Calculate the part in the second register. */
		len_lo = REG_WIDTH - off;
		len_mid = size - len_lo;

		/* Calculate the part in the third register. */
		if (size > 2 * REG_WIDTH - off)
			len_mid = REG_WIDTH;
	}

	wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);

	if (!len_mid) {
		wrp_immed(nfp_prog, dst_hi, 0);
		return 0;
	}

	src_mid = reg_xfer(idx + 1);

	if (size <= REG_WIDTH) {
		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else {
		swreg src_hi = reg_xfer(idx + 2);

		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
				   REG_WIDTH - len_lo, len_lo);
		wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
				REG_WIDTH - len_lo);
		wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
				   len_lo);
	}

	return 0;
}

static int
mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
				   struct nfp_insn_meta *meta,
				   unsigned int size)
{
	swreg dst_lo, dst_hi, src_lo;
	u8 dst_gpr, idx;

	idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
	dst_gpr = meta->insn.dst_reg * 2;
	dst_hi = reg_both(dst_gpr + 1);
	dst_lo = reg_both(dst_gpr);
	src_lo = reg_xfer(idx);

	if (size < REG_WIDTH) {
		wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else if (size == REG_WIDTH) {
		wrp_mov(nfp_prog, dst_lo, src_lo);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else {
		swreg src_hi = reg_xfer(idx + 1);

		wrp_mov(nfp_prog, dst_lo, src_lo);
		wrp_mov(nfp_prog, dst_hi, src_hi);
	}

	return 0;
}

static int
mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta, unsigned int size)
{
	u8 off = meta->insn.off - meta->pkt_cache.range_start;

	if (IS_ALIGNED(off, REG_WIDTH))
		return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);

	return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
}

static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ldst_gather_len)
		return nfp_cpp_memcpy(nfp_prog, meta);

	if (meta->ptr.type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
			return mem_ldx_xdp(nfp_prog, meta, size);
		else
			return mem_ldx_skb(nfp_prog, meta, size);
	}

	if (meta->ptr.type == PTR_TO_PACKET) {
		if (meta->pkt_cache.range_end) {
			if (meta->pkt_cache.do_init)
				mem_ldx_data_init_pktcache(nfp_prog, meta);

			return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
		} else {
			return mem_ldx_data(nfp_prog, meta, size);
		}
	}

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_ldx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	if (meta->ptr.type == PTR_TO_MAP_VALUE)
		return mem_ldx_emem(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}
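/* BPF_ST stores the instruction's immediate rather than a register.  The
 * implicit sign extension to u64 in mem_st_data() below means e.g. a
 * 1-byte store of imm == -1 writes 0xff and an 8-byte store writes
 * 0xffffffffffffffff.
 */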
static int
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    unsigned int size)
{
	u64 imm = meta->insn.imm; /* sign extend */
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				  imm, size);
}

static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_st_data(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 1);
}

static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 2);
}

static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 4);
}

static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 8);
}

static int
mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				   meta->insn.src_reg * 2, size);
}

static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
			    false, wrp_lmem_store);
}

static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_stx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_stx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 1);
}

static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 2);
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 4);
}

static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 8);
}
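/* Worked example for the offset bookkeeping in mem_xadd() below: with a
 * non-zero insn offset and both xadd_maybe_16bit and xadd_over_16bit set,
 * the emitted sequence is 4 insns of branch selection, 2 insns of address
 * add (sitting in the branch's defer slots), 3 insns for the add_imm
 * variant plus an unconditional branch past the full add, then the full
 * add itself (1 cmd + 1 or 2 movs).  "full_add" thus lands 10 insns past
 * the start and "out" 12 + is64 insns past it, matching the increments
 * below; nfp_prog_confirm_current_offset() double-checks the math.
 */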
static int
mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
{
	u8 dst_gpr = meta->insn.dst_reg * 2;
	u8 src_gpr = meta->insn.src_reg * 2;
	unsigned int full_add, out;
	swreg addra, addrb, off;

	off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	/* We can fit 16 bits into the command immediate.  If we know the
	 * immediate is guaranteed to either always or never fit into 16 bits,
	 * we only generate code to handle that particular case; otherwise we
	 * generate code for both.
	 */
	out = nfp_prog_current_offset(nfp_prog);
	full_add = nfp_prog_current_offset(nfp_prog);

	if (meta->insn.off) {
		out += 2;
		full_add += 2;
	}
	if (meta->xadd_maybe_16bit) {
		out += 3;
		full_add += 3;
	}
	if (meta->xadd_over_16bit)
		out += 2 + is64;
	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
		out += 5;
		full_add += 5;
	}

	/* Generate the branch for choosing add_imm vs add */
	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
		swreg max_imm = imm_a(nfp_prog);

		wrp_immed(nfp_prog, max_imm, 0xffff);
		emit_alu(nfp_prog, reg_none(),
			 max_imm, ALU_OP_SUB, reg_b(src_gpr));
		emit_alu(nfp_prog, reg_none(),
			 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
		emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
		/* defer for add */
	}

	/* If the insn has an offset, add it to the address */
	if (!meta->insn.off) {
		addra = reg_a(dst_gpr);
		addrb = reg_b(dst_gpr + 1);
	} else {
		emit_alu(nfp_prog, imma_a(nfp_prog),
			 reg_a(dst_gpr), ALU_OP_ADD, off);
		emit_alu(nfp_prog, imma_b(nfp_prog),
			 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
		addra = imma_a(nfp_prog);
		addrb = imma_b(nfp_prog);
	}

	/* Generate the add_imm if 16 bits are possible */
	if (meta->xadd_maybe_16bit) {
		swreg prev_alu = imm_a(nfp_prog);

		wrp_immed(nfp_prog, prev_alu,
			  FIELD_PREP(CMD_OVE_DATA, 2) |
			  CMD_OVE_LEN |
			  FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
		wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
		emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
			       addra, addrb, 0, CMD_CTX_NO_SWAP);

		if (meta->xadd_over_16bit)
			emit_br(nfp_prog, BR_UNC, out, 0);
	}

	if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
		return -EINVAL;

	/* Generate the add if 16 bits are not guaranteed */
	if (meta->xadd_over_16bit) {
		emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
			 addra, addrb, is64 << 2,
			 is64 ? CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1);

		wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr));
		if (is64)
			wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1));
	}

	if (!nfp_prog_confirm_current_offset(nfp_prog, out))
		return -EINVAL;

	return 0;
}
static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_xadd(nfp_prog, meta, false);
}

static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_xadd(nfp_prog, meta, true);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}

static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
}

static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
}

static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
}

static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
}
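/* The compare wrappers above all funnel into wrp_cmp_imm(); the final
 * bool swaps the operand order so only "lower"-style branch masks are
 * needed.  E.g. for BPF_JGT (dst > imm), the swapped subtraction
 * imm - dst borrows exactly when imm < dst, hence BR_BLO; BPF_JLT uses
 * the same mask without the swap.  The signed variants map the same way
 * onto BR_BLT/BR_BGE.
 */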
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}

static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}

static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
}

static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
}

static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
}

static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	switch (meta->insn.imm) {
	case BPF_FUNC_xdp_adjust_head:
		return adjust_head(nfp_prog, meta);
	case BPF_FUNC_map_lookup_elem:
	case BPF_FUNC_map_update_elem:
	case BPF_FUNC_map_delete_elem:
		return map_call_stack_common(nfp_prog, meta);
	case BPF_FUNC_get_prandom_u32:
		return nfp_get_prandom_u32(nfp_prog, meta);
	default:
		WARN_ONCE(1, "verifier allowed unsupported function\n");
		return -EOPNOTSUPP;
	}
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);

	return 0;
}
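/* The jump table below is indexed directly by the 8-bit BPF opcode, e.g.
 * BPF_ALU64 | BPF_ADD | BPF_X == 0x0f selects add_reg64().  Opcodes
 * without a translator leave a NULL entry, which
 * nfp_bpf_supported_opcode() reports and nfp_translate() turns into
 * -ENOENT.
 */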
static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
	[BPF_ALU64 | BPF_NEG] =		neg_reg64,
	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
	[BPF_ALU | BPF_NEG] =		neg_reg,
	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
	[BPF_STX | BPF_MEM | BPF_B] =	mem_stx1,
	[BPF_STX | BPF_MEM | BPF_H] =	mem_stx2,
	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
	[BPF_STX | BPF_MEM | BPF_DW] =	mem_stx8,
	[BPF_STX | BPF_XADD | BPF_W] =	mem_xadd4,
	[BPF_STX | BPF_XADD | BPF_DW] =	mem_xadd8,
	[BPF_ST | BPF_MEM | BPF_B] =	mem_st1,
	[BPF_ST | BPF_MEM | BPF_H] =	mem_st2,
	[BPF_ST | BPF_MEM | BPF_W] =	mem_st4,
	[BPF_ST | BPF_MEM | BPF_DW] =	mem_st8,
	[BPF_JMP | BPF_JA | BPF_K] =	jump,
	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
	[BPF_JMP | BPF_JSGT | BPF_K] =	jsgt_imm,
	[BPF_JMP | BPF_JSGE | BPF_K] =	jsge_imm,
	[BPF_JMP | BPF_JSLT | BPF_K] =	jslt_imm,
	[BPF_JMP | BPF_JSLE | BPF_K] =	jsle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
	[BPF_JMP | BPF_JSGT | BPF_X] =	jsgt_reg,
	[BPF_JMP | BPF_JSGE | BPF_X] =	jsge_reg,
	[BPF_JMP | BPF_JSLT | BPF_X] =	jslt_reg,
	[BPF_JMP | BPF_JSLE | BPF_X] =	jsle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
	[BPF_JMP | BPF_CALL] =		call,
	[BPF_JMP | BPF_EXIT] =		goto_out,
};

/* --- Assembler logic --- */
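/* Every translated BPF jump ends its NFP code block with a branch whose
 * target is patched after translation: nfp_fixup_branches() below finds
 * the branch at the end of each jump's block (br_idx) and points it at
 * the NFP offset of the jump destination (meta->jmp_dst->off), recorded
 * earlier by nfp_bpf_jit_prepare().  Branches carrying a relocation type
 * other than RELO_BR_REL are left for nfp_bpf_relo_for_vnic().
 */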
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *jmp_dst;
	u32 idx, br_idx;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;
		if (meta->insn.code == (BPF_JMP | BPF_CALL))
			continue;
		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
			continue;

		if (list_is_last(&meta->l, &nfp_prog->insns))
			br_idx = nfp_prog->last_bpf_off;
		else
			br_idx = list_next_entry(meta, l)->off - 1;

		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
			return -ELOOP;
		}
		/* Leave special branches for later */
		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
		    RELO_BR_REL)
			continue;

		if (!meta->jmp_dst) {
			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
			return -ELOOP;
		}

		jmp_dst = meta->jmp_dst;

		if (jmp_dst->skip) {
			pr_err("Branch landing on removed instruction!!\n");
			return -ELOOP;
		}

		for (idx = meta->off; idx <= br_idx; idx++) {
			if (!nfp_is_br(nfp_prog->prog[idx]))
				continue;
			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
		}
	}

	return 0;
}
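/* nfp_intro() below initializes the packet-length register: it seeds
 * plen_reg with GENMASK(13, 0) and ANDs that against pv_len, keeping
 * only the low 14 bits of the packet-vector word, which hold the packet
 * length.
 */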
2617 */ 2618 /* Target for aborts */ 2619 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2620 2621 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2622 2623 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2624 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 2625 2626 /* Target for normal exits */ 2627 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2628 2629 /* if R0 > 7 jump to abort */ 2630 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 2631 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2632 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2633 2634 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 2635 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 2636 2637 emit_shf(nfp_prog, reg_a(1), 2638 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 2639 2640 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2641 emit_shf(nfp_prog, reg_a(2), 2642 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2643 2644 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2645 emit_shf(nfp_prog, reg_b(2), 2646 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 2647 2648 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2649 2650 emit_shf(nfp_prog, reg_b(2), 2651 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 2652 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2653 } 2654 2655 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 2656 { 2657 /* XDP return codes: 2658 * 0 aborted 0x82 -> drop, count as stat3 2659 * 1 drop 0x22 -> drop, count as stat1 2660 * 2 pass 0x11 -> pass, count as stat0 2661 * 3 tx 0x44 -> redir, count as stat2 2662 * * unknown 0x82 -> drop, count as stat3 2663 */ 2664 /* Target for aborts */ 2665 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2666 2667 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2668 2669 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2670 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 2671 2672 /* Target for normal exits */ 2673 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2674 2675 /* if R0 > 3 jump to abort */ 2676 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 2677 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2678 2679 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 2680 2681 emit_shf(nfp_prog, reg_a(1), 2682 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 2683 2684 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2685 emit_shf(nfp_prog, reg_b(2), 2686 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2687 2688 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2689 2690 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2691 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2692 } 2693 2694 static void nfp_outro(struct nfp_prog *nfp_prog) 2695 { 2696 switch (nfp_prog->type) { 2697 case BPF_PROG_TYPE_SCHED_CLS: 2698 nfp_outro_tc_da(nfp_prog); 2699 break; 2700 case BPF_PROG_TYPE_XDP: 2701 nfp_outro_xdp(nfp_prog); 2702 break; 2703 default: 2704 WARN_ON(1); 2705 } 2706 } 2707 2708 static int nfp_translate(struct nfp_prog *nfp_prog) 2709 { 2710 struct nfp_insn_meta *meta; 2711 int err; 2712 2713 nfp_intro(nfp_prog); 2714 if (nfp_prog->error) 2715 return nfp_prog->error; 2716 2717 list_for_each_entry(meta, &nfp_prog->insns, l) { 2718 instr_cb_t cb = instr_cb[meta->insn.code]; 2719 2720 meta->off = nfp_prog_current_offset(nfp_prog); 2721 2722 if (meta->skip) { 2723 
static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		nfp_outro_tc_da(nfp_prog);
		break;
	case BPF_PROG_TYPE_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	default:
		WARN_ON(1);
	}
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}
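/* The two passes above target patterns left over from classic BPF
 * conversion: a BPF_ABS/BPF_IND load followed by "r0 &= 0xff" (or the
 * 0xffff/0xffffffff variants), or by the "r0 <<= 32; r0 >>= 32"
 * zero-extension pair, can drop the masking and shifting because the
 * NFP data loads already zero all bytes beyond the load size.
 */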
/* A load/store pair that forms a memory copy should look like the
 * following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the
 * store should be the same, and load and store should also operate at
 * the same width.  If either addr_src or addr_dest is the stack pointer,
 * we don't do the CPP optimization as the stack is modelled by registers
 * on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return TRUE if a cross memory access happens.  A cross memory access
 * means the store area overlaps the load area such that a later load
 * might read the value from a previous store; in that case we can't
 * treat the sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Both load and store are PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets.  Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}
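/* Example of an ascending-order cross caught above: with the pairs
 * "ld r1, [pkt + 0]; st [pkt + 4], r1; ld r1, [pkt + 4]; ..." the second
 * load starts exactly at the first store's offset, so it may observe the
 * stored data and the sequence is not a plain memcpy.
 */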
/* This pass tries to identify the following instruction sequences:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when lowering
 * memcpy; the NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crossed with the previous
		 *   pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then
			 * this could serve as the new head of the next chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}
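/* Worked example for the pass above: three chained 8-byte pairs copying
 * pkt[0..23] leave only the head load un-skipped, with
 * head_ld_meta->ldst_gather_len == 24 and paired_st pointing at the head
 * store; mem_ldx() then routes the whole chain to nfp_cpp_memcpy().  A
 * descending chain additionally has the head insn offsets rewritten to
 * the lowest offsets and its gather length negated.
 */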
3054 */ 3055 if (curr_pair_is_memcpy(meta1, meta2)) { 3056 head_ld_meta = meta1; 3057 head_st_meta = meta2; 3058 head_ld_meta->ldst_gather_len = 3059 BPF_LDST_BYTES(ld); 3060 meta1 = nfp_meta_next(meta1); 3061 meta2 = nfp_meta_next(meta2); 3062 prev_ld = ld; 3063 prev_st = st; 3064 count = 1; 3065 } else { 3066 head_ld_meta = NULL; 3067 head_st_meta = NULL; 3068 prev_ld = NULL; 3069 prev_st = NULL; 3070 count = 0; 3071 } 3072 3073 continue; 3074 } 3075 3076 if (!head_ld_meta) { 3077 head_ld_meta = meta1; 3078 head_st_meta = meta2; 3079 } else { 3080 meta1->skip = true; 3081 meta2->skip = true; 3082 } 3083 3084 head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld); 3085 meta1 = nfp_meta_next(meta1); 3086 meta2 = nfp_meta_next(meta2); 3087 prev_ld = ld; 3088 prev_st = st; 3089 count++; 3090 } 3091 } 3092 3093 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) 3094 { 3095 struct nfp_insn_meta *meta, *range_node = NULL; 3096 s16 range_start = 0, range_end = 0; 3097 bool cache_avail = false; 3098 struct bpf_insn *insn; 3099 s32 range_ptr_off = 0; 3100 u32 range_ptr_id = 0; 3101 3102 list_for_each_entry(meta, &nfp_prog->insns, l) { 3103 if (meta->flags & FLAG_INSN_IS_JUMP_DST) 3104 cache_avail = false; 3105 3106 if (meta->skip) 3107 continue; 3108 3109 insn = &meta->insn; 3110 3111 if (is_mbpf_store_pkt(meta) || 3112 insn->code == (BPF_JMP | BPF_CALL) || 3113 is_mbpf_classic_store_pkt(meta) || 3114 is_mbpf_classic_load(meta)) { 3115 cache_avail = false; 3116 continue; 3117 } 3118 3119 if (!is_mbpf_load(meta)) 3120 continue; 3121 3122 if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) { 3123 cache_avail = false; 3124 continue; 3125 } 3126 3127 if (!cache_avail) { 3128 cache_avail = true; 3129 if (range_node) 3130 goto end_current_then_start_new; 3131 goto start_new; 3132 } 3133 3134 /* Check ID to make sure two reads share the same 3135 * variable offset against PTR_TO_PACKET, and check OFF 3136 * to make sure they also share the same constant 3137 * offset. 3138 * 3139 * OFFs don't really need to be the same, because they 3140 * are the constant offsets against PTR_TO_PACKET, so 3141 * for different OFFs, we could canonicalize them to 3142 * offsets against original packet pointer. We don't 3143 * support this. 3144 */ 3145 if (meta->ptr.id == range_ptr_id && 3146 meta->ptr.off == range_ptr_off) { 3147 s16 new_start = range_start; 3148 s16 end, off = insn->off; 3149 s16 new_end = range_end; 3150 bool changed = false; 3151 3152 if (off < range_start) { 3153 new_start = off; 3154 changed = true; 3155 } 3156 3157 end = off + BPF_LDST_BYTES(insn); 3158 if (end > range_end) { 3159 new_end = end; 3160 changed = true; 3161 } 3162 3163 if (!changed) 3164 continue; 3165 3166 if (new_end - new_start <= 64) { 3167 /* Install new range. 
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}
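/* nfp_bpf_jit_prepare() below records jump destinations ahead of
 * translation: for a jump at index n with offset off, the destination is
 * BPF insn n + 1 + off, and the destination meta is flagged
 * FLAG_INSN_IS_JUMP_DST so the optimization passes know not to fold or
 * skip instructions across it.
 */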
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		}
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}