/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"

/* --- NFP prog --- */
/* The for-each macros that walk "multiple" entries provide pos and next<n>
 * pointers.  It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))
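/* Illustrative sketch only: a peephole rewrite pass can walk pairs of
 * translated instructions like
 *
 *	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
 *		if (<meta1 and meta2 form a combinable pattern>)
 *			meta2->skip = true;
 *	}
 *
 * where meta->skip marks an instruction the translator should not emit
 * code for.
 */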
static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to translator bug, and the translation
	 * will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
		    false, reg.src_lmextn);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
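/* Examples of how 32 bit immediates end up encoded: 0x00ab0000 packs into a
 * single instruction (val 0xab00 shifted left by one byte), 0xffff0001 packs
 * via the inverted path (~imm = 0xfffe), while 0x12345678 needs two
 * instructions - the low 16 bits first, then the upper half word.
 */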
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If the @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
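/* The 40 bit address is kept split across a GPR pair - low 32 bits in
 * src_gpr, bits 39:32 in src_gpr + 1.  Adding the offset to the low word
 * with ALU_OP_ADD and then adding 0 with ALU_OP_ADD_C propagates the carry
 * into the upper bits, e.g. adding 8 to 0x12_fffffffc yields 0x13_00000004
 * in the imm_a()/imm_b() pair.
 */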
/* NFP has Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, true, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 true);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 true);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, true);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, true);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes, then
		 * another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 true);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 true);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned
		 * length, then another direct_ref write8 to write the
		 * remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, true);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, true);
	}

	/* TODO: The following extra load is to make sure data flow is
	 * identical before and after we do memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the value the same as before
	 * this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, true);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, true);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already looked at that LMEM word
		 * and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
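/* Example of the byte slicing above: a 2 byte load from stack offset 6 into
 * byte 0 of the destination uses LMEM word idx 1 (bytes 4-7) with src_byte 2,
 * so the slice is merged in with bmask 0x3 and a right shift by 16.
 */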
static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only first and last LMEM locations are going to need RMW,
		 * the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed in
		 * not added when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg, breg;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
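/* e.g. for reg_in = 0xaabbccdd the first ld_field writes the full word
 * rotated right by 8 (0xddaabbcc), the second rotates that by 16 and merges
 * only bytes 0 and 2 (bmask 0x5), leaving 0xddccbbaa in gpr_out, i.e. the
 * byte swapped value.
 */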
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int
map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;
	swreg tid;

	offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
	nfp_map = offmap->dev_priv;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_depth;
	lm_off += meta->arg2.var_off.value + meta->arg2.off;
	load_lm_ptr = meta->arg2_var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);

	/* Load map ID into a register, it should actually fit as an immediate
	 * but in case it doesn't, deal with it here, not in the delay slots.
	 */
	tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), tid);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));

	return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, 32 - insn->imm);
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, insn->imm);
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}
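/* e.g. a 64 bit left shift by 8 of hi:lo = 0x00000012:0x34567890 is built
 * from a double shift right by 24 for the new high word (0x00001234) and a
 * plain left shift by 8 for the new low word (0x56789000); the right shift
 * above uses the same funnel shift to form the new low word.
 */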
static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u8 dst = meta->insn.dst_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}

static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
	u32 imm_lo, imm_hi;
	u8 dst;

	dst = prev->insn.dst_reg * 2;
	imm_lo = prev->insn.imm;
	imm_hi = meta->insn.imm;

	wrp_immed(nfp_prog, reg_both(dst), imm_lo);

	/* mov is always 1 insn, load imm may be two, so try to use mov */
	if (imm_hi == imm_lo)
		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
	else
		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);

	return 0;
}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	meta->double_cb = imm_ld8_part2;
	return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}

static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
			    true, wrp_lmem_load);
}

static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct __sk_buff, len):
		if (size != FIELD_SIZEOF(struct __sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data):
		if (size != FIELD_SIZEOF(struct __sk_buff, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data_end):
		if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct xdp_md, data):
		if (size != FIELD_SIZEOF(struct xdp_md, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_md, data_end):
		if (size != FIELD_SIZEOF(struct xdp_md, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static int
mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ldst_gather_len)
		return nfp_cpp_memcpy(nfp_prog, meta);

	if (meta->ptr.type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
			return mem_ldx_xdp(nfp_prog, meta, size);
		else
			return mem_ldx_skb(nfp_prog, meta, size);
	}

	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_ldx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_ldx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	if (meta->ptr.type == PTR_TO_MAP_VALUE)
		return mem_ldx_emem(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}

static int
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    unsigned int size)
{
	u64 imm = meta->insn.imm; /* sign extend */
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				  imm, size);
}

static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_st_data(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 1);
}

static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 2);
}

static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 4);
}

static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 8);
}

static int
mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				   meta->insn.src_reg * 2, size);
}

static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
			    false, wrp_lmem_store);
}

static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_stx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_stx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 1);
}

static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 2);
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 4);
}

static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 8);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}

static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
}

static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
}

static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
}

static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U,
imm_b(nfp_prog)); 2074 emit_alu(nfp_prog, reg_none(), 2075 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg); 2076 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2077 } 2078 2079 if (imm >> 32) { 2080 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2081 emit_alu(nfp_prog, reg_none(), 2082 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); 2083 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2084 } 2085 2086 return 0; 2087 } 2088 2089 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2090 { 2091 const struct bpf_insn *insn = &meta->insn; 2092 u64 imm = insn->imm; /* sign extend */ 2093 swreg tmp_reg; 2094 2095 if (!imm) { 2096 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 2097 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 2098 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2099 return 0; 2100 } 2101 2102 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 2103 emit_alu(nfp_prog, reg_none(), 2104 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 2105 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2106 2107 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 2108 emit_alu(nfp_prog, reg_none(), 2109 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 2110 emit_br(nfp_prog, BR_BNE, insn->off, 0); 2111 2112 return 0; 2113 } 2114 2115 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2116 { 2117 const struct bpf_insn *insn = &meta->insn; 2118 2119 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 2120 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 2121 emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 2122 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 2123 emit_alu(nfp_prog, reg_none(), 2124 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 2125 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 2126 2127 return 0; 2128 } 2129 2130 static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2131 { 2132 return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true); 2133 } 2134 2135 static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2136 { 2137 return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); 2138 } 2139 2140 static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2141 { 2142 return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false); 2143 } 2144 2145 static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2146 { 2147 return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); 2148 } 2149 2150 static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2151 { 2152 return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true); 2153 } 2154 2155 static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2156 { 2157 return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false); 2158 } 2159 2160 static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2161 { 2162 return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false); 2163 } 2164 2165 static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2166 { 2167 return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true); 2168 } 2169 2170 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2171 { 2172 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 2173 } 2174 2175 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2176 { 2177 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 2178 } 2179 2180 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2181 { 2182 switch (meta->insn.imm) { 2183 case BPF_FUNC_xdp_adjust_head: 2184 return 
adjust_head(nfp_prog, meta); 2185 case BPF_FUNC_map_lookup_elem: 2186 return map_lookup_stack(nfp_prog, meta); 2187 default: 2188 WARN_ONCE(1, "verifier allowed unsupported function\n"); 2189 return -EOPNOTSUPP; 2190 } 2191 } 2192 2193 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2194 { 2195 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 2196 2197 return 0; 2198 } 2199 2200 static const instr_cb_t instr_cb[256] = { 2201 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 2202 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 2203 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 2204 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 2205 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 2206 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 2207 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 2208 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 2209 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 2210 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 2211 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 2212 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 2213 [BPF_ALU64 | BPF_NEG] = neg_reg64, 2214 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 2215 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 2216 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 2217 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 2218 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 2219 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 2220 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 2221 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 2222 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 2223 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 2224 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 2225 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 2226 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 2227 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 2228 [BPF_ALU | BPF_NEG] = neg_reg, 2229 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 2230 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 2231 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 2232 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 2233 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 2234 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 2235 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 2236 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 2237 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 2238 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 2239 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 2240 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 2241 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 2242 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 2243 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 2244 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 2245 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 2246 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 2247 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 2248 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 2249 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 2250 [BPF_JMP | BPF_JA | BPF_K] = jump, 2251 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 2252 [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm, 2253 [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, 2254 [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, 2255 [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, 2256 [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm, 2257 [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm, 2258 [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm, 2259 [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm, 2260 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 2261 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 2262 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 2263 [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg, 2264 [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, 2265 [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, 2266 [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, 2267 [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg, 2268 [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg, 2269 [BPF_JMP | BPF_JSLT | 
BPF_X] = jslt_reg, 2270 [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg, 2271 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 2272 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 2273 [BPF_JMP | BPF_CALL] = call, 2274 [BPF_JMP | BPF_EXIT] = goto_out, 2275 }; 2276 2277 /* --- Assembler logic --- */ 2278 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 2279 { 2280 struct nfp_insn_meta *meta, *jmp_dst; 2281 u32 idx, br_idx; 2282 2283 list_for_each_entry(meta, &nfp_prog->insns, l) { 2284 if (meta->skip) 2285 continue; 2286 if (meta->insn.code == (BPF_JMP | BPF_CALL)) 2287 continue; 2288 if (BPF_CLASS(meta->insn.code) != BPF_JMP) 2289 continue; 2290 2291 if (list_is_last(&meta->l, &nfp_prog->insns)) 2292 br_idx = nfp_prog->last_bpf_off; 2293 else 2294 br_idx = list_next_entry(meta, l)->off - 1; 2295 2296 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 2297 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 2298 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 2299 return -ELOOP; 2300 } 2301 /* Leave special branches for later */ 2302 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 2303 RELO_BR_REL) 2304 continue; 2305 2306 if (!meta->jmp_dst) { 2307 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 2308 return -ELOOP; 2309 } 2310 2311 jmp_dst = meta->jmp_dst; 2312 2313 if (jmp_dst->skip) { 2314 pr_err("Branch landing on removed instruction!!\n"); 2315 return -ELOOP; 2316 } 2317 2318 for (idx = meta->off; idx <= br_idx; idx++) { 2319 if (!nfp_is_br(nfp_prog->prog[idx])) 2320 continue; 2321 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 2322 } 2323 } 2324 2325 return 0; 2326 } 2327 2328 static void nfp_intro(struct nfp_prog *nfp_prog) 2329 { 2330 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 2331 emit_alu(nfp_prog, plen_reg(nfp_prog), 2332 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 2333 } 2334 2335 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 2336 { 2337 /* TC direct-action mode: 2338 * 0,1 ok NOT SUPPORTED[1] 2339 * 2 drop 0x22 -> drop, count as stat1 2340 * 4,5 nuke 0x02 -> drop 2341 * 7 redir 0x44 -> redir, count as stat2 2342 * * unspec 0x11 -> pass, count as stat0 2343 * 2344 * [1] We can't support OK and RECLASSIFY because we can't tell TC 2345 * the exact decision made. We are forced to support UNSPEC 2346 * to handle aborts so that's the only one we handle for passing 2347 * packets up the stack. 
2348 */ 2349 /* Target for aborts */ 2350 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2351 2352 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2353 2354 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2355 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 2356 2357 /* Target for normal exits */ 2358 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2359 2360 /* if R0 > 7 jump to abort */ 2361 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 2362 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2363 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2364 2365 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 2366 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 2367 2368 emit_shf(nfp_prog, reg_a(1), 2369 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 2370 2371 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2372 emit_shf(nfp_prog, reg_a(2), 2373 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2374 2375 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2376 emit_shf(nfp_prog, reg_b(2), 2377 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 2378 2379 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2380 2381 emit_shf(nfp_prog, reg_b(2), 2382 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 2383 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2384 } 2385 2386 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 2387 { 2388 /* XDP return codes: 2389 * 0 aborted 0x82 -> drop, count as stat3 2390 * 1 drop 0x22 -> drop, count as stat1 2391 * 2 pass 0x11 -> pass, count as stat0 2392 * 3 tx 0x44 -> redir, count as stat2 2393 * * unknown 0x82 -> drop, count as stat3 2394 */ 2395 /* Target for aborts */ 2396 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2397 2398 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2399 2400 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2401 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 2402 2403 /* Target for normal exits */ 2404 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 2405 2406 /* if R0 > 3 jump to abort */ 2407 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 2408 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 2409 2410 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 2411 2412 emit_shf(nfp_prog, reg_a(1), 2413 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 2414 2415 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 2416 emit_shf(nfp_prog, reg_b(2), 2417 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2418 2419 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 2420 2421 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2422 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2423 } 2424 2425 static void nfp_outro(struct nfp_prog *nfp_prog) 2426 { 2427 switch (nfp_prog->type) { 2428 case BPF_PROG_TYPE_SCHED_CLS: 2429 nfp_outro_tc_da(nfp_prog); 2430 break; 2431 case BPF_PROG_TYPE_XDP: 2432 nfp_outro_xdp(nfp_prog); 2433 break; 2434 default: 2435 WARN_ON(1); 2436 } 2437 } 2438 2439 static int nfp_translate(struct nfp_prog *nfp_prog) 2440 { 2441 struct nfp_insn_meta *meta; 2442 int err; 2443 2444 nfp_intro(nfp_prog); 2445 if (nfp_prog->error) 2446 return nfp_prog->error; 2447 2448 list_for_each_entry(meta, &nfp_prog->insns, l) { 2449 instr_cb_t cb = instr_cb[meta->insn.code]; 2450 2451 meta->off = nfp_prog_current_offset(nfp_prog); 2452 2453 if (meta->skip) { 2454 
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}
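
/* Illustrative sketch (editor's addition, not part of the original driver):
 * the two peephole passes above target cBPF-conversion residue. For example,
 * a sequence such as
 *
 *   r0 = *(u16 *)skb[off]		(BPF_LD | BPF_ABS | BPF_H)
 *   r0 &= 0xffff			(BPF_ALU64 | BPF_AND | BPF_K)
 *
 * has its AND marked as skipped by nfp_bpf_opt_ld_mask(), because the data
 * load already zero-extends to the expected width, and a 32-bit BPF_LD
 * followed by a pair of shifts by 32 has both shifts dropped by
 * nfp_bpf_opt_ld_shift() for the same reason.
 */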

/* A load/store pair that forms a memory copy should look like the
 * following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the
 * store must be the same, and the load and store must use the same
 * width. If either addr_src or addr_dest is the stack pointer, we don't
 * do the CPP optimization, as the stack is modelled by registers on the
 * NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}
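
/* Illustrative sketch (editor's addition, not part of the original driver):
 * assuming r1 is a packet pointer and r2 points at the packet destination,
 * the pairs
 *
 *   r0 = *(u32 *)(r1 + 0);	*(u32 *)(r2 + 0) = r0;
 *   r0 = *(u32 *)(r1 + 4);	*(u32 *)(r2 + 4) = r0;
 *
 * chain in ascending order: the second pair starts exactly
 * prev_off + prev_size (0 + 4) after the first on both the load and the
 * store side. A pair that switches base register or width, or that skips
 * an offset, fails the checks above and starts a new chain instead.
 */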

/* Return TRUE if a cross memory access happens. A cross memory access
 * means the store area overlaps the load area, so a later load might read
 * the value written by a previous store; in that case we can't treat the
 * sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets. Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}

/* This pass tries to identify the following instruction sequences.
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when lowering
 * memcpy. The NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crosses with the previous
		 *   pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then this
			 * could serve as the new head of the next chain.
2785 */ 2786 if (curr_pair_is_memcpy(meta1, meta2)) { 2787 head_ld_meta = meta1; 2788 head_st_meta = meta2; 2789 head_ld_meta->ldst_gather_len = 2790 BPF_LDST_BYTES(ld); 2791 meta1 = nfp_meta_next(meta1); 2792 meta2 = nfp_meta_next(meta2); 2793 prev_ld = ld; 2794 prev_st = st; 2795 count = 1; 2796 } else { 2797 head_ld_meta = NULL; 2798 head_st_meta = NULL; 2799 prev_ld = NULL; 2800 prev_st = NULL; 2801 count = 0; 2802 } 2803 2804 continue; 2805 } 2806 2807 if (!head_ld_meta) { 2808 head_ld_meta = meta1; 2809 head_st_meta = meta2; 2810 } else { 2811 meta1->skip = true; 2812 meta2->skip = true; 2813 } 2814 2815 head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld); 2816 meta1 = nfp_meta_next(meta1); 2817 meta2 = nfp_meta_next(meta2); 2818 prev_ld = ld; 2819 prev_st = st; 2820 count++; 2821 } 2822 } 2823 2824 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) 2825 { 2826 nfp_bpf_opt_reg_init(nfp_prog); 2827 2828 nfp_bpf_opt_ld_mask(nfp_prog); 2829 nfp_bpf_opt_ld_shift(nfp_prog); 2830 nfp_bpf_opt_ldst_gather(nfp_prog); 2831 2832 return 0; 2833 } 2834 2835 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) 2836 { 2837 __le64 *ustore = (__force __le64 *)prog; 2838 int i; 2839 2840 for (i = 0; i < len; i++) { 2841 int err; 2842 2843 err = nfp_ustore_check_valid_no_ecc(prog[i]); 2844 if (err) 2845 return err; 2846 2847 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i])); 2848 } 2849 2850 return 0; 2851 } 2852 2853 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog) 2854 { 2855 void *prog; 2856 2857 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL); 2858 if (!prog) 2859 return; 2860 2861 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64); 2862 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len); 2863 kvfree(nfp_prog->prog); 2864 nfp_prog->prog = prog; 2865 } 2866 2867 int nfp_bpf_jit(struct nfp_prog *nfp_prog) 2868 { 2869 int ret; 2870 2871 ret = nfp_bpf_optimize(nfp_prog); 2872 if (ret) 2873 return ret; 2874 2875 ret = nfp_translate(nfp_prog); 2876 if (ret) { 2877 pr_err("Translation failed with error %d (translated: %u)\n", 2878 ret, nfp_prog->n_translated); 2879 return -EINVAL; 2880 } 2881 2882 nfp_bpf_prog_trim(nfp_prog); 2883 2884 return ret; 2885 } 2886 2887 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) 2888 { 2889 struct nfp_insn_meta *meta; 2890 2891 /* Another pass to record jump information. 
*/ 2892 list_for_each_entry(meta, &nfp_prog->insns, l) { 2893 u64 code = meta->insn.code; 2894 2895 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && 2896 BPF_OP(code) != BPF_CALL) { 2897 struct nfp_insn_meta *dst_meta; 2898 unsigned short dst_indx; 2899 2900 dst_indx = meta->n + 1 + meta->insn.off; 2901 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, 2902 cnt); 2903 2904 meta->jmp_dst = dst_meta; 2905 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; 2906 } 2907 } 2908 } 2909 2910 bool nfp_bpf_supported_opcode(u8 code) 2911 { 2912 return !!instr_cb[code]; 2913 } 2914 2915 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) 2916 { 2917 unsigned int i; 2918 u64 *prog; 2919 int err; 2920 2921 prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64), 2922 GFP_KERNEL); 2923 if (!prog) 2924 return ERR_PTR(-ENOMEM); 2925 2926 for (i = 0; i < nfp_prog->prog_len; i++) { 2927 enum nfp_relo_type special; 2928 u32 val; 2929 2930 special = FIELD_GET(OP_RELO_TYPE, prog[i]); 2931 switch (special) { 2932 case RELO_NONE: 2933 continue; 2934 case RELO_BR_REL: 2935 br_add_offset(&prog[i], bv->start_off); 2936 break; 2937 case RELO_BR_GO_OUT: 2938 br_set_offset(&prog[i], 2939 nfp_prog->tgt_out + bv->start_off); 2940 break; 2941 case RELO_BR_GO_ABORT: 2942 br_set_offset(&prog[i], 2943 nfp_prog->tgt_abort + bv->start_off); 2944 break; 2945 case RELO_BR_NEXT_PKT: 2946 br_set_offset(&prog[i], bv->tgt_done); 2947 break; 2948 case RELO_BR_HELPER: 2949 val = br_get_offset(prog[i]); 2950 val -= BR_OFF_RELO; 2951 switch (val) { 2952 case BPF_FUNC_map_lookup_elem: 2953 val = nfp_prog->bpf->helpers.map_lookup; 2954 break; 2955 default: 2956 pr_err("relocation of unknown helper %d\n", 2957 val); 2958 err = -EINVAL; 2959 goto err_free_prog; 2960 } 2961 br_set_offset(&prog[i], val); 2962 break; 2963 case RELO_IMMED_REL: 2964 immed_add_value(&prog[i], bv->start_off); 2965 break; 2966 } 2967 2968 prog[i] &= ~OP_RELO_TYPE; 2969 } 2970 2971 err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len); 2972 if (err) 2973 goto err_free_prog; 2974 2975 return prog; 2976 2977 err_free_prog: 2978 kfree(prog); 2979 return ERR_PTR(err); 2980 } 2981
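
/* Illustrative note (editor's addition, not part of the original driver):
 * as a worked example of the relocation pass above, if the exit handler was
 * emitted with nfp_prog->tgt_out == 40 and the vNIC's code store region
 * starts at bv->start_off == 0x100, a RELO_BR_GO_OUT branch gets its target
 * set to 0x128 (40 + 0x100), while RELO_BR_REL branches simply have 0x100
 * added to the relative target they already carry.
 */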