/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"

/* --- NFP prog --- */
/* The for-each "multiple entries" macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the translation
	 * will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

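/* Note on error handling: the emitters below do not return errors.  The
 * first failure is latched in nfp_prog->error (e.g. -ENOSPC from
 * nfp_prog_push() above) and subsequent emissions become best-effort;
 * callers check nfp_prog->error once after translation.
 */
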
/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

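/* A note on @defer: NFP branches have delay slots, and @defer is the
 * number of instructions following the branch which still execute
 * before the jump takes effect (at most 2 for unconditional branches,
 * as checked in emit_br_relo() above).  Callers passing defer > 0 must
 * emit that many useful (or nop) instructions right after the branch.
 */
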
static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

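/* For illustration: the shift field of the emitted instruction always
 * describes a right shift, so __emit_shf() encodes a left shift by N
 * as a right shift by 32 - N (the SHF_SC_L_SHF adjustment above),
 * e.g. SHF_SC_L_SHF with shift 8 is emitted with a shift field of 24.
 */
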
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

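/* For illustration: @bmask of the ld_field emitters is a 4-bit byte
 * mask selecting which bytes of the destination are written from the
 * (shifted) source, e.g. bmask 0xf replaces all four bytes while 0x5
 * replaces bytes 0 and 2 only; wrp_end32() below builds its endian
 * swap out of exactly this.
 */
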
static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra, so if the param is an immed we encode it as reg_none()
	 * and copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
		    false, reg.src_lmextn);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

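/* Worked example for pack_immed()/wrp_immed(): 0x12340000 has only the
 * top two bytes set, so it packs as val 0x1234 with IMMED_SHIFT_2B and
 * loads in a single immed instruction.  0x12345678 matches none of the
 * byte patterns (nor does its complement), so wrp_immed() falls back to
 * two instructions: the low 16 bits, then the upper word.
 */
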
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly as an operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly as an operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src and
 * OR the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}

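/* For illustration: a 40-bit address lives in a GPR pair, the low 32
 * bits in src_gpr and the high 8 bits in src_gpr + 1, so addr40_offset()
 * above adds @offset to the low word with ALU_OP_ADD and propagates the
 * carry into the high word with ALU_OP_ADD_C plus zero.
 */
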
/* The NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, true, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 true);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 true);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, true);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, true);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 true);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 true);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte-aligned
		 * part, then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, true);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, true);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before this
	 * transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

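/* Summary of the store strategies nfp_cpp_memcpy() picks, keyed on the
 * copy length:
 *	len <= 8			one direct_ref write8
 *	len <= 32, 4-byte multiple	one direct_ref write32
 *	len <= 32, otherwise		one indirect_ref write8
 *	len > 32, 4-byte multiple	one indirect_ref write32
 *	len <= 40, otherwise		direct_ref write32 + write8
 *	len > 40, otherwise		indirect_ref write32 + write8
 */
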
static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, true);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

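/* Worked example for data_ld(): a 2-byte load still issues a 4-byte
 * big-endian read (sz is rounded up to 4), so the wanted bytes land in
 * the top half of the transfer register; the SHF_SC_R_SHF by
 * (4 - size) * 8 = 16 bits then moves them down to the low end of the
 * destination GPR.
 */
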
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, true);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large,
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already looked at that LMEM word
		 * and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

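/* For illustration: loading 2 bytes from LMEM byte offset 1 into byte 0
 * of a GPR gives src_byte = 1, dst_byte = 0, so wrp_lmem_load() above
 * uses a right shift of 8 bits with byte mask 0x3; for the opposite
 * placement a left shift is used, encoded as 32 minus the distance
 * (cf. the src_byte < dst_byte branch above).
 */
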
static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large,
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations are going to need
		 * RMW, the middle locations will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

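/* mem_op_stack() below picks one of three ways of addressing the stack
 * in LMEM: the bottom 64 bytes are reachable directly through LMaddr0;
 * a constant offset whose whole access fits in one 32-byte window gets
 * LMaddr3 set up once; everything else (including variable pointers)
 * programs LMaddr3 and post-increments it while the access is sliced.
 */
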
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of the value being
		 * discarded when written to the LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

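/* Worked example for the slicing loop in mem_op_stack(): an 8-byte
 * access at stack offset 2 is cut so that no slice crosses a 4-byte
 * boundary on either the LMEM or the GPR side -- it becomes four
 * 2-byte steps (bytes 2-3, 4-5, 6-7, 8-9), moving to the next GPR
 * halfway through.
 */
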
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg, breg;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}

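/* Worked example for wrp_end32(): with reg_in = 0xaabbccdd the first
 * ld_field writes all four bytes (mask 0xf) of the rotate-right-by-8
 * result, 0xddaabbcc; the second takes bytes 0 and 2 (mask 0x5) of
 * that value rotated right by 16, giving 0xddccbbaa -- a full 32-bit
 * byte swap.
 */
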
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

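/* Note on the offset arithmetic in adjust_head(): ret_einval and end
 * are computed as "current offset + N" before emission and verified
 * with nfp_prog_confirm_current_offset() afterwards, so if the number
 * of instructions emitted between a branch and its target ever drifts,
 * the WARN in the confirm helper catches the mismatch.
 */
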
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;
	swreg tid;

	offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
	nfp_map = offmap->dev_priv;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	/* Load map ID into a register, it should actually fit as an immediate
	 * but in case it doesn't, deal with it here, not in the delay slots.
	 */
	tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), tid);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

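/* For illustration: a 64-bit BPF register is split across two 32-bit
 * NFP GPRs, so the 64-bit add/sub callbacks pair a plain op on the low
 * words with a with-carry op (ALU_OP_ADD_C/ALU_OP_SUB_C) on the high
 * words, e.g. 0xffffffff + 1 wraps the low word to 0 and carries 1
 * into the high word.
 */
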
static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));

	return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, 32 - insn->imm);
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, insn->imm);
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}

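/* Worked example for shl_imm64() with imm < 32: SHF_SC_R_DSHF funnels
 * the high:low register pair right by 32 - imm, so for imm = 8 the new
 * high word becomes (hi << 8) | (lo >> 24), and the following plain
 * left shift produces the new low word, lo << 8.  shr_imm64() mirrors
 * this with the pair funnelled right by imm.
 */
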
static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u8 dst = meta->insn.dst_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}

static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
	u32 imm_lo, imm_hi;
	u8 dst;

	dst = prev->insn.dst_reg * 2;
	imm_lo = prev->insn.imm;
	imm_hi = meta->insn.imm;

	wrp_immed(nfp_prog, reg_both(dst), imm_lo);

	/* mov is always 1 insn, load imm may be two, so try to use mov */
	if (imm_hi == imm_lo)
		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
	else
		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);

	return 0;
}

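/* Note: BPF_LD | BPF_IMM | BPF_DW is the only two-slot eBPF instruction;
 * the 64-bit immediate arrives as two 32-bit .imm halves in consecutive
 * instructions, which is why imm_ld8() below only registers
 * imm_ld8_part2() via meta->double_cb and the actual emission happens
 * once the second half has been seen.
 */
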
static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	meta->double_cb = imm_ld8_part2;
	return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}

static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
			    true, wrp_lmem_load);
}

static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct __sk_buff, len):
		if (size != FIELD_SIZEOF(struct __sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data):
		if (size != FIELD_SIZEOF(struct __sk_buff, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data_end):
		if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct xdp_md, data):
		if (size != FIELD_SIZEOF(struct xdp_md, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_md, data_end):
		if (size != FIELD_SIZEOF(struct xdp_md, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

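/* Note: mem_ldx_emem() below handles loads from map values, which live
 * in emulated memory behind a 40-bit address (cf. the PTR_TO_MAP_VALUE
 * check in nfp_cpp_memcpy()), hence the addr40 variant of the
 * host-order load rather than the 32-bit one used for packet data.
 */
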
static int
mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static void
mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta)
{
	s16 range_start = meta->pkt_cache.range_start;
	s16 range_end = meta->pkt_cache.range_end;
	swreg src_base, off;
	u8 xfer_num, len;
	bool indir;

	off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
	src_base = reg_a(meta->insn.src_reg * 2);
	len = range_end - range_start;
	xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;

	indir = len > 8 * REG_WIDTH;
	/* Setup PREV_ALU for indirect mode. */
	if (indir)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Cache memory into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
		     off, xfer_num - 1, true, indir);
}

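/* Worked example for the pkt_cache readers below, with REG_WIDTH == 4:
 * a 4-byte load starting 3 bytes into a cached transfer register takes
 * len_lo = 1 byte from reg_xfer(idx) and ORs the remaining
 * len_mid = 3 bytes from reg_xfer(idx + 1) on top; only a read which
 * crosses two register boundaries also touches a third register.
 */
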
static int
mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
				     struct nfp_insn_meta *meta,
				     unsigned int size)
{
	s16 range_start = meta->pkt_cache.range_start;
	s16 insn_off = meta->insn.off - range_start;
	swreg dst_lo, dst_hi, src_lo, src_mid;
	u8 dst_gpr = meta->insn.dst_reg * 2;
	u8 len_lo = size, len_mid = 0;
	u8 idx = insn_off / REG_WIDTH;
	u8 off = insn_off % REG_WIDTH;

	dst_hi = reg_both(dst_gpr + 1);
	dst_lo = reg_both(dst_gpr);
	src_lo = reg_xfer(idx);

	/* The read length could involve as many as three registers. */
	if (size > REG_WIDTH - off) {
		/* Calculate the part in the second register. */
		len_lo = REG_WIDTH - off;
		len_mid = size - len_lo;

		/* Calculate the part in the third register. */
		if (size > 2 * REG_WIDTH - off)
			len_mid = REG_WIDTH;
	}

	wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);

	if (!len_mid) {
		wrp_immed(nfp_prog, dst_hi, 0);
		return 0;
	}

	src_mid = reg_xfer(idx + 1);

	if (size <= REG_WIDTH) {
		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else {
		swreg src_hi = reg_xfer(idx + 2);

		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
				   REG_WIDTH - len_lo, len_lo);
		wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
				REG_WIDTH - len_lo);
		wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
				   len_lo);
	}

	return 0;
}

static int
mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
				   struct nfp_insn_meta *meta,
				   unsigned int size)
{
	swreg dst_lo, dst_hi, src_lo;
	u8 dst_gpr, idx;

	idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
	dst_gpr = meta->insn.dst_reg * 2;
	dst_hi = reg_both(dst_gpr + 1);
	dst_lo = reg_both(dst_gpr);
	src_lo = reg_xfer(idx);

	if (size < REG_WIDTH) {
		wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else if (size == REG_WIDTH) {
		wrp_mov(nfp_prog, dst_lo, src_lo);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else {
		swreg src_hi = reg_xfer(idx + 1);

		wrp_mov(nfp_prog, dst_lo, src_lo);
		wrp_mov(nfp_prog, dst_hi, src_hi);
	}

	return 0;
}

static int
mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta, unsigned int size)
{
	u8 off = meta->insn.off - meta->pkt_cache.range_start;

	if (IS_ALIGNED(off, REG_WIDTH))
		return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);

	return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
}

static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ldst_gather_len)
		return nfp_cpp_memcpy(nfp_prog, meta);

	if (meta->ptr.type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
			return mem_ldx_xdp(nfp_prog, meta, size);
		else
			return mem_ldx_skb(nfp_prog, meta, size);
	}

	if (meta->ptr.type == PTR_TO_PACKET) {
		if (meta->pkt_cache.range_end) {
			if (meta->pkt_cache.do_init)
				mem_ldx_data_init_pktcache(nfp_prog, meta);

			return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
		} else {
			return mem_ldx_data(nfp_prog, meta, size);
		}
	}

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_ldx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	if (meta->ptr.type == PTR_TO_MAP_VALUE)
		return mem_ldx_emem(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}
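
/* Example dispatch through the handlers above (illustrative): an XDP
 * program doing
 *
 *	r2 = *(u32 *)(r1 + offsetof(struct xdp_md, data))
 *
 * arrives as BPF_LDX | BPF_MEM | BPF_W with meta->ptr.type == PTR_TO_CTX,
 * so mem_ldx4() -> mem_ldx() -> mem_ldx_xdp(), which simply moves the
 * packet pointer out of pptr_reg().
 */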
static int
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    unsigned int size)
{
	u64 imm = meta->insn.imm; /* sign extend */
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				  imm, size);
}

static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_st_data(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 1);
}

static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 2);
}

static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 4);
}

static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 8);
}

static int
mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				   meta->insn.src_reg * 2, size);
}

static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
			    false, wrp_lmem_store);
}

static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_stx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_stx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 1);
}

static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 2);
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 4);
}

static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 8);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}
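
/* A note on the 64-bit compare below (illustrative, hypothetical value):
 * jeq_imm() has no native 64-bit compare, so it XORs each 32-bit half
 * with the matching immediate half (a zero half is skipped and the raw
 * register half is used instead), ORs the two results and branches on
 * BR_BEQ.  E.g. for r1 == 0x100000000 only the high half is XORed,
 * since imm & ~0U == 0.
 */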
static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
}

static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
}

static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
}

static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}
static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}

static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
}

static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
}

static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
}

static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	switch (meta->insn.imm) {
	case BPF_FUNC_xdp_adjust_head:
		return adjust_head(nfp_prog, meta);
	case BPF_FUNC_map_lookup_elem:
	case BPF_FUNC_map_update_elem:
		return map_call_stack_common(nfp_prog, meta);
	default:
		WARN_ONCE(1, "verifier allowed unsupported function\n");
		return -EOPNOTSUPP;
	}
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);

	return 0;
}

static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
	[BPF_ALU64 | BPF_NEG] =		neg_reg64,
	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
	[BPF_ALU | BPF_NEG] =		neg_reg,
	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
	[BPF_STX | BPF_MEM | BPF_B] =	mem_stx1,
	[BPF_STX | BPF_MEM | BPF_H] =	mem_stx2,
	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
	[BPF_STX | BPF_MEM | BPF_DW] =	mem_stx8,
	[BPF_ST | BPF_MEM | BPF_B] =	mem_st1,
	[BPF_ST | BPF_MEM | BPF_H] =	mem_st2,
	[BPF_ST | BPF_MEM | BPF_W] =	mem_st4,
	[BPF_ST | BPF_MEM | BPF_DW] =	mem_st8,
	[BPF_JMP | BPF_JA | BPF_K] =	jump,
	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
	[BPF_JMP | BPF_JSGT | BPF_K] =	jsgt_imm,
	[BPF_JMP | BPF_JSGE | BPF_K] =	jsge_imm,
	[BPF_JMP | BPF_JSLT | BPF_K] =	jslt_imm,
	[BPF_JMP | BPF_JSLE | BPF_K] =	jsle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
	[BPF_JMP | BPF_JSGT | BPF_X] =	jsgt_reg,
	[BPF_JMP | BPF_JSGE | BPF_X] =	jsge_reg,
	[BPF_JMP | BPF_JSLT | BPF_X] =	jslt_reg,
	[BPF_JMP | BPF_JSLE | BPF_X] =	jsle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
	[BPF_JMP | BPF_CALL] =		call,
	[BPF_JMP | BPF_EXIT] =		goto_out,
};
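
/* Example lookup (illustrative): BPF_ALU64 | BPF_ADD | BPF_K indexes
 * instr_cb[] at 0x07 and resolves to add_imm64(); opcodes left NULL in
 * the table are rejected via nfp_bpf_supported_opcode() below.
 */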
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *jmp_dst;
	u32 idx, br_idx;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;
		if (meta->insn.code == (BPF_JMP | BPF_CALL))
			continue;
		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
			continue;

		if (list_is_last(&meta->l, &nfp_prog->insns))
			br_idx = nfp_prog->last_bpf_off;
		else
			br_idx = list_next_entry(meta, l)->off - 1;

		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
			return -ELOOP;
		}
		/* Leave special branches for later */
		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
		    RELO_BR_REL)
			continue;

		if (!meta->jmp_dst) {
			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
			return -ELOOP;
		}

		jmp_dst = meta->jmp_dst;

		if (jmp_dst->skip) {
			pr_err("Branch landing on removed instruction!!\n");
			return -ELOOP;
		}

		for (idx = meta->off; idx <= br_idx; idx++) {
			if (!nfp_is_br(nfp_prog->prog[idx]))
				continue;
			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
		}
	}

	return 0;
}
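
/* Illustrative fixup: if the BPF insn at index 5 jumps to index 9,
 * nfp_bpf_jit_prepare() has set meta->jmp_dst to insn 9's meta, and the
 * loop above rewrites the ADDR field of every branch in insn 5's
 * machine-code block to jmp_dst->off via br_set_offset().
 */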
static void nfp_intro(struct nfp_prog *nfp_prog)
{
	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}

static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1    ok       NOT SUPPORTED[1]
	 *   2     drop 0x22 -> drop,  count as stat1
	 *   4,5   nuke 0x02 -> drop
	 *   7    redir 0x44 -> redir, count as stat2
	 *   *   unspec 0x11 -> pass,  count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts so that's the only one we handle for passing
	 *     packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0  aborted  0x82 -> drop,  count as stat3
	 *   1    drop   0x22 -> drop,  count as stat1
	 *   2    pass   0x11 -> pass,  count as stat0
	 *   3     tx    0x44 -> redir, count as stat2
	 *   * unknown   0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
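
/* Worked example for the XDP outro above (a sketch of the table lookup,
 * not emitted code): for XDP_DROP, R0 == 1, the indirect shift amount
 * becomes 1 << 3 == 8 bits, so 0x44112282 >> 8 == 0x441122 and masking
 * with 0xff yields 0x22, i.e. "drop, count as stat1" from the table.
 */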
static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		nfp_outro_tc_da(nfp_prog);
		break;
	case BPF_PROG_TYPE_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	default:
		WARN_ON(1);
	}
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}
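
/* Example of the pattern removed above (illustrative):
 *
 *	r0 = *(u8 *)skb[off]	// BPF_LD | BPF_ABS | BPF_B
 *	r0 &= 0xff		// BPF_ALU64 | BPF_AND | BPF_K
 *
 * The classic load already zero-extends into r0, so the AND matches
 * exp_mask[BPF_B] and is skipped.
 */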
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the store
 * should be the same, and the load and store should also operate at the same
 * width.  If either addr_src or addr_dest is the stack pointer, we don't do
 * the CPP optimization as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}
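
/* Illustrative chain (hypothetical registers and offsets): the pairs
 *
 *	ld4 rX, [rA + 0];  st4 [rB + 0], rX
 *	ld4 rX, [rA + 4];  st4 [rB + 4], rX
 *
 * chain in ascending order because both the load and the store offsets
 * advance by the common access size (4 bytes).
 */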
/* Return TRUE if cross memory access happens.  Cross memory access means
 * the store area overlaps the load area, so a later load might read back
 * the value a previous store wrote; in that case we can't treat the
 * sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets.  Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}
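
/* Worked cross example (hypothetical canonicalized offsets): with
 * head_ld_off == 0 and head_st_off == 8, a later load at ld_off == 8
 * reads bytes the first store wrote, so the ascending-order check above
 * fires and the sequence is not treated as a pure copy.
 */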
/* This pass tries to identify the following instruction sequences.
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * A sequence like the above is typically generated by the compiler when
 * lowering memcpy; the NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crossed with the previous
		 *   pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then
			 * this pair could serve as the new head of the next
			 * chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}
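
/* Illustrative grouping done by nfp_bpf_opt_pkt_cache() below
 * (hypothetical offsets, same packet pointer ID and variable offset):
 * 4-byte loads at packet offsets 0, 8 and 60 merge into the range
 * [0, 64), which still fits the 64-byte cap and is fetched once into
 * the transfer-in registers.
 */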
3021 */ 3022 if (meta->ptr.id == range_ptr_id && 3023 meta->ptr.off == range_ptr_off) { 3024 s16 new_start = range_start; 3025 s16 end, off = insn->off; 3026 s16 new_end = range_end; 3027 bool changed = false; 3028 3029 if (off < range_start) { 3030 new_start = off; 3031 changed = true; 3032 } 3033 3034 end = off + BPF_LDST_BYTES(insn); 3035 if (end > range_end) { 3036 new_end = end; 3037 changed = true; 3038 } 3039 3040 if (!changed) 3041 continue; 3042 3043 if (new_end - new_start <= 64) { 3044 /* Install new range. */ 3045 range_start = new_start; 3046 range_end = new_end; 3047 continue; 3048 } 3049 } 3050 3051 end_current_then_start_new: 3052 range_node->pkt_cache.range_start = range_start; 3053 range_node->pkt_cache.range_end = range_end; 3054 start_new: 3055 range_node = meta; 3056 range_node->pkt_cache.do_init = true; 3057 range_ptr_id = range_node->ptr.id; 3058 range_ptr_off = range_node->ptr.off; 3059 range_start = insn->off; 3060 range_end = insn->off + BPF_LDST_BYTES(insn); 3061 } 3062 3063 if (range_node) { 3064 range_node->pkt_cache.range_start = range_start; 3065 range_node->pkt_cache.range_end = range_end; 3066 } 3067 3068 list_for_each_entry(meta, &nfp_prog->insns, l) { 3069 if (meta->skip) 3070 continue; 3071 3072 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { 3073 if (meta->pkt_cache.do_init) { 3074 range_start = meta->pkt_cache.range_start; 3075 range_end = meta->pkt_cache.range_end; 3076 } else { 3077 meta->pkt_cache.range_start = range_start; 3078 meta->pkt_cache.range_end = range_end; 3079 } 3080 } 3081 } 3082 } 3083 3084 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) 3085 { 3086 nfp_bpf_opt_reg_init(nfp_prog); 3087 3088 nfp_bpf_opt_ld_mask(nfp_prog); 3089 nfp_bpf_opt_ld_shift(nfp_prog); 3090 nfp_bpf_opt_ldst_gather(nfp_prog); 3091 nfp_bpf_opt_pkt_cache(nfp_prog); 3092 3093 return 0; 3094 } 3095 3096 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) 3097 { 3098 __le64 *ustore = (__force __le64 *)prog; 3099 int i; 3100 3101 for (i = 0; i < len; i++) { 3102 int err; 3103 3104 err = nfp_ustore_check_valid_no_ecc(prog[i]); 3105 if (err) 3106 return err; 3107 3108 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i])); 3109 } 3110 3111 return 0; 3112 } 3113 3114 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog) 3115 { 3116 void *prog; 3117 3118 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL); 3119 if (!prog) 3120 return; 3121 3122 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64); 3123 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len); 3124 kvfree(nfp_prog->prog); 3125 nfp_prog->prog = prog; 3126 } 3127 3128 int nfp_bpf_jit(struct nfp_prog *nfp_prog) 3129 { 3130 int ret; 3131 3132 ret = nfp_bpf_optimize(nfp_prog); 3133 if (ret) 3134 return ret; 3135 3136 ret = nfp_translate(nfp_prog); 3137 if (ret) { 3138 pr_err("Translation failed with error %d (translated: %u)\n", 3139 ret, nfp_prog->n_translated); 3140 return -EINVAL; 3141 } 3142 3143 nfp_bpf_prog_trim(nfp_prog); 3144 3145 return ret; 3146 } 3147 3148 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) 3149 { 3150 struct nfp_insn_meta *meta; 3151 3152 /* Another pass to record jump information. 
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		}
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}