/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"

/* --- NFP prog --- */
/* The for-each macros which walk "multiple" entries provide pos and
 * next<n> pointers.  It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))
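
/* Usage sketch (illustrative only, not code from this driver): a pass
 * inspecting adjacent instruction pairs could be written as
 *
 *	struct nfp_insn_meta *meta1, *meta2;
 *
 *	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
 *		// examine the pair; meta2 (a "next" pointer) may be
 *		// relinked, meta1 (pos) must not be
 *	}
 */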

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the
	 * translation will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, bool sync)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
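
/* Note on the left-shift encoding above (informational): the shifter has
 * no separate left-shift amount field, so __emit_shf() encodes a left
 * shift by N as the right-shift amount 32 - N, e.g. SHF_SC_L_SHF with
 * shift 8 is stored as 24 in OP_SHF_SHIFT.
 */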

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
		    false, reg.src_lmextn);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
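
/* Worked examples for the packing above (illustrative):
 *   imm = 0x0000beef -> packs directly, val 0xbeef, IMMED_SHIFT_0B
 *   imm = 0x00beef00 -> packs shifted,  val 0xbeef, IMMED_SHIFT_1B
 *   imm = 0xffff0001 -> ~imm = 0x0000fffe packs, inverted form is used
 *   imm = 0x12345678 -> neither imm nor ~imm packs, two immeds emitted
 */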

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
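
/* Example (illustrative): with src_gpr = 4 a 40-bit base lives in the
 * GPR pair 4 (low 32 bits) and 5 (high 8 bits).  For a non-zero @offset
 * the helper above folds it in with a carry pair: imm_a = A(4) + offset
 * via ALU_OP_ADD, then imm_b = B(5) + 0 + carry via ALU_OP_ADD_C.
 */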

/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
			      &off);

	/* Set up PREV_ALU fields to override the memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, true, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use a single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 true);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use a single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 true);
	} else if (len <= 32) {
		/* Use a single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, true);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use a single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, true);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 true);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 true);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte-aligned
		 * part of the length, then another direct_ref write8 to
		 * write the remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, true);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, true);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before this
	 * transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}
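
/* Worked example (illustrative): for a gathered copy of len = 26 bytes,
 * xfer_num = round_up(26, 4) / 4 = 7 transfer registers are read.  On
 * the store side 26 <= 32 but is not 4 byte aligned, so the single
 * indirect_ref write8 branch is taken with a byte count of len - 1 = 25.
 */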

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, true);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and
	 * then mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, true);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size is guaranteed to fit because it's a u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}
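
/* Example (illustrative): a 2 byte data_ld() reads sz = 4 bytes and then
 * shifts the transfer register right by (4 - 2) * 8 = 16 bits, dropping
 * the bytes beyond the requested big-endian value.
 */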

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM
		 * word into a new GPR.  IOW we've already looked at that LMEM
		 * word and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}
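
/* Example (illustrative): loading one byte from LMEM byte offset 3 into
 * destination byte 1 yields mask = 0x2, src_byte = 3, dst_byte = 1, so
 * the byte is placed with SHF_SC_R_SHF by (3 - 1) * 8 = 16 bits.
 */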

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations will need RMW,
		 * the middle locations are overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  We depend on the low bits of the value
		 * being discarded when written to the LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because the offset is
		 * ORed in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}
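
/* Example (illustrative): an 8 byte stack access at off = 6 crosses both
 * GPR and LMEM word boundaries, so the loop above emits four 2 byte
 * slices: bytes [6,8) into GPR bytes 0-1, [8,10) into GPR bytes 2-3,
 * [10,12) into the next GPR's bytes 0-1 and [12,14) into its bytes 2-3.
 */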

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg, breg;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
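
/* Example (illustrative): wrp_end32() byte swaps a word with two
 * ld_field ops.  For 0xaabbccdd the first rotate-by-8 writes all four
 * bytes, giving 0xddaabbcc; the second reloads bytes 0 and 2 from a
 * 16-bit rotation, giving 0xddccbbaa.
 */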

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
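
/* Note (informational): the BR_UNC in adjust_head() above is emitted
 * with defer 2, so the two ALU subtractions that follow it still execute
 * in the branch's defer slots before control reaches the "end" offset.
 */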

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;
	swreg tid;

	offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
	nfp_map = offmap->dev_priv;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_depth;
	lm_off += meta->arg2.var_off.value + meta->arg2.off;
	load_lm_ptr = meta->arg2_var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);

	/* Load map ID into a register; it should actually fit as an
	 * immediate, but in case it doesn't, deal with it here rather than
	 * in the delay slots.
	 */
	tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), tid);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}
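
/* Note (informational): 64-bit BPF ALU ops map onto pairs of 32-bit NFP
 * ALU instructions, e.g. add_reg64() above emits ALU_OP_ADD on the low
 * words followed by ALU_OP_ADD_C on the high words so the carry
 * propagates; sub_reg64() below pairs ALU_OP_SUB with ALU_OP_SUB_C.
 */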

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));

	return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, 32 - insn->imm);
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, insn->imm);
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}

static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u8 dst = meta->insn.dst_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}

static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
	u32 imm_lo, imm_hi;
	u8 dst;

	dst = prev->insn.dst_reg * 2;
	imm_lo = prev->insn.imm;
	imm_hi = meta->insn.imm;

	wrp_immed(nfp_prog, reg_both(dst), imm_lo);

	/* mov is always 1 insn, load imm may be two, so try to use mov */
	if (imm_hi == imm_lo)
		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
	else
		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);

	return 0;
}
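
/* Note (informational): BPF_LD | BPF_IMM | BPF_DW occupies two BPF
 * instruction slots, with the upper 32 bits carried in the second slot's
 * imm field.  imm_ld8() below therefore only records imm_ld8_part2() as
 * the double_cb; code is emitted once the second half has been seen.
 */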

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	meta->double_cb = imm_ld8_part2;
	return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}

static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
			    true, wrp_lmem_load);
}

static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct __sk_buff, len):
		if (size != FIELD_SIZEOF(struct __sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data):
		if (size != FIELD_SIZEOF(struct __sk_buff, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct __sk_buff, data_end):
		if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct xdp_md, data):
		if (size != FIELD_SIZEOF(struct xdp_md, data))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_md, data_end):
		if (size != FIELD_SIZEOF(struct xdp_md, data_end))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static int
mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
					 tmp_reg, meta->insn.dst_reg * 2, size);
}

static void
mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta)
{
	s16 range_start = meta->pkt_cache.range_start;
	s16 range_end = meta->pkt_cache.range_end;
	swreg src_base, off;
	u8 xfer_num, len;
	bool indir;

	off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
	src_base = reg_a(meta->insn.src_reg * 2);
	len = range_end - range_start;
	xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;

	indir = len > 8 * REG_WIDTH;
	/* Set up PREV_ALU fields for indirect mode. */
	if (indir)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Cache memory into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
		     off, xfer_num - 1, true, indir);
}
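
/* Example (illustrative, with REG_WIDTH == 4): a cached range [0, 20)
 * gives len = 20 and xfer_num = round_up(20, 4) / 4 = 5 transfer
 * registers; 20 <= 8 * REG_WIDTH, so indir is false and the plain
 * direct_ref read is used.
 */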

		/* Calculate the part in the third register. */
		if (size > 2 * REG_WIDTH - off)
			len_mid = REG_WIDTH;
	}

	wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);

	if (!len_mid) {
		wrp_immed(nfp_prog, dst_hi, 0);
		return 0;
	}

	src_mid = reg_xfer(idx + 1);

	if (size <= REG_WIDTH) {
		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else {
		swreg src_hi = reg_xfer(idx + 2);

		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
				   REG_WIDTH - len_lo, len_lo);
		wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
				REG_WIDTH - len_lo);
		wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
				   len_lo);
	}

	return 0;
}

static int
mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
				   struct nfp_insn_meta *meta,
				   unsigned int size)
{
	swreg dst_lo, dst_hi, src_lo;
	u8 dst_gpr, idx;

	idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
	dst_gpr = meta->insn.dst_reg * 2;
	dst_hi = reg_both(dst_gpr + 1);
	dst_lo = reg_both(dst_gpr);
	src_lo = reg_xfer(idx);

	if (size < REG_WIDTH) {
		wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else if (size == REG_WIDTH) {
		wrp_mov(nfp_prog, dst_lo, src_lo);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else {
		swreg src_hi = reg_xfer(idx + 1);

		wrp_mov(nfp_prog, dst_lo, src_lo);
		wrp_mov(nfp_prog, dst_hi, src_hi);
	}

	return 0;
}

static int
mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta, unsigned int size)
{
	u8 off = meta->insn.off - meta->pkt_cache.range_start;

	if (IS_ALIGNED(off, REG_WIDTH))
		return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);

	return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
}

static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ldst_gather_len)
		return nfp_cpp_memcpy(nfp_prog, meta);

	if (meta->ptr.type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
			return mem_ldx_xdp(nfp_prog, meta, size);
		else
			return mem_ldx_skb(nfp_prog, meta, size);
	}

	if (meta->ptr.type == PTR_TO_PACKET) {
		if (meta->pkt_cache.range_end) {
			if (meta->pkt_cache.do_init)
				mem_ldx_data_init_pktcache(nfp_prog, meta);

			return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
		} else {
			return mem_ldx_data(nfp_prog, meta, size);
		}
	}

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_ldx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	if (meta->ptr.type == PTR_TO_MAP_VALUE)
		return mem_ldx_emem(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}
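
/* BPF_ST stores an immediate.  mem_st_data() sign-extends insn->imm to
 * 64 bits and writes `size` bytes of it to packet memory in host byte
 * order; e.g. (illustrative) "*(u64 *)(r2 + 0) = -1" writes eight 0xff
 * bytes.
 */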
static int
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    unsigned int size)
{
	u64 imm = meta->insn.imm; /* sign extend */
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				  imm, size);
}

static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_st_data(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 1);
}

static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 2);
}

static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 4);
}

static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 8);
}

static int
mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				   meta->insn.src_reg * 2, size);
}

static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
			    false, wrp_lmem_store);
}

static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_stx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_stx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 1);
}

static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 2);
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 4);
}

static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 8);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}
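
/* 64-bit compares are built from 32-bit operations.  For example,
 * jeq_imm below XORs each 32-bit half with the immediate (skipping
 * halves where the immediate word is zero), ORs the results and
 * branches on the equal condition code.
 */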
static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
}

static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
}

static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
}

static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (!imm) {
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}
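
/* Register-register equality follows the same split: jeq_reg below
 * XORs the low and high word pairs, ORs the results and branches on
 * the equal condition code.
 */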
static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}

static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
}

static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
}

static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
}

static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	switch (meta->insn.imm) {
	case BPF_FUNC_xdp_adjust_head:
		return adjust_head(nfp_prog, meta);
	case BPF_FUNC_map_lookup_elem:
		return map_call_stack_common(nfp_prog, meta);
	default:
		WARN_ONCE(1, "verifier allowed unsupported function\n");
		return -EOPNOTSUPP;
	}
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);

	return 0;
}
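
/* Translator dispatch table, indexed by the full 8-bit BPF opcode.
 * A NULL entry means the opcode is not supported, which is what
 * nfp_bpf_supported_opcode() reports; e.g. (illustrative) there is no
 * BPF_ALU64 | BPF_MUL handler, so such programs are rejected.
 */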
static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
	[BPF_ALU64 | BPF_NEG] = neg_reg64,
	[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] = and_reg,
	[BPF_ALU | BPF_AND | BPF_K] = and_imm,
	[BPF_ALU | BPF_OR | BPF_X] = or_reg,
	[BPF_ALU | BPF_OR | BPF_K] = or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] = add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
	[BPF_ALU | BPF_NEG] = neg_reg,
	[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
	[BPF_ALU | BPF_END | BPF_X] = end_reg32,
	[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] = data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] = data_ld4,
	[BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
	[BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
	[BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
	[BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
	[BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
	[BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
	[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
	[BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
	[BPF_ST | BPF_MEM | BPF_B] = mem_st1,
	[BPF_ST | BPF_MEM | BPF_H] = mem_st2,
	[BPF_ST | BPF_MEM | BPF_W] = mem_st4,
	[BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
	[BPF_JMP | BPF_JA | BPF_K] = jump,
	[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
	[BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
	[BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
	[BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
	[BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
	[BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
	[BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
	[BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
	[BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
	[BPF_JMP | BPF_CALL] = call,
	[BPF_JMP | BPF_EXIT] = goto_out,
};

/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *jmp_dst;
	u32 idx, br_idx;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;
		if (meta->insn.code == (BPF_JMP | BPF_CALL))
			continue;
		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
			continue;

		if (list_is_last(&meta->l, &nfp_prog->insns))
			br_idx = nfp_prog->last_bpf_off;
		else
			br_idx = list_next_entry(meta, l)->off - 1;

		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
			return -ELOOP;
		}
		/* Leave special branches for later */
		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
		    RELO_BR_REL)
			continue;

		if (!meta->jmp_dst) {
			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
			return -ELOOP;
		}

		jmp_dst = meta->jmp_dst;

		if (jmp_dst->skip) {
			pr_err("Branch landing on removed instruction!!\n");
			return -ELOOP;
		}

		for (idx = meta->off; idx <= br_idx; idx++) {
			if (!nfp_is_br(nfp_prog->prog[idx]))
				continue;
			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
		}
	}

	return 0;
}
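
/* Prologue: load a 14-bit mask and AND it with the packet-vector
 * length word to extract the packet length into the plen register.
 */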
static void nfp_intro(struct nfp_prog *nfp_prog)
{
	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}

static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1  ok      NOT SUPPORTED[1]
	 *   2    drop    0x22 -> drop,  count as stat1
	 *   4,5  nuke    0x02 -> drop
	 *   7    redir   0x44 -> redir, count as stat2
	 *   *    unspec  0x11 -> pass,  count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts so that's the only one we handle for passing
	 *     packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
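
/* XDP verdicts are translated with a byte table packed into an
 * immediate: 0x44112282 below holds one result byte per return code
 * (0x82 aborted, 0x22 drop, 0x11 pass, 0x44 tx), and the shift
 * sequence, using the previous ALU result as an indirect shift amount
 * of 8 * R0, selects the byte for the program's return value.
 */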
static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0  aborted  0x82 -> drop,  count as stat3
	 *   1  drop     0x22 -> drop,  count as stat1
	 *   2  pass     0x11 -> pass,  count as stat0
	 *   3  tx       0x44 -> redir, count as stat2
	 *   *  unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		nfp_outro_tc_da(nfp_prog);
		break;
	case BPF_PROG_TYPE_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	default:
		WARN_ON(1);
	}
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}
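
/* Remove the pair of shifts by 32 following a 32-bit BPF_ABS/BPF_IND
 * load (a zero-extension idiom); like the masking removed above, this
 * is something our load already guarantees.
 */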
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

/* A load/store pair that forms a memory copy should look like the
 * following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the
 * store should be the same, and the load and store should also operate
 * at the same width.  If either addr_src or addr_dest is the stack
 * pointer, we don't do the CPP optimization as the stack is modelled
 * by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}
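
/* Chaining example (illustrative): with 8-byte operations,
 *
 *   ld_8 R3, [R1 + 0] ; st_8 [R2 + 0], R3
 *   ld_8 R3, [R1 + 8] ; st_8 [R2 + 8], R3
 *
 * the second pair chains with the first: same base registers, same
 * width and offsets ascending by exactly the access size on both sides.
 */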

/* Return TRUE if a cross memory access happens.  A cross memory access
 * means the store area overlaps the load area such that a later load
 * might read back a value written by a previous store; in that case we
 * can't treat the sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets: express all of them against the
	 * original base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}
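
/* Overlap example (illustrative): with the head load at [pkt + 0] and
 * the head store at [pkt + 4], a later load from [pkt + 4] would read
 * back the value just stored, so the sequence is not a pure copy.
 */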

/* This pass tries to identify the following instruction sequences:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when
 * lowering a memcpy(); the NFP prefers CPP instructions to accelerate
 * it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset the record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crosses the previous pair.
		 * - The chained load/store pair would grow the total copy
		 *   size beyond 128 bytes, the maximum length a single NFP
		 *   CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain was ended by a load/store pair, that
			 * pair can serve as the head of the next chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}

static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->skip)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}
3018 */ 3019 if (meta->ptr.id == range_ptr_id && 3020 meta->ptr.off == range_ptr_off) { 3021 s16 new_start = range_start; 3022 s16 end, off = insn->off; 3023 s16 new_end = range_end; 3024 bool changed = false; 3025 3026 if (off < range_start) { 3027 new_start = off; 3028 changed = true; 3029 } 3030 3031 end = off + BPF_LDST_BYTES(insn); 3032 if (end > range_end) { 3033 new_end = end; 3034 changed = true; 3035 } 3036 3037 if (!changed) 3038 continue; 3039 3040 if (new_end - new_start <= 64) { 3041 /* Install new range. */ 3042 range_start = new_start; 3043 range_end = new_end; 3044 continue; 3045 } 3046 } 3047 3048 end_current_then_start_new: 3049 range_node->pkt_cache.range_start = range_start; 3050 range_node->pkt_cache.range_end = range_end; 3051 start_new: 3052 range_node = meta; 3053 range_node->pkt_cache.do_init = true; 3054 range_ptr_id = range_node->ptr.id; 3055 range_ptr_off = range_node->ptr.off; 3056 range_start = insn->off; 3057 range_end = insn->off + BPF_LDST_BYTES(insn); 3058 } 3059 3060 if (range_node) { 3061 range_node->pkt_cache.range_start = range_start; 3062 range_node->pkt_cache.range_end = range_end; 3063 } 3064 3065 list_for_each_entry(meta, &nfp_prog->insns, l) { 3066 if (meta->skip) 3067 continue; 3068 3069 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { 3070 if (meta->pkt_cache.do_init) { 3071 range_start = meta->pkt_cache.range_start; 3072 range_end = meta->pkt_cache.range_end; 3073 } else { 3074 meta->pkt_cache.range_start = range_start; 3075 meta->pkt_cache.range_end = range_end; 3076 } 3077 } 3078 } 3079 } 3080 3081 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) 3082 { 3083 nfp_bpf_opt_reg_init(nfp_prog); 3084 3085 nfp_bpf_opt_ld_mask(nfp_prog); 3086 nfp_bpf_opt_ld_shift(nfp_prog); 3087 nfp_bpf_opt_ldst_gather(nfp_prog); 3088 nfp_bpf_opt_pkt_cache(nfp_prog); 3089 3090 return 0; 3091 } 3092 3093 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) 3094 { 3095 __le64 *ustore = (__force __le64 *)prog; 3096 int i; 3097 3098 for (i = 0; i < len; i++) { 3099 int err; 3100 3101 err = nfp_ustore_check_valid_no_ecc(prog[i]); 3102 if (err) 3103 return err; 3104 3105 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i])); 3106 } 3107 3108 return 0; 3109 } 3110 3111 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog) 3112 { 3113 void *prog; 3114 3115 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL); 3116 if (!prog) 3117 return; 3118 3119 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64); 3120 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len); 3121 kvfree(nfp_prog->prog); 3122 nfp_prog->prog = prog; 3123 } 3124 3125 int nfp_bpf_jit(struct nfp_prog *nfp_prog) 3126 { 3127 int ret; 3128 3129 ret = nfp_bpf_optimize(nfp_prog); 3130 if (ret) 3131 return ret; 3132 3133 ret = nfp_translate(nfp_prog); 3134 if (ret) { 3135 pr_err("Translation failed with error %d (translated: %u)\n", 3136 ret, nfp_prog->n_translated); 3137 return -EINVAL; 3138 } 3139 3140 nfp_bpf_prog_trim(nfp_prog); 3141 3142 return ret; 3143 } 3144 3145 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) 3146 { 3147 struct nfp_insn_meta *meta; 3148 3149 /* Another pass to record jump information. 
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		}
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}