/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The "for each" macros walking multiple entries provide pos and next<n>
 * pointers.  It's safe to modify the next pointers (but not pos).
 */

#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the
	 * translation will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}
static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X, we need right rotate X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_BR_ALU_BASE |
		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
		      reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}
static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
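
/* The multiply unit is driven in steps: MUL_TYPE_START with MUL_STEP_NONE
 * loads the operands, MUL_STEP_1..4 run the partial multiplies, and MUL_LAST
 * (plus MUL_LAST_2 for the high word) reads the result back.  See
 * wrp_mul_u32() further below for the full sequence.
 */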
static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the step number is LAST or LAST_2, the left source is
		 * used as the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra, so if the param is an immed, we encode as reg_none()
	 * and copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly into the operand and return it;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly into the operand and return it;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are left unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}

/* NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes,
		 * then another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned
		 * length, then another direct_ref write8 to write the
		 * remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before this
	 * transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;
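
	/* Replay the original load: after the adjustment above, xfer_num
	 * indexes the transfer register holding the bytes the optimized-out
	 * load would have read; the switch below copies them back into the
	 * destination GPR.
	 */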
	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
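
/* The host-order variants below use CMD_TGT_READ32_SWAP, so the transfer
 * registers already hold host-order (little endian) data, and sub-word sizes
 * are masked out with ld_field rather than shifted out as in data_ld() above.
 */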
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
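
/* A lmem_step callback moves at most 4 bytes of a stack access between a GPR
 * and a local memory word.  @first/@last bracket the whole access, @new_gpr
 * marks the first slice targeting a given GPR, @lm3 selects LM index 3 over
 * index 0, and @needs_inc requests the auto-incrementing LM pointer form.
 */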
static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large, do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already looked at that LMEM word
		 * and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large, do
	 * RMW.  Because we RMW twice we waste 2 cycles on unaligned 8 byte
	 * writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations will need RMW,
		 * the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}
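
/* mem_op_stack() - split a stack access into lmem_step calls.  There are four
 * addressing cases: a non-constant pointer (or one into the caller's frame)
 * always goes through LMaddr3 with post-increment; the bottom 64B of the
 * stack are reachable directly through LMaddr0; an access contained within a
 * single aligned 32 byte window uses LMaddr3 without increment; anything
 * else uses LMaddr3 with increment.
 */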
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const ||
	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because the offset is
		 * ORed in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}
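
/* wrp_alu_imm() - ALU operation with a 32-bit immediate, strength-reduced:
 * AND 0 and OR ~0 become plain immediate loads, XOR ~0 becomes a NOT, and
 * the no-op masks (AND ~0, OR 0, XOR 0) emit no code at all.
 */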
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}
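
/* Map from the BPF jump op (BPF_OP(code) >> 4) to the NFP branch mask.
 * @swap means the operands are exchanged before the compare, which lets the
 * "greater than" style conditions reuse the BR_BLO/BR_BHS/BR_BLT/BR_BGE
 * masks.
 */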
static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in the jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}

static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), carry_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, carry_op, reg_a(reg + 1));

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}
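
/* wrp_end32() - 32-bit byte swap built from two ld_field rotates: a right
 * rotate by 8 with mask 0xf writes all four bytes (leaving bytes 1 and 3
 * already in place), then a right rotate by 16 with mask 0x5 patches bytes 0
 * and 2 into their final positions.
 */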
static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}

static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
	if (gen_high_half)
		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
			 reg_none());
	else
		wrp_immed(nfp_prog, dst_hi, 0);
}

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{
	swreg multiplier, multiplicand, dst_hi, dst_lo;
	const struct bpf_insn *insn = &meta->insn;
	u32 lopnd_max, ropnd_max;
	u8 dst_reg;

	dst_reg = insn->dst_reg;
	multiplicand = reg_a(dst_reg * 2);
	dst_hi = reg_both(dst_reg * 2 + 1);
	dst_lo = reg_both(dst_reg * 2);
	lopnd_max = meta->umax_dst;
	if (ropnd_from_reg) {
		multiplier = reg_b(insn->src_reg * 2);
		ropnd_max = meta->umax_src;
	} else {
		u32 imm = insn->imm;

		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
		ropnd_max = imm;
	}
	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
			    gen_high_half);
	else
		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);

	return 0;
}
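
/* Unsigned division by a constant is JITed as multiplication by a
 * precomputed reciprocal (reciprocal_value_adv()): a power-of-two divisor
 * becomes a plain shift, an even divisor with a wide magic value is
 * pre-shifted first, and the remaining wide-magic case uses the add/sub
 * rounding sequence visible below.
 */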
static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{
	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	swreg magic;

	if (imm > U32_MAX) {
		wrp_immed(nfp_prog, dst_both, 0);
		return 0;
	}

	/* NOTE: because we are using "reciprocal_value_adv" which doesn't
	 * support "divisor > (1u << 31)", we need to JIT a separate NFP
	 * sequence to handle such a case.  It actually equals the result of
	 * the unsigned comparison "dst >= imm", which can be calculated
	 * using the following NFP sequence:
	 *
	 *  alu[--, dst, -, imm]
	 *  immed[imm, 0]
	 *  alu[dst, imm, +carry, 0]
	 */
	if (imm > 1U << 31) {
		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));

		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
			 reg_imm(0));
		return 0;
	}

	rvalue = reciprocal_value_adv(imm, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(imm & 1)) {
		pre_shift = fls(imm & -imm) - 1;
		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}
	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
	if (imm == 1U << exp) {
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, exp);
	} else if (rvalue.is_wide_m) {
		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
			    magic, true);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, 1);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, rvalue.sh - 1);
	} else {
		if (pre_shift)
			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
				 dst_b, SHF_SC_R_SHF, pre_shift);
		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
			 dst_b, SHF_SC_R_SHF, rvalue.sh);
	}

	return 0;
}

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}
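
/* adjust_tail() follows the same inline error handling pattern as
 * adjust_head() above: validity checks branch forward to a ret_einval block,
 * and the unconditional branch over that block has defer 2, so the two
 * instructions after it execute in its delay slots.
 */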
static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 ret_einval, end;
	swreg plen, delta;

	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));

	plen = imm_a(nfp_prog);
	delta = reg_a(2 * 2);

	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
	end = nfp_prog_current_offset(nfp_prog) + 11;

	/* Calculate resulting length */
	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
	/* delta == 0 is not allowed by the kernel; the add must overflow to
	 * make the length smaller.
	 */
	emit_br(nfp_prog, BR_BCC, ret_einval, 0);

	/* if (new_len < 14) then -EINVAL */
	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_ADD, delta);

	emit_br(nfp_prog, BR_UNC, end, 2);
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_frame_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}
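
/* Firmware helper calls follow the convention seen in
 * map_call_stack_common() above: branch to the helper through a
 * RELO_BR_HELPER relocation with defer 2, pass arguments in the A bank
 * (A0/A1), and leave the return address in B0 via RELO_IMMED_REL.
 */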
static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into the FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here; we will jump over the next instruction if the
	 * queue value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_frame_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
			 ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}
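
/* 64-bit adds and subtracts are split into 32-bit halves, chaining the carry
 * (ALU_OP_ADD_C) or borrow (ALU_OP_SUB_C) from the low word into the high
 * word.
 */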
1911 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1912 { 1913 const struct bpf_insn *insn = &meta->insn; 1914 u64 imm = insn->imm; /* sign extend */ 1915 1916 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U); 1917 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32); 1918 1919 return 0; 1920 } 1921 1922 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1923 { 1924 const struct bpf_insn *insn = &meta->insn; 1925 1926 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), 1927 reg_a(insn->dst_reg * 2), ALU_OP_SUB, 1928 reg_b(insn->src_reg * 2)); 1929 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 1930 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C, 1931 reg_b(insn->src_reg * 2 + 1)); 1932 1933 return 0; 1934 } 1935 1936 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1937 { 1938 const struct bpf_insn *insn = &meta->insn; 1939 u64 imm = insn->imm; /* sign extend */ 1940 1941 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U); 1942 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32); 1943 1944 return 0; 1945 } 1946 1947 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1948 { 1949 return wrp_mul(nfp_prog, meta, true, true); 1950 } 1951 1952 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1953 { 1954 return wrp_mul(nfp_prog, meta, true, false); 1955 } 1956 1957 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1958 { 1959 const struct bpf_insn *insn = &meta->insn; 1960 1961 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); 1962 } 1963 1964 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1965 { 1966 /* NOTE: verifier hook has rejected cases for which verifier doesn't 1967 * know whether the source operand is constant or not. 1968 */ 1969 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); 1970 } 1971 1972 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1973 { 1974 const struct bpf_insn *insn = &meta->insn; 1975 1976 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), 1977 ALU_OP_SUB, reg_b(insn->dst_reg * 2)); 1978 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), 1979 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); 1980 1981 return 0; 1982 } 1983 1984 /* Pseudo code: 1985 * if shift_amt >= 32 1986 * dst_high = dst_low << shift_amt[4:0] 1987 * dst_low = 0; 1988 * else 1989 * dst_high = (dst_high, dst_low) >> (32 - shift_amt) 1990 * dst_low = dst_low << shift_amt 1991 * 1992 * The indirect shift will use the same logic at runtime. 
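 *
 * For example, an illustrative walk-through of shift_amt = 8 on
 * (dst_high, dst_low) = (0x00000012, 0x34000000): the double shift
 * computes dst_high = 0x00001234, and the plain left shift leaves
 * dst_low = 0x00000000, i.e. the 64-bit value shifted left by 8.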
1993 */ 1994 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 1995 { 1996 if (shift_amt < 32) { 1997 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), 1998 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, 1999 32 - shift_amt); 2000 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2001 reg_b(dst), SHF_SC_L_SHF, shift_amt); 2002 } else if (shift_amt == 32) { 2003 wrp_reg_mov(nfp_prog, dst + 1, dst); 2004 wrp_immed(nfp_prog, reg_both(dst), 0); 2005 } else if (shift_amt > 32) { 2006 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2007 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); 2008 wrp_immed(nfp_prog, reg_both(dst), 0); 2009 } 2010 2011 return 0; 2012 } 2013 2014 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2015 { 2016 const struct bpf_insn *insn = &meta->insn; 2017 u8 dst = insn->dst_reg * 2; 2018 2019 return __shl_imm64(nfp_prog, dst, insn->imm); 2020 } 2021 2022 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2023 { 2024 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, 2025 reg_b(src)); 2026 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); 2027 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, 2028 reg_b(dst), SHF_SC_R_DSHF); 2029 } 2030 2031 /* NOTE: for indirect left shift, HIGH part should be calculated first. */ 2032 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2033 { 2034 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2035 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2036 reg_b(dst), SHF_SC_L_SHF); 2037 } 2038 2039 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2040 { 2041 shl_reg64_lt32_high(nfp_prog, dst, src); 2042 shl_reg64_lt32_low(nfp_prog, dst, src); 2043 } 2044 2045 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2046 { 2047 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2048 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2049 reg_b(dst), SHF_SC_L_SHF); 2050 wrp_immed(nfp_prog, reg_both(dst), 0); 2051 } 2052 2053 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2054 { 2055 const struct bpf_insn *insn = &meta->insn; 2056 u64 umin, umax; 2057 u8 dst, src; 2058 2059 dst = insn->dst_reg * 2; 2060 umin = meta->umin_src; 2061 umax = meta->umax_src; 2062 if (umin == umax) 2063 return __shl_imm64(nfp_prog, dst, umin); 2064 2065 src = insn->src_reg * 2; 2066 if (umax < 32) { 2067 shl_reg64_lt32(nfp_prog, dst, src); 2068 } else if (umin >= 32) { 2069 shl_reg64_ge32(nfp_prog, dst, src); 2070 } else { 2071 /* Generate different instruction sequences depending on runtime 2072 * value of shift amount. 2073 */ 2074 u16 label_ge32, label_end; 2075 2076 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; 2077 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2078 2079 shl_reg64_lt32_high(nfp_prog, dst, src); 2080 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2081 emit_br(nfp_prog, BR_UNC, label_end, 2); 2082 /* shl_reg64_lt32_low packed in delay slot. 
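 * The unconditional branch above was emitted with two defer slots, so the
 * two instructions emitted by shl_reg64_lt32_low() still execute before
 * the jump to label_end takes effect.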
*/ 2083 shl_reg64_lt32_low(nfp_prog, dst, src); 2084 2085 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2086 return -EINVAL; 2087 shl_reg64_ge32(nfp_prog, dst, src); 2088 2089 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2090 return -EINVAL; 2091 } 2092 2093 return 0; 2094 } 2095 2096 /* Pseudo code: 2097 * if shift_amt >= 32 2098 * dst_high = 0; 2099 * dst_low = dst_high >> shift_amt[4:0] 2100 * else 2101 * dst_high = dst_high >> shift_amt 2102 * dst_low = (dst_high, dst_low) >> shift_amt 2103 * 2104 * The indirect shift will use the same logic at runtime. 2105 */ 2106 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2107 { 2108 if (shift_amt < 32) { 2109 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2110 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2111 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2112 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2113 } else if (shift_amt == 32) { 2114 wrp_reg_mov(nfp_prog, dst, dst + 1); 2115 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2116 } else if (shift_amt > 32) { 2117 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2118 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2119 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2120 } 2121 2122 return 0; 2123 } 2124 2125 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2126 { 2127 const struct bpf_insn *insn = &meta->insn; 2128 u8 dst = insn->dst_reg * 2; 2129 2130 return __shr_imm64(nfp_prog, dst, insn->imm); 2131 } 2132 2133 /* NOTE: for indirect right shift, LOW part should be calculated first. */ 2134 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2135 { 2136 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2137 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, 2138 reg_b(dst + 1), SHF_SC_R_SHF); 2139 } 2140 2141 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2142 { 2143 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2144 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2145 reg_b(dst), SHF_SC_R_DSHF); 2146 } 2147 2148 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2149 { 2150 shr_reg64_lt32_low(nfp_prog, dst, src); 2151 shr_reg64_lt32_high(nfp_prog, dst, src); 2152 } 2153 2154 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2155 { 2156 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); 2157 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, 2158 reg_b(dst + 1), SHF_SC_R_SHF); 2159 wrp_immed(nfp_prog, reg_both(dst + 1), 0); 2160 } 2161 2162 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2163 { 2164 const struct bpf_insn *insn = &meta->insn; 2165 u64 umin, umax; 2166 u8 dst, src; 2167 2168 dst = insn->dst_reg * 2; 2169 umin = meta->umin_src; 2170 umax = meta->umax_src; 2171 if (umin == umax) 2172 return __shr_imm64(nfp_prog, dst, umin); 2173 2174 src = insn->src_reg * 2; 2175 if (umax < 32) { 2176 shr_reg64_lt32(nfp_prog, dst, src); 2177 } else if (umin >= 32) { 2178 shr_reg64_ge32(nfp_prog, dst, src); 2179 } else { 2180 /* Generate different instruction sequences depending on runtime 2181 * value of shift amount. 
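 * The label offsets are hand-counted: the "less than 32" path consists of
 * the branch on bit 5 of the shift amount, the two lt32 low instructions,
 * the unconditional branch and its two delay-slot instructions, which is
 * why label_ge32 sits 6 instructions ahead;
 * nfp_prog_confirm_current_offset() double-checks the arithmetic.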
2182 */ 2183 u16 label_ge32, label_end; 2184 2185 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2186 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2187 shr_reg64_lt32_low(nfp_prog, dst, src); 2188 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2189 emit_br(nfp_prog, BR_UNC, label_end, 2); 2190 /* shr_reg64_lt32_high packed in delay slot. */ 2191 shr_reg64_lt32_high(nfp_prog, dst, src); 2192 2193 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2194 return -EINVAL; 2195 shr_reg64_ge32(nfp_prog, dst, src); 2196 2197 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2198 return -EINVAL; 2199 } 2200 2201 return 0; 2202 } 2203 2204 /* Code logic is the same as __shr_imm64, except that ashr requires the 2205 * signedness bit, which is conveyed through the PREV_ALU result. 2206 */ 2207 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) 2208 { 2209 if (shift_amt < 32) { 2210 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, 2211 reg_b(dst), SHF_SC_R_DSHF, shift_amt); 2212 /* Set signedness bit. */ 2213 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2214 reg_imm(0)); 2215 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2216 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); 2217 } else if (shift_amt == 32) { 2218 /* NOTE: this also helps setting signedness bit. */ 2219 wrp_reg_mov(nfp_prog, dst, dst + 1); 2220 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2221 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2222 } else if (shift_amt > 32) { 2223 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, 2224 reg_imm(0)); 2225 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2226 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); 2227 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2228 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2229 } 2230 2231 return 0; 2232 } 2233 2234 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2235 { 2236 const struct bpf_insn *insn = &meta->insn; 2237 u8 dst = insn->dst_reg * 2; 2238 2239 return __ashr_imm64(nfp_prog, dst, insn->imm); 2240 } 2241 2242 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2243 { 2244 /* NOTE: the first insn will set both indirect shift amount (source A) 2245 * and signedness bit (MSB of result). 2246 */ 2247 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2248 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2249 reg_b(dst + 1), SHF_SC_R_SHF); 2250 } 2251 2252 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2253 { 2254 /* NOTE: it is the same as a logical shift because we don't need to 2255 * shift in the signedness bit when the shift amount is less than 32. 2256 */ 2257 return shr_reg64_lt32_low(nfp_prog, dst, src); 2258 } 2259 2260 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2261 { 2262 ashr_reg64_lt32_low(nfp_prog, dst, src); 2263 ashr_reg64_lt32_high(nfp_prog, dst, src); 2264 } 2265 2266 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) 2267 { 2268 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); 2269 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, 2270 reg_b(dst + 1), SHF_SC_R_SHF); 2271 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, 2272 reg_b(dst + 1), SHF_SC_R_SHF, 31); 2273 } 2274 2275 /* Like ashr_imm64, but needs to use an indirect shift.
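 * The shift amount is only known at runtime, so it is fed to the shifter
 * through the A operand of the ALU instruction preceding each indirect
 * shift (the PREV_ALU result mentioned above), which for ashr also
 * carries the signedness bit in its MSB.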
*/ 2276 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2277 { 2278 const struct bpf_insn *insn = &meta->insn; 2279 u64 umin, umax; 2280 u8 dst, src; 2281 2282 dst = insn->dst_reg * 2; 2283 umin = meta->umin_src; 2284 umax = meta->umax_src; 2285 if (umin == umax) 2286 return __ashr_imm64(nfp_prog, dst, umin); 2287 2288 src = insn->src_reg * 2; 2289 if (umax < 32) { 2290 ashr_reg64_lt32(nfp_prog, dst, src); 2291 } else if (umin >= 32) { 2292 ashr_reg64_ge32(nfp_prog, dst, src); 2293 } else { 2294 u16 label_ge32, label_end; 2295 2296 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; 2297 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); 2298 ashr_reg64_lt32_low(nfp_prog, dst, src); 2299 label_end = nfp_prog_current_offset(nfp_prog) + 6; 2300 emit_br(nfp_prog, BR_UNC, label_end, 2); 2301 /* ashr_reg64_lt32_high packed in delay slot. */ 2302 ashr_reg64_lt32_high(nfp_prog, dst, src); 2303 2304 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) 2305 return -EINVAL; 2306 ashr_reg64_ge32(nfp_prog, dst, src); 2307 2308 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) 2309 return -EINVAL; 2310 } 2311 2312 return 0; 2313 } 2314 2315 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2316 { 2317 const struct bpf_insn *insn = &meta->insn; 2318 2319 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); 2320 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2321 2322 return 0; 2323 } 2324 2325 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2326 { 2327 const struct bpf_insn *insn = &meta->insn; 2328 2329 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); 2330 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2331 2332 return 0; 2333 } 2334 2335 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2336 { 2337 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR); 2338 } 2339 2340 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2341 { 2342 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 2343 } 2344 2345 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2346 { 2347 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND); 2348 } 2349 2350 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2351 { 2352 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 2353 } 2354 2355 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2356 { 2357 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR); 2358 } 2359 2360 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2361 { 2362 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2363 } 2364 2365 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2366 { 2367 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD); 2368 } 2369 2370 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2371 { 2372 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2373 } 2374 2375 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2376 { 2377 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB); 2378 } 2379 2380 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2381 { 2382 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2383 } 2384 2385 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2386 { 2387 return wrp_mul(nfp_prog, meta, false, true); 2388 } 2389 2390 
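/* All four multiply callbacks funnel into wrp_mul(); judging by the call
 * sites, its first flag selects whether the full 64-bit result is kept
 * (the ALU64 variants) and the second whether the right operand comes
 * from a register (BPF_X) rather than an immediate (BPF_K).
 */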
static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2391 { 2392 return wrp_mul(nfp_prog, meta, false, false); 2393 } 2394 2395 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2396 { 2397 return div_reg64(nfp_prog, meta); 2398 } 2399 2400 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2401 { 2402 return div_imm64(nfp_prog, meta); 2403 } 2404 2405 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2406 { 2407 u8 dst = meta->insn.dst_reg * 2; 2408 2409 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); 2410 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2411 2412 return 0; 2413 } 2414 2415 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2416 { 2417 const struct bpf_insn *insn = &meta->insn; 2418 2419 if (!insn->imm) 2420 return 1; /* TODO: zero shift means indirect */ 2421 2422 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), 2423 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), 2424 SHF_SC_L_SHF, insn->imm); 2425 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 2426 2427 return 0; 2428 } 2429 2430 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2431 { 2432 const struct bpf_insn *insn = &meta->insn; 2433 u8 gpr = insn->dst_reg * 2; 2434 2435 switch (insn->imm) { 2436 case 16: 2437 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), 2438 SHF_SC_R_ROT, 8); 2439 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), 2440 SHF_SC_R_SHF, 16); 2441 2442 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2443 break; 2444 case 32: 2445 wrp_end32(nfp_prog, reg_a(gpr), gpr); 2446 wrp_immed(nfp_prog, reg_both(gpr + 1), 0); 2447 break; 2448 case 64: 2449 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); 2450 2451 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); 2452 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); 2453 break; 2454 } 2455 2456 return 0; 2457 } 2458 2459 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2460 { 2461 struct nfp_insn_meta *prev = nfp_meta_prev(meta); 2462 u32 imm_lo, imm_hi; 2463 u8 dst; 2464 2465 dst = prev->insn.dst_reg * 2; 2466 imm_lo = prev->insn.imm; 2467 imm_hi = meta->insn.imm; 2468 2469 wrp_immed(nfp_prog, reg_both(dst), imm_lo); 2470 2471 /* mov is always 1 insn, load imm may be two, so try to use mov */ 2472 if (imm_hi == imm_lo) 2473 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); 2474 else 2475 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); 2476 2477 return 0; 2478 } 2479 2480 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2481 { 2482 meta->double_cb = imm_ld8_part2; 2483 return 0; 2484 } 2485 2486 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2487 { 2488 return construct_data_ld(nfp_prog, meta->insn.imm, 1); 2489 } 2490 2491 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2492 { 2493 return construct_data_ld(nfp_prog, meta->insn.imm, 2); 2494 } 2495 2496 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2497 { 2498 return construct_data_ld(nfp_prog, meta->insn.imm, 4); 2499 } 2500 2501 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2502 { 2503 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2504 meta->insn.src_reg * 2, 1); 2505 } 2506 2507 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2508 { 2509 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2510 meta->insn.src_reg * 
2, 2); 2511 } 2512 2513 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2514 { 2515 return construct_data_ind_ld(nfp_prog, meta->insn.imm, 2516 meta->insn.src_reg * 2, 4); 2517 } 2518 2519 static int 2520 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2521 unsigned int size, unsigned int ptr_off) 2522 { 2523 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2524 meta->insn.dst_reg * 2, meta->insn.src_reg * 2, 2525 true, wrp_lmem_load); 2526 } 2527 2528 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2529 u8 size) 2530 { 2531 swreg dst = reg_both(meta->insn.dst_reg * 2); 2532 2533 switch (meta->insn.off) { 2534 case offsetof(struct __sk_buff, len): 2535 if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2536 return -EOPNOTSUPP; 2537 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2538 break; 2539 case offsetof(struct __sk_buff, data): 2540 if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2541 return -EOPNOTSUPP; 2542 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2543 break; 2544 case offsetof(struct __sk_buff, data_end): 2545 if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2546 return -EOPNOTSUPP; 2547 emit_alu(nfp_prog, dst, 2548 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2549 break; 2550 default: 2551 return -EOPNOTSUPP; 2552 } 2553 2554 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2555 2556 return 0; 2557 } 2558 2559 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2560 u8 size) 2561 { 2562 swreg dst = reg_both(meta->insn.dst_reg * 2); 2563 2564 switch (meta->insn.off) { 2565 case offsetof(struct xdp_md, data): 2566 if (size != FIELD_SIZEOF(struct xdp_md, data)) 2567 return -EOPNOTSUPP; 2568 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2569 break; 2570 case offsetof(struct xdp_md, data_end): 2571 if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2572 return -EOPNOTSUPP; 2573 emit_alu(nfp_prog, dst, 2574 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); 2575 break; 2576 default: 2577 return -EOPNOTSUPP; 2578 } 2579 2580 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); 2581 2582 return 0; 2583 } 2584 2585 static int 2586 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2587 unsigned int size) 2588 { 2589 swreg tmp_reg; 2590 2591 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2592 2593 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, 2594 tmp_reg, meta->insn.dst_reg * 2, size); 2595 } 2596 2597 static int 2598 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2599 unsigned int size) 2600 { 2601 swreg tmp_reg; 2602 2603 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2604 2605 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, 2606 tmp_reg, meta->insn.dst_reg * 2, size); 2607 } 2608 2609 static void 2610 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, 2611 struct nfp_insn_meta *meta) 2612 { 2613 s16 range_start = meta->pkt_cache.range_start; 2614 s16 range_end = meta->pkt_cache.range_end; 2615 swreg src_base, off; 2616 u8 xfer_num, len; 2617 bool indir; 2618 2619 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); 2620 src_base = reg_a(meta->insn.src_reg * 2); 2621 len = range_end - range_start; 2622 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; 2623 2624 indir = len > 8 * REG_WIDTH; 2625 /* Setup PREV_ALU for indirect mode. 
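 * The immediate written to reg_none() below lands in the previous ALU
 * result, which in indirect-reference mode overrides the command's length
 * field; this is what allows a cached range to span more than 8 transfer
 * registers.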
*/ 2626 if (indir) 2627 wrp_immed(nfp_prog, reg_none(), 2628 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); 2629 2630 /* Cache memory into transfer-in registers. */ 2631 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, 2632 off, xfer_num - 1, CMD_CTX_SWAP, indir); 2633 } 2634 2635 static int 2636 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, 2637 struct nfp_insn_meta *meta, 2638 unsigned int size) 2639 { 2640 s16 range_start = meta->pkt_cache.range_start; 2641 s16 insn_off = meta->insn.off - range_start; 2642 swreg dst_lo, dst_hi, src_lo, src_mid; 2643 u8 dst_gpr = meta->insn.dst_reg * 2; 2644 u8 len_lo = size, len_mid = 0; 2645 u8 idx = insn_off / REG_WIDTH; 2646 u8 off = insn_off % REG_WIDTH; 2647 2648 dst_hi = reg_both(dst_gpr + 1); 2649 dst_lo = reg_both(dst_gpr); 2650 src_lo = reg_xfer(idx); 2651 2652 /* The read length could involve as many as three registers. */ 2653 if (size > REG_WIDTH - off) { 2654 /* Calculate the part in the second register. */ 2655 len_lo = REG_WIDTH - off; 2656 len_mid = size - len_lo; 2657 2658 /* Calculate the part in the third register. */ 2659 if (size > 2 * REG_WIDTH - off) 2660 len_mid = REG_WIDTH; 2661 } 2662 2663 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); 2664 2665 if (!len_mid) { 2666 wrp_immed(nfp_prog, dst_hi, 0); 2667 return 0; 2668 } 2669 2670 src_mid = reg_xfer(idx + 1); 2671 2672 if (size <= REG_WIDTH) { 2673 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); 2674 wrp_immed(nfp_prog, dst_hi, 0); 2675 } else { 2676 swreg src_hi = reg_xfer(idx + 2); 2677 2678 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, 2679 REG_WIDTH - len_lo, len_lo); 2680 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, 2681 REG_WIDTH - len_lo); 2682 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, 2683 len_lo); 2684 } 2685 2686 return 0; 2687 } 2688 2689 static int 2690 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, 2691 struct nfp_insn_meta *meta, 2692 unsigned int size) 2693 { 2694 swreg dst_lo, dst_hi, src_lo; 2695 u8 dst_gpr, idx; 2696 2697 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; 2698 dst_gpr = meta->insn.dst_reg * 2; 2699 dst_hi = reg_both(dst_gpr + 1); 2700 dst_lo = reg_both(dst_gpr); 2701 src_lo = reg_xfer(idx); 2702 2703 if (size < REG_WIDTH) { 2704 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); 2705 wrp_immed(nfp_prog, dst_hi, 0); 2706 } else if (size == REG_WIDTH) { 2707 wrp_mov(nfp_prog, dst_lo, src_lo); 2708 wrp_immed(nfp_prog, dst_hi, 0); 2709 } else { 2710 swreg src_hi = reg_xfer(idx + 1); 2711 2712 wrp_mov(nfp_prog, dst_lo, src_lo); 2713 wrp_mov(nfp_prog, dst_hi, src_hi); 2714 } 2715 2716 return 0; 2717 } 2718 2719 static int 2720 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, 2721 struct nfp_insn_meta *meta, unsigned int size) 2722 { 2723 u8 off = meta->insn.off - meta->pkt_cache.range_start; 2724 2725 if (IS_ALIGNED(off, REG_WIDTH)) 2726 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); 2727 2728 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); 2729 } 2730 2731 static int 2732 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2733 unsigned int size) 2734 { 2735 if (meta->ldst_gather_len) 2736 return nfp_cpp_memcpy(nfp_prog, meta); 2737 2738 if (meta->ptr.type == PTR_TO_CTX) { 2739 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2740 return mem_ldx_xdp(nfp_prog, meta, size); 2741 else 2742 return mem_ldx_skb(nfp_prog, meta, size); 2743 } 2744 2745 if (meta->ptr.type == 
PTR_TO_PACKET) { 2746 if (meta->pkt_cache.range_end) { 2747 if (meta->pkt_cache.do_init) 2748 mem_ldx_data_init_pktcache(nfp_prog, meta); 2749 2750 return mem_ldx_data_from_pktcache(nfp_prog, meta, size); 2751 } else { 2752 return mem_ldx_data(nfp_prog, meta, size); 2753 } 2754 } 2755 2756 if (meta->ptr.type == PTR_TO_STACK) 2757 return mem_ldx_stack(nfp_prog, meta, size, 2758 meta->ptr.off + meta->ptr.var_off.value); 2759 2760 if (meta->ptr.type == PTR_TO_MAP_VALUE) 2761 return mem_ldx_emem(nfp_prog, meta, size); 2762 2763 return -EOPNOTSUPP; 2764 } 2765 2766 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2767 { 2768 return mem_ldx(nfp_prog, meta, 1); 2769 } 2770 2771 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2772 { 2773 return mem_ldx(nfp_prog, meta, 2); 2774 } 2775 2776 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2777 { 2778 return mem_ldx(nfp_prog, meta, 4); 2779 } 2780 2781 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2782 { 2783 return mem_ldx(nfp_prog, meta, 8); 2784 } 2785 2786 static int 2787 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2788 unsigned int size) 2789 { 2790 u64 imm = meta->insn.imm; /* sign extend */ 2791 swreg off_reg; 2792 2793 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2794 2795 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2796 imm, size); 2797 } 2798 2799 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2800 unsigned int size) 2801 { 2802 if (meta->ptr.type == PTR_TO_PACKET) 2803 return mem_st_data(nfp_prog, meta, size); 2804 2805 return -EOPNOTSUPP; 2806 } 2807 2808 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2809 { 2810 return mem_st(nfp_prog, meta, 1); 2811 } 2812 2813 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2814 { 2815 return mem_st(nfp_prog, meta, 2); 2816 } 2817 2818 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2819 { 2820 return mem_st(nfp_prog, meta, 4); 2821 } 2822 2823 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2824 { 2825 return mem_st(nfp_prog, meta, 8); 2826 } 2827 2828 static int 2829 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2830 unsigned int size) 2831 { 2832 swreg off_reg; 2833 2834 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2835 2836 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, 2837 meta->insn.src_reg * 2, size); 2838 } 2839 2840 static int 2841 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2842 unsigned int size, unsigned int ptr_off) 2843 { 2844 return mem_op_stack(nfp_prog, meta, size, ptr_off, 2845 meta->insn.src_reg * 2, meta->insn.dst_reg * 2, 2846 false, wrp_lmem_store); 2847 } 2848 2849 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2850 { 2851 switch (meta->insn.off) { 2852 case offsetof(struct xdp_md, rx_queue_index): 2853 return nfp_queue_select(nfp_prog, meta); 2854 } 2855 2856 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ 2857 return -EOPNOTSUPP; 2858 } 2859 2860 static int 2861 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 2862 unsigned int size) 2863 { 2864 if (meta->ptr.type == PTR_TO_PACKET) 2865 return mem_stx_data(nfp_prog, meta, size); 2866 2867 if (meta->ptr.type == PTR_TO_STACK) 2868 return mem_stx_stack(nfp_prog, meta, 
size, 2869 meta->ptr.off + meta->ptr.var_off.value); 2870 2871 return -EOPNOTSUPP; 2872 } 2873 2874 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2875 { 2876 return mem_stx(nfp_prog, meta, 1); 2877 } 2878 2879 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2880 { 2881 return mem_stx(nfp_prog, meta, 2); 2882 } 2883 2884 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2885 { 2886 if (meta->ptr.type == PTR_TO_CTX) 2887 if (nfp_prog->type == BPF_PROG_TYPE_XDP) 2888 return mem_stx_xdp(nfp_prog, meta); 2889 return mem_stx(nfp_prog, meta, 4); 2890 } 2891 2892 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2893 { 2894 return mem_stx(nfp_prog, meta, 8); 2895 } 2896 2897 static int 2898 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) 2899 { 2900 u8 dst_gpr = meta->insn.dst_reg * 2; 2901 u8 src_gpr = meta->insn.src_reg * 2; 2902 unsigned int full_add, out; 2903 swreg addra, addrb, off; 2904 2905 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); 2906 2907 /* We can fit 16 bits into the command immediate. If we know the 2908 * immediate is guaranteed to either always or never fit into 16 bits, 2909 * we only generate code to handle that particular case; otherwise we 2910 * generate code for both. 2911 */ 2912 out = nfp_prog_current_offset(nfp_prog); 2913 full_add = nfp_prog_current_offset(nfp_prog); 2914 2915 if (meta->insn.off) { 2916 out += 2; 2917 full_add += 2; 2918 } 2919 if (meta->xadd_maybe_16bit) { 2920 out += 3; 2921 full_add += 3; 2922 } 2923 if (meta->xadd_over_16bit) 2924 out += 2 + is64; 2925 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2926 out += 5; 2927 full_add += 5; 2928 } 2929 2930 /* Generate the branch for choosing add_imm vs add */ 2931 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { 2932 swreg max_imm = imm_a(nfp_prog); 2933 2934 wrp_immed(nfp_prog, max_imm, 0xffff); 2935 emit_alu(nfp_prog, reg_none(), 2936 max_imm, ALU_OP_SUB, reg_b(src_gpr)); 2937 emit_alu(nfp_prog, reg_none(), 2938 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); 2939 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); 2940 /* defer for add */ 2941 } 2942 2943 /* If the insn has an offset, add it to the address */ 2944 if (!meta->insn.off) { 2945 addra = reg_a(dst_gpr); 2946 addrb = reg_b(dst_gpr + 1); 2947 } else { 2948 emit_alu(nfp_prog, imma_a(nfp_prog), 2949 reg_a(dst_gpr), ALU_OP_ADD, off); 2950 emit_alu(nfp_prog, imma_b(nfp_prog), 2951 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); 2952 addra = imma_a(nfp_prog); 2953 addrb = imma_b(nfp_prog); 2954 } 2955 2956 /* Generate the add_imm if 16 bits are possible */ 2957 if (meta->xadd_maybe_16bit) { 2958 swreg prev_alu = imm_a(nfp_prog); 2959 2960 wrp_immed(nfp_prog, prev_alu, 2961 FIELD_PREP(CMD_OVE_DATA, 2) | 2962 CMD_OVE_LEN | 2963 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); 2964 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); 2965 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, 2966 addra, addrb, 0, CMD_CTX_NO_SWAP); 2967 2968 if (meta->xadd_over_16bit) 2969 emit_br(nfp_prog, BR_UNC, out, 0); 2970 } 2971 2972 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) 2973 return -EINVAL; 2974 2975 /* Generate the add if 16 bits are not guaranteed */ 2976 if (meta->xadd_over_16bit) { 2977 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, 2978 addra, addrb, is64 << 2, 2979 is64 ?
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); 2980 2981 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); 2982 if (is64) 2983 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); 2984 } 2985 2986 if (!nfp_prog_confirm_current_offset(nfp_prog, out)) 2987 return -EINVAL; 2988 2989 return 0; 2990 } 2991 2992 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2993 { 2994 return mem_xadd(nfp_prog, meta, false); 2995 } 2996 2997 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2998 { 2999 return mem_xadd(nfp_prog, meta, true); 3000 } 3001 3002 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3003 { 3004 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); 3005 3006 return 0; 3007 } 3008 3009 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3010 { 3011 const struct bpf_insn *insn = &meta->insn; 3012 u64 imm = insn->imm; /* sign extend */ 3013 swreg or1, or2, tmp_reg; 3014 3015 or1 = reg_a(insn->dst_reg * 2); 3016 or2 = reg_b(insn->dst_reg * 2 + 1); 3017 3018 if (imm & ~0U) { 3019 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3020 emit_alu(nfp_prog, imm_a(nfp_prog), 3021 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3022 or1 = imm_a(nfp_prog); 3023 } 3024 3025 if (imm >> 32) { 3026 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3027 emit_alu(nfp_prog, imm_b(nfp_prog), 3028 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3029 or2 = imm_b(nfp_prog); 3030 } 3031 3032 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2); 3033 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3034 3035 return 0; 3036 } 3037 3038 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3039 { 3040 const struct bpf_insn *insn = &meta->insn; 3041 u64 imm = insn->imm; /* sign extend */ 3042 swreg tmp_reg; 3043 3044 if (!imm) { 3045 meta->skip = true; 3046 return 0; 3047 } 3048 3049 if (imm & ~0U) { 3050 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3051 emit_alu(nfp_prog, reg_none(), 3052 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg); 3053 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3054 } 3055 3056 if (imm >> 32) { 3057 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3058 emit_alu(nfp_prog, reg_none(), 3059 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); 3060 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3061 } 3062 3063 return 0; 3064 } 3065 3066 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3067 { 3068 const struct bpf_insn *insn = &meta->insn; 3069 u64 imm = insn->imm; /* sign extend */ 3070 swreg tmp_reg; 3071 3072 if (!imm) { 3073 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), 3074 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); 3075 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3076 return 0; 3077 } 3078 3079 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3080 emit_alu(nfp_prog, reg_none(), 3081 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); 3082 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3083 3084 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); 3085 emit_alu(nfp_prog, reg_none(), 3086 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); 3087 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3088 3089 return 0; 3090 } 3091 3092 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3093 { 3094 const struct bpf_insn *insn = &meta->insn; 3095 3096 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), 3097 ALU_OP_XOR, reg_b(insn->src_reg * 2)); 3098 emit_alu(nfp_prog, 
imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), 3099 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); 3100 emit_alu(nfp_prog, reg_none(), 3101 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); 3102 emit_br(nfp_prog, BR_BEQ, insn->off, 0); 3103 3104 return 0; 3105 } 3106 3107 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3108 { 3109 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 3110 } 3111 3112 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3113 { 3114 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); 3115 } 3116 3117 static int 3118 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3119 { 3120 u32 ret_tgt, stack_depth, offset_br; 3121 swreg tmp_reg; 3122 3123 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); 3124 /* Space for saving the return address is accounted for by the callee, 3125 * so stack_depth can be zero for the main function. 3126 */ 3127 if (stack_depth) { 3128 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3129 stack_imm(nfp_prog)); 3130 emit_alu(nfp_prog, stack_reg(nfp_prog), 3131 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); 3132 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3133 NFP_CSR_ACT_LM_ADDR0); 3134 } 3135 3136 /* Two cases for jumping to the callee: 3137 * 3138 * - If the callee uses and needs to save R6~R9, then: 3139 * 1. Put the start offset of the callee into imm_b(). This will 3140 * require a fixup step, as we do not necessarily know this 3141 * address yet. 3142 * 2. Put the return address from the callee to the caller into 3143 * register ret_reg(). 3144 * 3. (After defer slots are consumed) Jump to the subroutine that 3145 * pushes the registers to the stack. 3146 * The subroutine acts as a trampoline, and returns to the address in 3147 * imm_b(), i.e. jumps to the callee. 3148 * 3149 * - If the callee does not need to save R6~R9, then just load the 3150 * return address to the caller into ret_reg(), and jump to the 3151 * callee directly. 3152 * 3153 * Using ret_reg() to pass the return address to the callee is set here 3154 * as a convention. The callee can then push this address onto its 3155 * stack frame in its prologue. The advantages of passing the return 3156 * address through ret_reg(), instead of pushing it to the stack right 3157 * here, are the following: 3158 * - It looks cleaner. 3159 * - If the called function is called multiple times, we get a lower 3160 * program size. 3161 * - We save the two no-op instructions that would otherwise have to be 3162 * added just before the emit_br() when the stack depth is not null. 3163 * - If we ever find a register to hold the return address during the 3164 * whole execution of the callee, we will not have to push the return 3165 * address to the stack for leaf functions.
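 *
 * Roughly, the register-saving variant emitted below looks like:
 *   br_relo <push-regs subroutine>, 2 defer slots
 *   immed   imm_b,   0           (patched later with the callee offset)
 *   immed   ret_reg, ret_tgt
 * with ret_tgt pointing just past the last immed.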
3166 */ 3167 if (!meta->jmp_dst) { 3168 pr_err("BUG: BPF-to-BPF call has no destination recorded\n"); 3169 return -ELOOP; 3170 } 3171 if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) { 3172 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; 3173 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, 3174 RELO_BR_GO_CALL_PUSH_REGS); 3175 offset_br = nfp_prog_current_offset(nfp_prog); 3176 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL); 3177 } else { 3178 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; 3179 emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1); 3180 offset_br = nfp_prog_current_offset(nfp_prog); 3181 } 3182 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL); 3183 3184 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) 3185 return -EINVAL; 3186 3187 if (stack_depth) { 3188 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, 3189 stack_imm(nfp_prog)); 3190 emit_alu(nfp_prog, stack_reg(nfp_prog), 3191 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg); 3192 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), 3193 NFP_CSR_ACT_LM_ADDR0); 3194 wrp_nops(nfp_prog, 3); 3195 } 3196 3197 meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog); 3198 meta->num_insns_after_br -= offset_br; 3199 3200 return 0; 3201 } 3202 3203 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3204 { 3205 switch (meta->insn.imm) { 3206 case BPF_FUNC_xdp_adjust_head: 3207 return adjust_head(nfp_prog, meta); 3208 case BPF_FUNC_xdp_adjust_tail: 3209 return adjust_tail(nfp_prog, meta); 3210 case BPF_FUNC_map_lookup_elem: 3211 case BPF_FUNC_map_update_elem: 3212 case BPF_FUNC_map_delete_elem: 3213 return map_call_stack_common(nfp_prog, meta); 3214 case BPF_FUNC_get_prandom_u32: 3215 return nfp_get_prandom_u32(nfp_prog, meta); 3216 case BPF_FUNC_perf_event_output: 3217 return nfp_perf_event_output(nfp_prog, meta); 3218 default: 3219 WARN_ONCE(1, "verifier allowed unsupported function\n"); 3220 return -EOPNOTSUPP; 3221 } 3222 } 3223 3224 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3225 { 3226 if (is_mbpf_pseudo_call(meta)) 3227 return bpf_to_bpf_call(nfp_prog, meta); 3228 else 3229 return helper_call(nfp_prog, meta); 3230 } 3231 3232 static bool nfp_is_main_function(struct nfp_insn_meta *meta) 3233 { 3234 return meta->subprog_idx == 0; 3235 } 3236 3237 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3238 { 3239 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); 3240 3241 return 0; 3242 } 3243 3244 static int 3245 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3246 { 3247 if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) { 3248 /* Pop R6~R9 from the stack via the related subroutine. 3249 * We loaded the return address to the caller into ret_reg(). 3250 * This means that the subroutine does not come back here; we 3251 * make it jump back to the subprogram caller directly! 3252 */ 3253 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1, 3254 RELO_BR_GO_CALL_POP_REGS); 3255 /* Pop return address from the stack. */ 3256 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3257 } else { 3258 /* Pop return address from the stack. */ 3259 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); 3260 /* Jump back to caller if no callee-saved registers were used 3261 * by the subprogram.
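 * In that case emit_rtn() below can jump straight to the address now
 * held in ret_reg(), with no detour through the pop-registers
 * subroutine.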
3262 */ 3263 emit_rtn(nfp_prog, ret_reg(nfp_prog), 0); 3264 } 3265 3266 return 0; 3267 } 3268 3269 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3270 { 3271 if (nfp_is_main_function(meta)) 3272 return goto_out(nfp_prog, meta); 3273 else 3274 return nfp_subprog_epilogue(nfp_prog, meta); 3275 } 3276 3277 static const instr_cb_t instr_cb[256] = { 3278 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, 3279 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, 3280 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64, 3281 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64, 3282 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64, 3283 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64, 3284 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64, 3285 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64, 3286 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64, 3287 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, 3288 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, 3289 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, 3290 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, 3291 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, 3292 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, 3293 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, 3294 [BPF_ALU64 | BPF_NEG] = neg_reg64, 3295 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, 3296 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, 3297 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, 3298 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, 3299 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, 3300 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, 3301 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, 3302 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, 3303 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, 3304 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm, 3305 [BPF_ALU | BPF_AND | BPF_X] = and_reg, 3306 [BPF_ALU | BPF_AND | BPF_K] = and_imm, 3307 [BPF_ALU | BPF_OR | BPF_X] = or_reg, 3308 [BPF_ALU | BPF_OR | BPF_K] = or_imm, 3309 [BPF_ALU | BPF_ADD | BPF_X] = add_reg, 3310 [BPF_ALU | BPF_ADD | BPF_K] = add_imm, 3311 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, 3312 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, 3313 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, 3314 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, 3315 [BPF_ALU | BPF_DIV | BPF_X] = div_reg, 3316 [BPF_ALU | BPF_DIV | BPF_K] = div_imm, 3317 [BPF_ALU | BPF_NEG] = neg_reg, 3318 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, 3319 [BPF_ALU | BPF_END | BPF_X] = end_reg32, 3320 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, 3321 [BPF_LD | BPF_ABS | BPF_B] = data_ld1, 3322 [BPF_LD | BPF_ABS | BPF_H] = data_ld2, 3323 [BPF_LD | BPF_ABS | BPF_W] = data_ld4, 3324 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, 3325 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, 3326 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, 3327 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, 3328 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, 3329 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, 3330 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, 3331 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, 3332 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, 3333 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, 3334 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, 3335 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, 3336 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, 3337 [BPF_ST | BPF_MEM | BPF_B] = mem_st1, 3338 [BPF_ST | BPF_MEM | BPF_H] = mem_st2, 3339 [BPF_ST | BPF_MEM | BPF_W] = mem_st4, 3340 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, 3341 [BPF_JMP | BPF_JA | BPF_K] = jump, 3342 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, 3343 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, 3344 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, 3345 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, 3346 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, 3347 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, 
3348 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, 3349 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, 3350 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, 3351 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 3352 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 3353 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 3354 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, 3355 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, 3356 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, 3357 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, 3358 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, 3359 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, 3360 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, 3361 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, 3362 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 3363 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 3364 [BPF_JMP | BPF_CALL] = call, 3365 [BPF_JMP | BPF_EXIT] = jmp_exit, 3366 }; 3367 3368 /* --- Assembler logic --- */ 3369 static int 3370 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 3371 struct nfp_insn_meta *jmp_dst, u32 br_idx) 3372 { 3373 if (immed_get_value(nfp_prog->prog[br_idx + 1])) { 3374 pr_err("BUG: failed to fix up callee register saving\n"); 3375 return -EINVAL; 3376 } 3377 3378 immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off); 3379 3380 return 0; 3381 } 3382 3383 static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 3384 { 3385 struct nfp_insn_meta *meta, *jmp_dst; 3386 u32 idx, br_idx; 3387 int err; 3388 3389 list_for_each_entry(meta, &nfp_prog->insns, l) { 3390 if (meta->skip) 3391 continue; 3392 if (BPF_CLASS(meta->insn.code) != BPF_JMP) 3393 continue; 3394 if (meta->insn.code == (BPF_JMP | BPF_EXIT) && 3395 !nfp_is_main_function(meta)) 3396 continue; 3397 if (is_mbpf_helper_call(meta)) 3398 continue; 3399 3400 if (list_is_last(&meta->l, &nfp_prog->insns)) 3401 br_idx = nfp_prog->last_bpf_off; 3402 else 3403 br_idx = list_next_entry(meta, l)->off - 1; 3404 3405 /* For a BPF-to-BPF function call, a stack adjustment sequence 3406 * is generated after the return instruction. Therefore, we must 3407 * subtract the length of this sequence so that br_idx points 3408 * to where the "branch" NFP instruction is expected to be.
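 * (num_insns_after_br was recorded for exactly this purpose in
 * bpf_to_bpf_call().)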
3409 */ 3410 if (is_mbpf_pseudo_call(meta)) 3411 br_idx -= meta->num_insns_after_br; 3412 3413 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 3414 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 3415 br_idx, meta->insn.code, nfp_prog->prog[br_idx]); 3416 return -ELOOP; 3417 } 3418 3419 if (meta->insn.code == (BPF_JMP | BPF_EXIT)) 3420 continue; 3421 3422 /* Leave special branches for later */ 3423 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3424 RELO_BR_REL && !is_mbpf_pseudo_call(meta)) 3425 continue; 3426 3427 if (!meta->jmp_dst) { 3428 pr_err("Non-exit jump doesn't have destination info recorded!!\n"); 3429 return -ELOOP; 3430 } 3431 3432 jmp_dst = meta->jmp_dst; 3433 3434 if (jmp_dst->skip) { 3435 pr_err("Branch landing on removed instruction!!\n"); 3436 return -ELOOP; 3437 } 3438 3439 if (is_mbpf_pseudo_call(meta) && 3440 nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) { 3441 err = nfp_fixup_immed_relo(nfp_prog, meta, 3442 jmp_dst, br_idx); 3443 if (err) 3444 return err; 3445 } 3446 3447 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != 3448 RELO_BR_REL) 3449 continue; 3450 3451 for (idx = meta->off; idx <= br_idx; idx++) { 3452 if (!nfp_is_br(nfp_prog->prog[idx])) 3453 continue; 3454 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 3455 } 3456 } 3457 3458 return 0; 3459 } 3460 3461 static void nfp_intro(struct nfp_prog *nfp_prog) 3462 { 3463 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); 3464 emit_alu(nfp_prog, plen_reg(nfp_prog), 3465 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); 3466 } 3467 3468 static void 3469 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3470 { 3471 /* Save return address into the stack. */ 3472 wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog)); 3473 } 3474 3475 static void 3476 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 3477 { 3478 unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth; 3479 3480 nfp_prog->stack_frame_depth = round_up(depth, 4); 3481 nfp_subprog_prologue(nfp_prog, meta); 3482 } 3483 3484 bool nfp_is_subprog_start(struct nfp_insn_meta *meta) 3485 { 3486 return meta->flags & FLAG_INSN_IS_SUBPROG_START; 3487 } 3488 3489 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) 3490 { 3491 /* TC direct-action mode: 3492 * 0,1 ok NOT SUPPORTED[1] 3493 * 2 drop 0x22 -> drop, count as stat1 3494 * 4,5 nuke 0x02 -> drop 3495 * 7 redir 0x44 -> redir, count as stat2 3496 * * unspec 0x11 -> pass, count as stat0 3497 * 3498 * [1] We can't support OK and RECLASSIFY because we can't tell TC 3499 * the exact decision made. We are forced to support UNSPEC 3500 * to handle aborts so that's the only one we handle for passing 3501 * packets up the stack. 
3502 */ 3503 /* Target for aborts */ 3504 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3505 3506 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3507 3508 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3509 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 3510 3511 /* Target for normal exits */ 3512 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3513 3514 /* if R0 > 7 jump to abort */ 3515 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); 3516 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3517 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3518 3519 wrp_immed(nfp_prog, reg_b(2), 0x41221211); 3520 wrp_immed(nfp_prog, reg_b(3), 0x41001211); 3521 3522 emit_shf(nfp_prog, reg_a(1), 3523 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2); 3524 3525 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3526 emit_shf(nfp_prog, reg_a(2), 3527 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3528 3529 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3530 emit_shf(nfp_prog, reg_b(2), 3531 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 3532 3533 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3534 3535 emit_shf(nfp_prog, reg_b(2), 3536 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 3537 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3538 } 3539 3540 static void nfp_outro_xdp(struct nfp_prog *nfp_prog) 3541 { 3542 /* XDP return codes: 3543 * 0 aborted 0x82 -> drop, count as stat3 3544 * 1 drop 0x22 -> drop, count as stat1 3545 * 2 pass 0x11 -> pass, count as stat0 3546 * 3 tx 0x44 -> redir, count as stat2 3547 * * unknown 0x82 -> drop, count as stat3 3548 */ 3549 /* Target for aborts */ 3550 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 3551 3552 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3553 3554 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3555 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 3556 3557 /* Target for normal exits */ 3558 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); 3559 3560 /* if R0 > 3 jump to abort */ 3561 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0)); 3562 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); 3563 3564 wrp_immed(nfp_prog, reg_b(2), 0x44112282); 3565 3566 emit_shf(nfp_prog, reg_a(1), 3567 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3); 3568 3569 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0)); 3570 emit_shf(nfp_prog, reg_b(2), 3571 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 3572 3573 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); 3574 3575 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 3576 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 3577 } 3578 3579 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog) 3580 { 3581 unsigned int idx; 3582 3583 for (idx = 1; idx < nfp_prog->subprog_cnt; idx++) 3584 if (nfp_prog->subprog[idx].needs_reg_push) 3585 return true; 3586 3587 return false; 3588 } 3589 3590 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog) 3591 { 3592 u8 reg; 3593 3594 /* Subroutine: Save all callee saved registers (R6 ~ R9). 3595 * imm_b() holds the return address. 
3596 */ 3597 nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog); 3598 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3599 u8 adj = (reg - BPF_REG_0) * 2; 3600 u8 idx = (reg - BPF_REG_6) * 2; 3601 3602 /* The first slot in the stack frame is used to push the return 3603 * address in bpf_to_bpf_call(), start just after. 3604 */ 3605 wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj)); 3606 3607 if (reg == BPF_REG_8) 3608 /* Prepare to jump back, last 3 insns use defer slots */ 3609 emit_rtn(nfp_prog, imm_b(nfp_prog), 3); 3610 3611 wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1)); 3612 } 3613 } 3614 3615 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog) 3616 { 3617 u8 reg; 3618 3619 /* Subroutine: Restore all callee saved registers (R6 ~ R9). 3620 * ret_reg() holds the return address. 3621 */ 3622 nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog); 3623 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { 3624 u8 adj = (reg - BPF_REG_0) * 2; 3625 u8 idx = (reg - BPF_REG_6) * 2; 3626 3627 /* The first slot in the stack frame holds the return address, 3628 * start popping just after that. 3629 */ 3630 wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx)); 3631 3632 if (reg == BPF_REG_8) 3633 /* Prepare to jump back, last 3 insns use defer slots */ 3634 emit_rtn(nfp_prog, ret_reg(nfp_prog), 3); 3635 3636 wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1)); 3637 } 3638 } 3639 3640 static void nfp_outro(struct nfp_prog *nfp_prog) 3641 { 3642 switch (nfp_prog->type) { 3643 case BPF_PROG_TYPE_SCHED_CLS: 3644 nfp_outro_tc_da(nfp_prog); 3645 break; 3646 case BPF_PROG_TYPE_XDP: 3647 nfp_outro_xdp(nfp_prog); 3648 break; 3649 default: 3650 WARN_ON(1); 3651 } 3652 3653 if (!nfp_prog_needs_callee_reg_save(nfp_prog)) 3654 return; 3655 3656 nfp_push_callee_registers(nfp_prog); 3657 nfp_pop_callee_registers(nfp_prog); 3658 } 3659 3660 static int nfp_translate(struct nfp_prog *nfp_prog) 3661 { 3662 struct nfp_insn_meta *meta; 3663 unsigned int depth; 3664 int err; 3665 3666 depth = nfp_prog->subprog[0].stack_depth; 3667 nfp_prog->stack_frame_depth = round_up(depth, 4); 3668 3669 nfp_intro(nfp_prog); 3670 if (nfp_prog->error) 3671 return nfp_prog->error; 3672 3673 list_for_each_entry(meta, &nfp_prog->insns, l) { 3674 instr_cb_t cb = instr_cb[meta->insn.code]; 3675 3676 meta->off = nfp_prog_current_offset(nfp_prog); 3677 3678 if (nfp_is_subprog_start(meta)) { 3679 nfp_start_subprog(nfp_prog, meta); 3680 if (nfp_prog->error) 3681 return nfp_prog->error; 3682 } 3683 3684 if (meta->skip) { 3685 nfp_prog->n_translated++; 3686 continue; 3687 } 3688 3689 if (nfp_meta_has_prev(nfp_prog, meta) && 3690 nfp_meta_prev(meta)->double_cb) 3691 cb = nfp_meta_prev(meta)->double_cb; 3692 if (!cb) 3693 return -ENOENT; 3694 err = cb(nfp_prog, meta); 3695 if (err) 3696 return err; 3697 if (nfp_prog->error) 3698 return nfp_prog->error; 3699 3700 nfp_prog->n_translated++; 3701 } 3702 3703 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1; 3704 3705 nfp_outro(nfp_prog); 3706 if (nfp_prog->error) 3707 return nfp_prog->error; 3708 3709 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); 3710 if (nfp_prog->error) 3711 return nfp_prog->error; 3712 3713 return nfp_fixup_branches(nfp_prog); 3714 } 3715 3716 /* --- Optimizations --- */ 3717 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) 3718 { 3719 struct nfp_insn_meta *meta; 3720 3721 list_for_each_entry(meta, &nfp_prog->insns, l) { 3722 struct bpf_insn insn = meta->insn; 3723 3724 /* Programs converted from cBPF 
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* abs(insn.imm) will fit better into unrestricted reg immediate -
 * convert add/sub of a negative number into a sub/add of a positive one.
 */
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		if (meta->skip)
			continue;

		if (BPF_CLASS(insn.code) != BPF_ALU &&
		    BPF_CLASS(insn.code) != BPF_ALU64 &&
		    BPF_CLASS(insn.code) != BPF_JMP)
			continue;
		if (BPF_SRC(insn.code) != BPF_K)
			continue;
		if (insn.imm >= 0)
			continue;

		if (BPF_CLASS(insn.code) == BPF_JMP) {
			switch (BPF_OP(insn.code)) {
			case BPF_JGE:
			case BPF_JSGE:
			case BPF_JLT:
			case BPF_JSLT:
				meta->jump_neg_op = true;
				break;
			default:
				continue;
			}
		} else {
			if (BPF_OP(insn.code) == BPF_ADD)
				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
			else if (BPF_OP(insn.code) == BPF_SUB)
				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
			else
				continue;

			meta->insn.code = insn.code | BPF_K;
		}

		meta->insn.imm = -insn.imm;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
	}
}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;
		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the
 * store must be the same, and the load and store must also operate at
 * the same width.  If either addr_src or addr_dest is the stack pointer,
 * we don't do the CPP optimization, as the stack is modelled by
 * registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET &&
	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return TRUE if a cross memory access happens, i.e. the store area
 * overlaps the load area such that a later load might read the value
 * written by a previous store.  In that case we can't treat the
 * sequence as a memory copy.
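 *
 * For example, with 4-byte accesses through the same packet pointer:
 *
 *   load  R, [pkt + 0]
 *   store [pkt + 4], R
 *   load  R, [pkt + 4]    <- may read the value stored just above
 *   store [pkt + 8], R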
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets: turn all of them into offsets against
	 * the original base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}

/* This pass tries to identify the following instruction sequences:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The above sequence is typically generated by the compiler when
 * lowering memcpy; the NFP prefers using CPP instructions to
 * accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset the record state if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crosses with the previous
		 *   pair.
		 * - The chained load/store pair has a total memory copy size
		 *   beyond 128 bytes, which is the maximum length a single
		 *   NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->skip = true;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain is ended by a load/store pair then it
			 * could serve as the new head of the next chain.
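			 * This happens, for instance, when a copy reaches
			 * the 128-byte CPP transfer limit: the next pair
			 * then starts a fresh chain.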
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->skip = true;
			meta2->skip = true;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}

static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->skip)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}

		/* Check the ID to make sure two reads share the same
		 * variable offset against PTR_TO_PACKET, and check OFF
		 * to make sure they also share the same constant
		 * offset.
		 *
		 * OFFs don't really need to be the same, because they
		 * are the constant offsets against PTR_TO_PACKET, so
		 * for different OFFs we could canonicalize them to
		 * offsets against the original packet pointer.  We
		 * don't support this yet.
		 */
		if (meta->ptr.id == range_ptr_id &&
		    meta->ptr.off == range_ptr_off) {
			s16 new_start = range_start;
			s16 end, off = insn->off;
			s16 new_end = range_end;
			bool changed = false;

			if (off < range_start) {
				new_start = off;
				changed = true;
			}

			end = off + BPF_LDST_BYTES(insn);
			if (end > range_end) {
				new_end = end;
				changed = true;
			}

			if (!changed)
				continue;

			if (new_end - new_start <= 64) {
				/* Install new range.
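				 * The widened range still fits within the
				 * 64-byte limit used for the packet cache,
				 * so grow the current range instead of
				 * starting a new one.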
				 */
				range_start = new_start;
				range_end = new_end;
				continue;
			}
		}

end_current_then_start_new:
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
start_new:
		range_node = meta;
		range_node->pkt_cache.do_init = true;
		range_ptr_id = range_node->ptr.id;
		range_ptr_off = range_node->ptr.off;
		range_start = insn->off;
		range_end = insn->off + BPF_LDST_BYTES(insn);
	}

	if (range_node) {
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
	}

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;

		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
			if (meta->pkt_cache.do_init) {
				range_start = meta->pkt_cache.range_start;
				range_end = meta->pkt_cache.range_end;
			} else {
				meta->pkt_cache.range_start = range_start;
				meta->pkt_cache.range_end = range_end;
			}
		}
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_neg_add_sub(nfp_prog);
	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->skip || meta2->skip)
			continue;

		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
			continue;

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

	return 0;
}

static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct nfp_insn_meta *dst_meta;
		u64 code = meta->insn.code;
		unsigned int dst_idx;
		bool pseudo_call;

		if (BPF_CLASS(code) != BPF_JMP)
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (is_mbpf_helper_call(meta))
			continue;

		/* If opcode is BPF_CALL at this point, this can only be a
		 * BPF-to-BPF call (a.k.a. pseudo call).
		 */
		pseudo_call = BPF_OP(code) == BPF_CALL;

		if (pseudo_call)
			dst_idx = meta->n + 1 + meta->insn.imm;
		else
			dst_idx = meta->n + 1 + meta->insn.off;

		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);

		if (pseudo_call)
			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		meta->jmp_dst = dst_meta;
	}
}

bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;
		u16 off;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_GO_CALL_PUSH_REGS:
			if (!nfp_prog->tgt_call_push_regs) {
				pr_err("BUG: failed to detect subprogram register save needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_push_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_GO_CALL_POP_REGS:
			if (!nfp_prog->tgt_call_pop_regs) {
				pr_err("BUG: failed to detect subprogram register save needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			case BPF_FUNC_perf_event_output:
				val = nfp_prog->bpf->helpers.perf_event_output;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}