/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version
 * 2, June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"

/* --- NFP prog --- */
/* The for-each macros which walk multiple entries provide pos and next<n>
 * pointers.  It's safe to modify the next pointers (but not pos).
 */
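/* Example: with instructions A, B, C on the list, walk2 visits
 * (pos=A, next=B) then (pos=B, next=C), and walk3 additionally carries a
 * next2 pointer.  The peephole optimizations at the bottom of this file
 * use these to match two- and three-instruction patterns.
 */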
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.next != &nfp_prog->insns;
}

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->start_off + nfp_prog->prog_len;
}

static unsigned int
nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
{
	return offset - nfp_prog->start_off;
}

/* --- SW reg --- */
struct nfp_insn_ur_regs {
	enum alu_dst_ab dst_ab;
	u16 dst;
	u16 areg, breg;
	bool swap;
	bool wr_both;
};

struct nfp_insn_re_regs {
	enum alu_dst_ab dst_ab;
	u8 dst;
	u8 areg, breg;
	bool swap;
	bool wr_both;
	bool i8;
};
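/* NFP ALU operands come from two register banks, A and B.  The helpers
 * below place lreg/rreg into areg/breg; when the operands arrive
 * bank-reversed (lreg is a B-bank GPR or rreg an A-bank one) they are
 * stored swapped and ->swap is set so the emitter can compensate.  Two
 * source operands of the same type cannot be encoded at all, hence the
 * -EFAULT checks below.
 */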
static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
{
	u16 val = FIELD_GET(NN_REG_VAL, swreg);

	switch (FIELD_GET(NN_REG_TYPE, swreg)) {
	case NN_REG_GPR_A:
	case NN_REG_GPR_B:
	case NN_REG_GPR_BOTH:
		return val;
	case NN_REG_NNR:
		return UR_REG_NN | val;
	case NN_REG_XFER:
		return UR_REG_XFR | val;
	case NN_REG_IMM:
		if (val & ~0xff) {
			pr_err("immediate too large\n");
			return 0;
		}
		return UR_REG_IMM_encode(val);
	case NN_REG_NONE:
		return is_dst ? UR_REG_NO_DST : REG_NONE;
	default:
		pr_err("unrecognized reg encoding %08x\n", swreg);
		return 0;
	}
}

static int
swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
{
	memset(reg, 0, sizeof(*reg));

	/* Decode destination */
	if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
		return -EFAULT;

	if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
		reg->dst_ab = ALU_DST_B;
	if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
		reg->wr_both = true;
	reg->dst = nfp_swreg_to_unreg(dst, true);

	/* Decode source operands */
	if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
		return -EFAULT;

	if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
	    FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
		reg->areg = nfp_swreg_to_unreg(rreg, false);
		reg->breg = nfp_swreg_to_unreg(lreg, false);
		reg->swap = true;
	} else {
		reg->areg = nfp_swreg_to_unreg(lreg, false);
		reg->breg = nfp_swreg_to_unreg(rreg, false);
	}

	return 0;
}

static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
{
	u16 val = FIELD_GET(NN_REG_VAL, swreg);

	switch (FIELD_GET(NN_REG_TYPE, swreg)) {
	case NN_REG_GPR_A:
	case NN_REG_GPR_B:
	case NN_REG_GPR_BOTH:
		return val;
	case NN_REG_XFER:
		return RE_REG_XFR | val;
	case NN_REG_IMM:
		if (val & ~(0x7f | has_imm8 << 7)) {
			pr_err("immediate too large\n");
			return 0;
		}
		*i8 = val & 0x80;
		return RE_REG_IMM_encode(val & 0x7f);
	case NN_REG_NONE:
		return is_dst ? RE_REG_NO_DST : REG_NONE;
	default:
		pr_err("unrecognized reg encoding\n");
		return 0;
	}
}

static int
swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
		    bool has_imm8)
{
	memset(reg, 0, sizeof(*reg));

	/* Decode destination */
	if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
		return -EFAULT;

	if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
		reg->dst_ab = ALU_DST_B;
	if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
		reg->wr_both = true;
	reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);

	/* Decode source operands */
	if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
		return -EFAULT;

	if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
	    FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
		reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
		reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
		reg->swap = true;
	} else {
		reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
		reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
	}

	return 0;
}

/* --- Emitters --- */
static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
	[CMD_TGT_WRITE8] =		{ 0x00, 0x42 },
	[CMD_TGT_READ8] =		{ 0x01, 0x43 },
	[CMD_TGT_READ_LE] =		{ 0x01, 0x40 },
	[CMD_TGT_READ_SWAP_LE] =	{ 0x03, 0x40 },
};
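/* Each cmd_tgt_act entry pairs a command token with its target-command
 * encoding; __emit_cmd() below packs them into the OP_CMD_TOKEN and
 * OP_CMD_TGT_CMD fields.  Note the emitters do not return errors, they
 * record failures in nfp_prog->error, which the translation loop checks.
 */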
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	 u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
{
	if (defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}
	__emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);
}

static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
	       u8 byte, bool equal, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BBYTE_BASE |
		FIELD_PREP(OP_BB_A_SRC, areg) |
		FIELD_PREP(OP_BB_BYTE, byte) |
		FIELD_PREP(OP_BB_B_SRC, breg) |
		FIELD_PREP(OP_BB_I8, imm8) |
		FIELD_PREP(OP_BB_EQ, equal) |
		FIELD_PREP(OP_BB_DEFBR, defer) |
		FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BB_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_byte_neq(struct nfp_prog *nfp_prog,
		 u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
		       defer);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both);

	nfp_prog_push(nfp_prog, insn);
}
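/* emit_immed() splits the 16-bit value across the encoding: the low byte
 * travels as an 8-bit immediate operand (reg_imm(imm & 0xff)) while the
 * upper byte goes into the instruction's own immediate field
 * (imm_hi = imm >> 8).
 */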
static void
emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
		     invert, shift, reg.wr_both);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
	 enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both);

	nfp_prog_push(nfp_prog, insn);
}
static void
emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
		  u32 dst, u8 bmask, u32 src, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm into a spare register and return its encoding.
 */
static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm into a spare register and return its encoding.
 */
static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
	       enum br_special special)
{
	emit_br(nfp_prog, mask, 0, 0);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_BR_SPECIAL, special);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
}
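/* Packet data loads below are emitted as: an (optional) address add, a
 * bounds check of offset + size against NFP_BPF_ABI_LEN (branching to the
 * abort target on failure), a CMD_TGT_READ8 into the xfer registers, and
 * finally a shift or moves placing the requested bytes in r0 (big endian).
 */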
static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
		      u16 src, bool src_valid, u8 size)
{
	unsigned int i;
	u16 shift, sz;
	u32 tmp_reg;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = size < 4 ? 4 : size;
	shift = size < 4 ? 4 - size : 0;

	if (src_valid) {
		/* Calculate the true offset (src_reg + imm) */
		tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_both(nfp_prog),
			 reg_a(src), ALU_OP_ADD, tmp_reg);
		/* Check packet length (size guaranteed to fit b/c it's u8) */
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
		emit_alu(nfp_prog, reg_none(),
			 NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
		wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
		/* Load data */
		emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
			 pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
	} else {
		/* Check packet length */
		tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
					  imm_a(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
		wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
		/* Load data */
		tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
			 pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
	}

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			emit_alu(nfp_prog, reg_both(i),
				 reg_none(), ALU_OP_NONE, reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(1), 0);

	return 0;
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
}

static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
{
	emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
		 reg_none(), ALU_OP_NONE, reg_b(src));
	emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
		 NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));

	return 0;
}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	u32 tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NEG, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}
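/* eBPF register R<n> lives in a pair of 32-bit NFP GPRs: 2n holds the low
 * word and 2n + 1 the high word.  The alu32 wrappers below therefore
 * operate on reg 2n and zero reg 2n + 1, matching eBPF's rule that 32-bit
 * ALU ops clear the upper half of the destination.
 */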
static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	u32 tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}
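/* The 64-bit compares above subtract the low words (ALU_OP_SUB), chain the
 * borrow through the high words (ALU_OP_SUB_C) and keep only the flags
 * (destination reg_none()); the branch then tests the final carry state.
 * For operands x - y, BR_BLO fires when x < y and BR_BHS when x >= y
 * (unsigned), so every BPF condition is expressed by picking the operand
 * order (@swap) and one of these two masks -- see the j*_imm/j*_reg
 * callbacks below.
 */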
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}
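/* Only a shift by exactly 32 is handled for 64-bit shifts: it degenerates
 * into a move between the low and high words of the register pair, plus
 * zeroing the vacated word.  Other shift amounts are left as TODO and the
 * non-zero return makes translation fail.
 */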
static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->imm != 32)
		return 1; /* TODO */

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);

	return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->imm != 32)
		return 1; /* TODO */

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
		  meta->insn.imm);

	return 0;
}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	meta->double_cb = imm_ld8_part2;
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);

	return 0;
}
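/* Classic BPF packet loads: LD_ABS reads at a constant offset, LD_IND adds
 * a source register first.  Both funnel into construct_data_ind_ld() with
 * the access size (1, 2 or 4 bytes) and get the bounds check included.
 */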
static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, true, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, true, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, true, 4);
}

static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta->insn.off == offsetof(struct sk_buff, len))
		emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
	else
		return -EOPNOTSUPP;

	return 0;
}

static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 dst = reg_both(meta->insn.dst_reg * 2);

	if (meta->insn.off != offsetof(struct xdp_md, data) &&
	    meta->insn.off != offsetof(struct xdp_md, data_end))
		return -EOPNOTSUPP;

	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);

	if (meta->insn.off == offsetof(struct xdp_md, data))
		return 0;

	emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);

	return 0;
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	int ret;

	if (nfp_prog->act == NN_ACT_XDP)
		ret = mem_ldx4_xdp(nfp_prog, meta);
	else
		ret = mem_ldx4_skb(nfp_prog, meta);

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return ret;
}

static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta->insn.off == offsetof(struct sk_buff, mark))
		return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);

	return -EOPNOTSUPP;
}

static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return -EOPNOTSUPP;
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (nfp_prog->act == NN_ACT_XDP)
		return mem_stx4_xdp(nfp_prog, meta);
	return mem_stx4_skb(nfp_prog, meta);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta->insn.off < 0) /* TODO */
		return -EOPNOTSUPP;
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}

static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
	u32 tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}
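/* Mapping of the unsigned BPF compares onto (branch mask, operand order),
 * following the wrp_cmp_* semantics above (operands x - y; BR_BLO: x < y,
 * BR_BHS: x >= y).  For wrp_cmp_imm() swap=false computes dst - imm and
 * swap=true computes imm - dst; for wrp_cmp_reg() swap=false computes
 * src - dst and swap=true computes dst - src:
 *
 *   JGT  dst >  imm/src
 *   JGE  dst >= imm/src
 *   JLT  dst <  imm/src
 *   JLE  dst <= imm/src
 */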
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u32 tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u32 tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}

static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}

static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}
static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);

	return 0;
}

static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
	[BPF_JMP | BPF_JA | BPF_K] =	jump,
	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
	[BPF_JMP | BPF_EXIT] =		goto_out,
};

/* --- Misc code --- */
static void br_set_offset(u64 *instr, u16 offset)
{
	u16 addr_lo, addr_hi;

	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = offset != addr_lo;
	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
}
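/* Example: br_set_offset(&insn, 0x123) keeps the bits of the offset which
 * fit in OP_BR_ADDR_LO in that field and sets OP_BR_ADDR_HI whenever any
 * bits above the LO field are set, mirroring how __emit_br() encodes
 * freshly emitted branches.
 */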
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *next;
	u32 off, br_idx;
	u32 idx;

	nfp_for_each_insn_walk2(nfp_prog, meta, next) {
		if (meta->skip)
			continue;
		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
			continue;

		br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
			return -ELOOP;
		}
		/* Leave special branches for later */
		if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
			continue;

		/* Find the target offset in assembler realm */
		off = meta->insn.off;
		if (!off) {
			pr_err("Fixup found zero offset!!\n");
			return -ELOOP;
		}

		while (off && nfp_meta_has_next(nfp_prog, next)) {
			next = nfp_meta_next(next);
			off--;
		}
		if (off) {
			pr_err("Fixup found too large jump!! %d\n", off);
			return -ELOOP;
		}

		if (next->skip) {
			pr_err("Branch landing on removed instruction!!\n");
			return -ELOOP;
		}

		for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
		     idx <= br_idx; idx++) {
			if (!nfp_is_br(nfp_prog->prog[idx]))
				continue;
			br_set_offset(&nfp_prog->prog[idx], next->off);
		}
	}

	/* Fixup 'goto out's separately, they can be scattered around */
	for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
		enum br_special special;

		if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
			continue;

		special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
		switch (special) {
		case OP_BR_NORMAL:
			break;
		case OP_BR_GO_OUT:
			br_set_offset(&nfp_prog->prog[br_idx],
				      nfp_prog->tgt_out);
			break;
		case OP_BR_GO_ABORT:
			br_set_offset(&nfp_prog->prog[br_idx],
				      nfp_prog->tgt_abort);
			break;
		}

		nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
	}

	return 0;
}

static void nfp_intro(struct nfp_prog *nfp_prog)
{
	emit_alu(nfp_prog, pkt_reg(nfp_prog),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
}

static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
{
	const u8 act2code[] = {
		[NN_ACT_TC_DROP]  = 0x22,
		[NN_ACT_TC_REDIR] = 0x24
	};
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
	wrp_immed(nfp_prog, reg_both(0), 0);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
	/* Legacy TC mode:
	 *   0        0x11 -> pass,  count as stat0
	 *  -1  drop  0x22 -> drop,  count as stat1
	 *     redir  0x24 -> redir, count as stat1
	 *  ife mark  0x21 -> pass,  count as stat1
	 *   ife + tx 0x24 -> redir, count as stat1
	 */
	emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
	emit_alu(nfp_prog, reg_a(0),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
		      SHF_SC_L_SHF, 16);
}
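/* The direct-action outro below resolves the return code through packed
 * nibble tables: 0x41221211 and 0x41001211 hold one result nibble per
 * possible R0 value.  R0 is shifted left by 2 (i.e. multiplied by 4, the
 * nibble width) and the result appears to be consumed as an indirect shift
 * amount via the previous-ALU-result path (the ALU_OP_OR with reg_imm(0)
 * immediately before each extracting shift).
 */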
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1   ok        NOT SUPPORTED[1]
	 *   2   drop  0x22 -> drop,  count as stat1
	 *   4,5 nuke  0x02 -> drop
	 *   7  redir  0x44 -> redir, count as stat2
	 *   * unspec  0x11 -> pass,  count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts so that's the only one we handle for passing
	 *     packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	emit_alu(nfp_prog, reg_a(0),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	emit_alu(nfp_prog, reg_a(0),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);

	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0 aborted  0x82 -> drop,  count as stat3
	 *   1    drop  0x22 -> drop,  count as stat1
	 *   2    pass  0x11 -> pass,  count as stat0
	 *   3      tx  0x44 -> redir, count as stat2
	 *   * unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	emit_alu(nfp_prog, reg_a(0),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	emit_alu(nfp_prog, reg_a(0),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->act) {
	case NN_ACT_DIRECT:
		nfp_outro_tc_da(nfp_prog);
		break;
	case NN_ACT_TC_DROP:
	case NN_ACT_TC_REDIR:
		nfp_outro_tc_legacy(nfp_prog);
		break;
	case NN_ACT_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	}
}
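/* Translation proceeds in stages: nfp_intro() sets up the packet pointer,
 * each eBPF instruction is translated by its instr_cb[] callback (with
 * meta->skip honouring the optimizations below), nfp_outro() appends the
 * mode-specific epilogue, and nfp_fixup_branches() finally rewrites branch
 * targets from eBPF instruction offsets to NFP instruction addresses.
 */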
static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		struct nfp_insn_meta *meta;

		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	return 0;
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Try to rename registers so that program uses only low ones */
static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
{
	bool reg_used[MAX_BPF_REG] = {};
	u8 tgt_reg[MAX_BPF_REG] = {};
	struct nfp_insn_meta *meta;
	unsigned int i, j;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;

		reg_used[meta->insn.src_reg] = true;
		reg_used[meta->insn.dst_reg] = true;
	}

	for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
		if (!reg_used[i])
			continue;

		tgt_reg[i] = j++;
	}
	nfp_prog->num_regs = j;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
		meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
	}

	return 0;
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		meta2->skip = true;
	}
}
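/* A 32-bit LD_ABS/LD_IND already zero-extends to 64 bits, so the common
 * zero-extension idiom which follows it:
 *
 *   r0 <<= 32
 *   r0 >>= 32    (in either order)
 *
 * is redundant; both shifts are marked for skipping below.
 */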
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	int ret;

	nfp_bpf_opt_reg_init(nfp_prog);

	ret = nfp_bpf_opt_reg_rename(nfp_prog);
	if (ret)
		return ret;

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);

	return 0;
}

/**
 * nfp_bpf_jit() - translate BPF code into NFP assembly
 * @filter:	kernel BPF filter struct
 * @prog_mem:	memory to store assembler instructions
 * @act:	action attached to this eBPF program
 * @prog_start:	offset of the first instruction when loaded
 * @prog_done:	where to jump on exit
 * @prog_sz:	size of @prog_mem in instructions
 * @res:	achieved parameters of the translation (length, dense mode)
 */
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
	    enum nfp_bpf_action_type act,
	    unsigned int prog_start, unsigned int prog_done,
	    unsigned int prog_sz, struct nfp_bpf_result *res)
{
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->act = act;
	nfp_prog->start_off = prog_start;
	nfp_prog->tgt_done = prog_done;

	ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
	if (ret)
		goto out;

	ret = nfp_prog_verify(nfp_prog, filter);
	if (ret)
		goto out;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		goto out;

	if (nfp_prog->num_regs <= 7)
		nfp_prog->regs_per_thread = 16;
	else
		nfp_prog->regs_per_thread = 32;

	nfp_prog->prog = prog_mem;
	nfp_prog->__prog_alloc_len = prog_sz;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		ret = -EINVAL;
	}

	res->n_instr = nfp_prog->prog_len;
	res->dense_mode = nfp_prog->num_regs <= 7;
out:
	nfp_prog_free(nfp_prog);

	return ret;
}