/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* For relocation logic use up-most byte of branch instruction as scratch
 * area.  Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO	15000

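/* Illustrative sketch (not part of the original header): the JIT stashes the
 * relocation type in the OP_RELO_TYPE scratch byte with FIELD_PREP() and the
 * per-vNIC relocation pass recovers and clears it again, roughly:
 *
 *	prog[i] |= FIELD_PREP(OP_RELO_TYPE, RELO_BR_GO_OUT);
 *	...
 *	relo = FIELD_GET(OP_RELO_TYPE, prog[i]);
 *	prog[i] &= ~OP_RELO_TYPE;
 *
 * The exact call sites live in the JIT and offload code; the snippet only
 * shows how the field is encoded and why it must be cleared before the
 * instructions are sent to hardware.
 */
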
enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)
#define ret_reg(np)	imm_a(np)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK	1

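/* Illustrative sketch (assumed usage, the real emitters live in the JIT):
 * generated code refers to the fixed data path registers only through the
 * accessor macros above, e.g. a packet length check could be emitted roughly
 * along the lines of:
 *
 *	emit_alu(nfp_prog, reg_none(),
 *		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
 *
 * emit_alu(), reg_none() and ALU_OP_SUB are assumed here to come from the
 * JIT / nfp_asm layer; see jit.c and nfp_asm.h for the authoritative API.
 */
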
/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 * @ccm:		common control message handler data
 *
 * @bpf_dev:		BPF offload device handle
 *
 * @cmsg_key_sz:	size of key in cmsg element array
 * @cmsg_val_sz:	size of value in cmsg element array
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (keyed on the map pointer)
 *
 * @abi_version:	global BPF ABI version
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery (CSRs)
 * @queue_select:	BPF can set the RX queue ID in packet vector
 * @adjust_tail:	BPF can simply truncate the packet size for adjust tail
 */
struct nfp_app_bpf {
	struct nfp_app *app;
	struct nfp_ccm ccm;

	struct bpf_offload_dev *bpf_dev;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
};

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

struct nfp_bpf_map_word {
	unsigned char type		:4;
	unsigned char non_zero_update	:1;
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	struct nfp_bpf_map_word use_map[];
};

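/* Illustrative sketch (assumed usage): @use_map holds one nfp_bpf_map_word
 * per 4B word of the map value, so the verifier/offload code can record how
 * each word of the value gets accessed roughly like this:
 *
 *	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
 *	unsigned int word = off / 4;
 *
 *	nfp_map->use_map[word].type = NFP_MAP_USE_WRITE;
 *	nfp_map->use_map[word].non_zero_update = 1;
 *
 * The variable names above are illustrative only.
 */
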
struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST			BIT(0)
#define FLAG_INSN_IS_SUBPROG_START		BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP			BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT		BIT(5)

#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
					 FLAG_INSN_SKIP_PREC_DEPENDENT | \
					 FLAG_INSN_SKIP_VERIFIER_OPT)

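/* Illustrative sketch (assumed usage): translation and optimization passes
 * walk the instruction wrappers via the macros above or the list directly,
 * skipping anything flagged for removal, roughly:
 *
 *	struct nfp_insn_meta *meta;
 *
 *	list_for_each_entry(meta, &nfp_prog->insns, l) {
 *		if (meta->flags & FLAG_INSN_SKIP_MASK)
 *			continue;
 *		...
 *	}
 */
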
/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br; /* only for BPF-to-BPF calls */
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations, e.g. the shift amount, multiplicand and
		 * multiplier.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;
	instr_cb_t double_cb;

	struct list_head l;
};

#define BPF_SIZE_MASK	0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP32;
}

static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP;
}

static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
{
	return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}

static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
}

static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}

static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
	u8 op;

	if (is_mbpf_jmp32(meta))
		return true;

	if (!is_mbpf_jmp64(meta))
		return false;

	op = mbpf_op(meta);
	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg != BPF_PSEUDO_CALL;
}

static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg == BPF_PSEUDO_CALL;
}

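/* Illustrative sketch (not part of the original header): callers typically
 * combine the predicates above to dispatch on the wrapped instruction, for
 * example (the helper names below are hypothetical):
 *
 *	if (is_mbpf_helper_call(meta))
 *		return check_helper_args(nfp_prog, meta);
 *	if (is_mbpf_pseudo_call(meta))
 *		return mark_bpf_to_bpf_call(nfp_prog, meta);
 */
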
#define STACK_FRAME_ALIGN 64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth:	maximum stack depth used by this sub-program
 * @needs_reg_push:	whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in the memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif