/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* For relocation logic use up-most byte of branch instruction as scratch
 * area.  Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO	15000
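
/* Illustrative sketch, not part of the driver API (the helper names below
 * are made up): a relocation type can be stashed in the OP_RELO_TYPE
 * scratch byte during translation and stripped again before the
 * instruction is loaded onto the device, e.g. with the bitfield helpers:
 */
static inline u64 nfp_example_insn_set_relo(u64 insn, enum nfp_relo_type type)
{
	/* Clear the scratch byte, then encode the relocation type into it. */
	return (insn & ~OP_RELO_TYPE) | FIELD_PREP(OP_RELO_TYPE, type);
}

static inline enum nfp_relo_type nfp_example_insn_relo(u64 insn)
{
	/* Recover the relocation type stashed by the function above. */
	return FIELD_GET(OP_RELO_TYPE, insn);
}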
enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)
#define ret_reg(np)	imm_a(np)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK	1

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app: backpointer to the app
 *
 * @bpf_dev: BPF offload device handle
 *
 * @tag_allocator: bitmap of control message tags in use
 * @tag_alloc_next: next tag bit to allocate
 * @tag_alloc_last: next tag bit to be freed
 *
 * @cmsg_replies: received cmsg replies waiting to be consumed
 * @cmsg_wq: wait queue for waiting for cmsg replies
 *
 * @cmsg_key_sz: size of key in cmsg element array
 * @cmsg_val_sz: size of value in cmsg element array
 *
 * @map_list: list of offloaded maps
 * @maps_in_use: number of currently offloaded maps
 * @map_elems_in_use: number of elements allocated to offloaded maps
 *
 * @maps_neutral: hash table of offload-neutral maps (keyed on map pointer)
 *
 * @abi_version: global BPF ABI version
 *
 * @adjust_head: adjust head capability
 * @adjust_head.flags: extra flags for adjust head
 * @adjust_head.off_min: minimal packet offset within buffer required
 * @adjust_head.off_max: maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub: negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add: positive adjustment guaranteed possible
 *
 * @maps: map capability
 * @maps.types: supported map types
 * @maps.max_maps: max number of maps supported
 * @maps.max_elems: max number of entries in each map
 * @maps.max_key_sz: max size of map key
 * @maps.max_val_sz: max size of map value
 * @maps.max_elem_sz: max size of map entry (key + value)
 *
 * @helpers: helper addresses for various calls
 * @helpers.map_lookup: map lookup helper address
 * @helpers.map_update: map update helper address
 * @helpers.map_delete: map delete helper address
 * @helpers.perf_event_output: output perf event to a ring buffer
 *
 * @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
 * @queue_select: BPF can set the RX queue ID in packet vector
 * @adjust_tail: BPF can simply truncate packet size for adjust tail
 */
struct nfp_app_bpf {
	struct nfp_app *app;

	struct bpf_offload_dev *bpf_dev;

	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
	u16 tag_alloc_next;
	u16 tag_alloc_last;

	struct sk_buff_head cmsg_replies;
	struct wait_queue_head cmsg_wq;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
};
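
/* Hypothetical helper, for illustration only: one way the guaranteed
 * range above could be consulted.  Assumes @guaranteed_sub is advertised
 * as a positive magnitude; a head adjustment by @delta bytes is then
 * always safe when it falls within [-guaranteed_sub, guaranteed_add].
 */
static inline bool
nfp_example_adjust_head_guaranteed(const struct nfp_app_bpf *bpf, int delta)
{
	return delta >= -bpf->adjust_head.guaranteed_sub &&
	       delta <= bpf->adjust_head.guaranteed_add;
}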
enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

struct nfp_bpf_map_word {
	unsigned char type		:4;
	unsigned char non_zero_update	:1;
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap: pointer to the offloaded BPF map
 * @bpf: back pointer to bpf app private structure
 * @tid: table id identifying map on datapath
 * @l: link on the nfp_app_bpf->map_list list
 * @use_map: map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	struct nfp_bpf_map_word use_map[];
};
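
/* Usage of the map value is tracked per 4-byte chunk, so a byte offset
 * into the value indexes @use_map as shown by this hypothetical helper
 * (an illustration of the layout only, not a driver function):
 */
static inline enum nfp_bpf_map_use
nfp_example_map_word_use(const struct nfp_bpf_map *nfp_map, unsigned int off)
{
	/* Each nfp_bpf_map_word covers 4 bytes of the map value. */
	return nfp_map->use_map[off / 4].type;
}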
struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST			BIT(0)
#define FLAG_INSN_IS_SUBPROG_START		BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP			BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT		BIT(5)

#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
					 FLAG_INSN_SKIP_PREC_DEPENDENT | \
					 FLAG_INSN_SKIP_VERIFIER_OPT)

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br; /* only for BPF-to-BPF calls */
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations. For example, shift amount, multiplicand and
		 * multiplier etc.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;
	instr_cb_t double_cb;

	struct list_head l;
};

#define BPF_SIZE_MASK	0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP32;
}

static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP;
}

static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
{
	return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
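
/* Masking with ~BPF_SIZE_MASK in the helpers above matches an opcode
 * regardless of access width.  For comparison, a width-specific predicate
 * would test the full opcode instead, e.g. (hypothetical helper, shown
 * for illustration only):
 */
static inline bool is_mbpf_load_word(const struct nfp_insn_meta *meta)
{
	/* Matches only 4-byte (BPF_W) loads, unlike is_mbpf_load(). */
	return meta->insn.code == (BPF_LDX | BPF_MEM | BPF_W);
}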
static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
}

static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}

static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
	u8 op;

	if (is_mbpf_jmp32(meta))
		return true;

	if (!is_mbpf_jmp64(meta))
		return false;

	op = mbpf_op(meta);
	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg != BPF_PSEUDO_CALL;
}

static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg == BPF_PSEUDO_CALL;
}

#define STACK_FRAME_ALIGN 64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth: maximum stack depth used by this sub-program
 * @needs_reg_push: whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target of the subroutine saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target of the subroutine restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};
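
/* The wrappers on @insns are walked with the regular list helpers; a
 * hypothetical example (illustration only, not a driver function)
 * counting the instructions that survived the optimization passes:
 */
static inline unsigned int
nfp_example_count_live_insns(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	unsigned int cnt = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l)
		if (!(meta->flags & FLAG_INSN_SKIP_MASK))
			cnt++;

	return cnt;
}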
/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog: currently loaded cls_bpf program
 * @start_off: address of the first instruction in memory
 * @tgt_done: jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif