/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* The relocation logic uses the up-most byte of the branch instruction as
 * a scratch area. Remember to clear it before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO	15000
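/* Illustrative sketch only -- these example helpers are not part of the
 * driver. The relocation type travels in the instruction's top byte
 * (OP_RELO_TYPE), so stashing and recovering it can be done with the
 * linux/bitfield.h helpers included above.
 */
static inline u64 nfp_relo_encode_example(u64 insn, enum nfp_relo_type type)
{
	/* Scratch byte must be cleared again before the insn reaches HW */
	return insn | FIELD_PREP(OP_RELO_TYPE, type);
}

static inline enum nfp_relo_type nfp_relo_decode_example(u64 insn)
{
	return FIELD_GET(OP_RELO_TYPE, insn);
}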
enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK	1
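/* Minimal sketch (assumed-name helper, not driver API): the same scratch
 * register number resolves to a different physical register per bank,
 * which is why imm_a()/imm_b() above both map to STATIC_REG_IMM.
 */
static inline swreg nfp_imm_bank_example(bool bank_a)
{
	return bank_a ? reg_a(STATIC_REG_IMM) : reg_b(STATIC_REG_IMM);
}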
/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app: backpointer to the app
 *
 * @bpf_dev: BPF offload device handle
 *
 * @tag_allocator: bitmap of control message tags in use
 * @tag_alloc_next: next tag bit to allocate
 * @tag_alloc_last: next tag bit to be freed
 *
 * @cmsg_replies: received cmsg replies waiting to be consumed
 * @cmsg_wq: work queue for waiting for cmsg replies
 *
 * @map_list: list of offloaded maps
 * @maps_in_use: number of currently offloaded maps
 * @map_elems_in_use: number of elements allocated to offloaded maps
 *
 * @maps_neutral: hash table of offload-neutral maps (keyed on the map pointer)
 *
 * @adjust_head: adjust head capability
 * @adjust_head.flags: extra flags for adjust head
 * @adjust_head.off_min: minimal packet offset within buffer required
 * @adjust_head.off_max: maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub: negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add: positive adjustment guaranteed possible
 *
 * @maps: map capability
 * @maps.types: supported map types
 * @maps.max_maps: max number of maps supported
 * @maps.max_elems: max number of entries in each map
 * @maps.max_key_sz: max size of map key
 * @maps.max_val_sz: max size of map value
 * @maps.max_elem_sz: max size of map entry (key + value)
 *
 * @helpers: helper addresses for various calls
 * @helpers.map_lookup: map lookup helper address
 * @helpers.map_update: map update helper address
 * @helpers.map_delete: map delete helper address
 * @helpers.perf_event_output: output perf event to a ring buffer
 *
 * @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
 * @queue_select: BPF can set the RX queue ID in the packet vector
 * @adjust_tail: BPF can simply truncate the packet size for adjust tail
 */
struct nfp_app_bpf {
	struct nfp_app *app;

	struct bpf_offload_dev *bpf_dev;

	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
	u16 tag_alloc_next;
	u16 tag_alloc_last;

	struct sk_buff_head cmsg_replies;
	struct wait_queue_head cmsg_wq;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
};

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap: pointer to the offloaded BPF map
 * @bpf: back pointer to bpf app private structure
 * @tid: table id identifying map on datapath
 * @l: link on the nfp_app_bpf->map_list list
 * @use_map: map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	enum nfp_bpf_map_use use_map[];
};
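/* Sketch under the "4B chunks" assumption documented above (the helper
 * name is illustrative, not driver API): one use_map[] entry tracks each
 * 32-bit word of the map value, so the flexible array tail is sized from
 * the value size.
 */
static inline size_t nfp_bpf_map_use_size_example(u32 value_size)
{
	return DIV_ROUND_UP(value_size, 4) * sizeof(enum nfp_bpf_map_use);
}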
struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST	BIT(0)

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @skip: skip this instruction (optimized out)
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations, e.g. shift amount, multiplicand and
		 * multiplier.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	bool skip;
	instr_cb_t double_cb;

	struct list_head l;
};

#define BPF_SIZE_MASK	0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}

static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
}

static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
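/* Illustrative only (not a helper the driver defines): the accessors
 * above compose naturally, e.g. to match any 64-bit shift operation.
 * BPF_SIZE_MASK strips the size bits, which is how is_mbpf_load() and
 * friends match BPF_B/BPF_H/BPF_W/BPF_DW variants with a single compare.
 */
static inline bool is_mbpf_shift_example(const struct nfp_insn_meta *meta)
{
	u8 op = mbpf_op(meta);

	return mbpf_class(meta) == BPF_ALU64 &&
	       (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH);
}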
/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_depth: max stack depth from the verifier
 * @adjust_head_location: insn number of the adjust head call, if the program
 *			  has exactly one
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @map_records: the map record pointers from bpf->maps_neutral
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;

	unsigned int n_translated;
	int error;

	unsigned int stack_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	struct nfp_bpf_neutral_map **map_records;

	struct list_head insns;
};
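/* Sketch (assumed-name helper): @insns links struct nfp_insn_meta
 * wrappers, so passes over the program are plain list walks, e.g.
 * counting the instructions the verifier marked as jump destinations.
 */
static inline unsigned int
nfp_prog_count_jump_dsts_example(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	unsigned int cnt = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l)
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cnt++;
	return cnt;
}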
/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog: currently loaded cls_bpf program
 * @start_off: address of the first instruction in the memory
 * @tgt_done: jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif