/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../nfp_asm.h"
#include "fw.h"

/* For relocation logic use up-most byte of branch instruction as scratch
 * area.  Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO		15000
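
/* Illustrative sketch (not part of the driver API): while a program is being
 * built, the relocation type is stashed in the top byte selected by
 * OP_RELO_TYPE and must be masked out again before the instruction word is
 * sent to HW.  Assuming the FIELD_PREP()/FIELD_GET() helpers from
 * <linux/bitfield.h>:
 *
 *	u64 tagged = insn | FIELD_PREP(OP_RELO_TYPE, RELO_BR_HELPER);
 *	enum nfp_relo_type type = FIELD_GET(OP_RELO_TYPE, tagged);
 *	u64 for_hw = tagged & ~OP_RELO_TYPE;
 */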

enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK	1
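
/* Note (summarizing the definitions above): STATIC_REG_STACK and
 * STATIC_REG_PKT_LEN share GPR index 22 but live in different banks, so
 * stack_reg() (reg_a(22)) and plen_reg() (reg_b(22)) do not clash.  The
 * pv_*() accessors reach the per-packet state the datapath keeps in local
 * memory via reg_lm(1, ...).
 */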

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 *
 * @tag_allocator:	bitmap of control message tags in use
 * @tag_alloc_next:	next tag bit to allocate
 * @tag_alloc_last:	next tag bit to be freed
 *
 * @cmsg_replies:	received cmsg replies waiting to be consumed
 * @cmsg_wq:		wait queue for waiting for cmsg replies
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (on pointer)
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery (CSRs)
 * @queue_select:	BPF can set the RX queue ID in packet vector
 */
struct nfp_app_bpf {
	struct nfp_app *app;

	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
	u16 tag_alloc_next;
	u16 tag_alloc_last;

	struct sk_buff_head cmsg_replies;
	struct wait_queue_head cmsg_wq;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
};

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	enum nfp_bpf_map_use use_map[];
};

struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg:	BPF register state from latest path
 * @var_off:	for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST	BIT(0)
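
/* Illustrative sketch of walking the instruction list with the helpers
 * above (the JIT passes iterate this list in a similar way):
 *
 *	struct nfp_insn_meta *meta;
 *
 *	list_for_each_entry(meta, &nfp_prog->insns, l)
 *		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
 *			continue;
 */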

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin: copy of core verifier umin_value
 * @umax: copy of core verifier umax_value
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @skip: skip this instruction (optimized out)
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for some operands,
		 * for example, the shift amount.
		 */
		struct {
			u64 umin;
			u64 umax;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	bool skip;
	instr_cb_t double_cb;

	struct list_head l;
};

#define BPF_SIZE_MASK	0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
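
/* Worked example for the predicates above (illustrative only): the eBPF
 * opcode BPF_STX | BPF_MEM | BPF_W (store a 32-bit register to memory)
 * satisfies is_mbpf_store() because clearing the size bits with
 * ~BPF_SIZE_MASK leaves BPF_STX | BPF_MEM; is_mbpf_store_pkt() additionally
 * requires that the verifier resolved the pointer operand to PTR_TO_PACKET,
 * while is_mbpf_xadd() matches BPF_STX | BPF_XADD instead.
 */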

static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;
	bool is_alu, is_shift;
	u8 opclass, opcode;

	opclass = BPF_CLASS(code);
	is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
	if (!is_alu)
		return false;

	opcode = BPF_OP(code);
	is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
	if (!is_shift)
		return false;

	return BPF_SRC(code) == BPF_X;
}

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_depth: max stack depth from the verifier
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @map_records: the map record pointers from bpf->maps_neutral
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;

	unsigned int n_translated;
	int error;

	unsigned int stack_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	struct nfp_bpf_neutral_map **map_records;

	struct list_head insns;
};

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in the memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
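
/* The nfp_bpf_ctrl_*() map accessors above are backed by control messages to
 * the firmware: replies are queued on nfp_app_bpf::cmsg_replies and callers
 * wait on cmsg_wq until the reply carrying their allocated tag arrives (see
 * the struct nfp_app_bpf kernel-doc).  nfp_bpf_ctrl_msg_rx() is the receive
 * handler feeding that queue.
 */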

#endif