/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../nfp_asm.h"
#include "fw.h"

/* For relocation logic, use the uppermost byte of the branch instruction as
 * a scratch area. Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE    0xff00000000000000ULL

enum nfp_relo_type {
        RELO_NONE = 0,
        /* standard internal jumps */
        RELO_BR_REL,
        /* internal jumps to parts of the outro */
        RELO_BR_GO_OUT,
        RELO_BR_GO_ABORT,
        /* external jumps to fixed addresses */
        RELO_BR_NEXT_PKT,
        RELO_BR_HELPER,
        /* immediate relocation against load address */
        RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO     15000
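
/* Illustrative sketch, not part of the driver interface: the relocation
 * type can be stashed in and later recovered from the scratch byte using
 * the <linux/bitfield.h> helpers included above.  Both helper names below
 * are hypothetical.
 */
static inline u64 nfp_relo_pack_example(u64 insn, enum nfp_relo_type type)
{
        /* Place the relocation type in the otherwise unused top byte. */
        return insn | FIELD_PREP(OP_RELO_TYPE, (u64)type);
}

static inline enum nfp_relo_type nfp_relo_unpack_example(u64 insn)
{
        /* Read the scratch byte back; per the comment above it must be
         * cleared before the instruction is sent to HW.
         */
        return FIELD_GET(OP_RELO_TYPE, insn);
}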

enum static_regs {
        STATIC_REG_IMMA         = 20, /* Bank AB */
        STATIC_REG_IMM          = 21, /* Bank AB */
        STATIC_REG_STACK        = 22, /* Bank A */
        STATIC_REG_PKT_LEN      = 22, /* Bank B */
};

enum pkt_vec {
        PKT_VEC_PKT_LEN         = 0,
        PKT_VEC_PKT_PTR         = 2,
};

#define pv_len(np)      reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)  reg_lm(1, PKT_VEC_PKT_PTR)

#define stack_reg(np)   reg_a(STATIC_REG_STACK)
#define stack_imm(np)   imm_b(np)
#define plen_reg(np)    reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)    pv_ctm_ptr(np)
#define imm_a(np)       reg_a(STATIC_REG_IMM)
#define imm_b(np)       reg_b(STATIC_REG_IMM)
#define imma_a(np)      reg_a(STATIC_REG_IMMA)
#define imma_b(np)      reg_b(STATIC_REG_IMMA)
#define imm_both(np)    reg_both(STATIC_REG_IMM)

#define NFP_BPF_ABI_FLAGS       reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK 1

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app: backpointer to the app
 *
 * @tag_allocator: bitmap of control message tags in use
 * @tag_alloc_next: next tag bit to allocate
 * @tag_alloc_last: next tag bit to be freed
 *
 * @cmsg_replies: received cmsg replies waiting to be consumed
 * @cmsg_wq: work queue for waiting for cmsg replies
 *
 * @map_list: list of offloaded maps
 * @maps_in_use: number of currently offloaded maps
 * @map_elems_in_use: number of elements allocated to offloaded maps
 *
 * @adjust_head: adjust head capability
 * @adjust_head.flags: extra flags for adjust head
 * @adjust_head.off_min: minimal packet offset within buffer required
 * @adjust_head.off_max: maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub: negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add: positive adjustment guaranteed possible
 *
 * @maps: map capability
 * @maps.types: supported map types
 * @maps.max_maps: max number of maps supported
 * @maps.max_elems: max number of entries in each map
 * @maps.max_key_sz: max size of map key
 * @maps.max_val_sz: max size of map value
 * @maps.max_elem_sz: max size of map entry (key + value)
 *
 * @helpers: helper addresses for various calls
 * @helpers.map_lookup: map lookup helper address
 * @helpers.map_update: map update helper address
 * @helpers.map_delete: map delete helper address
 */
struct nfp_app_bpf {
        struct nfp_app *app;

        DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
        u16 tag_alloc_next;
        u16 tag_alloc_last;

        struct sk_buff_head cmsg_replies;
        struct wait_queue_head cmsg_wq;

        struct list_head map_list;
        unsigned int maps_in_use;
        unsigned int map_elems_in_use;

        struct nfp_bpf_cap_adjust_head {
                u32 flags;
                int off_min;
                int off_max;
                int guaranteed_sub;
                int guaranteed_add;
        } adjust_head;

        struct {
                u32 types;
                u32 max_maps;
                u32 max_elems;
                u32 max_key_sz;
                u32 max_val_sz;
                u32 max_elem_sz;
        } maps;

        struct {
                u32 map_lookup;
                u32 map_update;
                u32 map_delete;
        } helpers;
};

enum nfp_bpf_map_use {
        NFP_MAP_UNUSED = 0,
        NFP_MAP_USE_READ,
        NFP_MAP_USE_WRITE,
        NFP_MAP_USE_ATOMIC_CNT,
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap: pointer to the offloaded BPF map
 * @bpf: back pointer to bpf app private structure
 * @tid: table id identifying map on datapath
 * @l: link on the nfp_app_bpf->map_list list
 * @use_map: map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
        struct bpf_offloaded_map *offmap;
        struct nfp_app_bpf *bpf;
        u32 tid;
        struct list_head l;
        enum nfp_bpf_map_use use_map[];
};
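
/* Illustrative sketch (hypothetical helper): @use_map holds one entry per
 * 4B chunk of the map value, so an allocation of the wrapper has to size
 * the flexible array from the map's value size, roughly like this.
 */
static inline size_t nfp_bpf_map_alloc_size_example(u32 value_size)
{
        return sizeof(struct nfp_bpf_map) +
               DIV_ROUND_UP(value_size, 4) * sizeof(enum nfp_bpf_map_use);
}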

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)                                   \
        list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)                                    \
        list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)     list_next_entry(meta, l)
#define nfp_meta_prev(meta)     list_prev_entry(meta, l)

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
        struct bpf_reg_state reg;
        bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST   BIT(0)

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @skip: skip this instruction (optimized out)
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
        struct bpf_insn insn;
        union {
                /* pointer ops (ld/st/xadd) */
                struct {
                        struct bpf_reg_state ptr;
                        struct bpf_insn *paired_st;
                        s16 ldst_gather_len;
                        bool ptr_not_const;
                        struct {
                                s16 range_start;
                                s16 range_end;
                                bool do_init;
                        } pkt_cache;
                        bool xadd_over_16bit;
                        bool xadd_maybe_16bit;
                };
                /* jump */
                struct nfp_insn_meta *jmp_dst;
                /* function calls */
                struct {
                        u32 func_id;
                        struct bpf_reg_state arg1;
                        struct nfp_bpf_reg_state arg2;
                };
        };
        unsigned int off;
        unsigned short n;
        unsigned short flags;
        bool skip;
        instr_cb_t double_cb;

        struct list_head l;
};

#define BPF_SIZE_MASK   0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
        return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
        return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
        return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
        return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
        return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
        return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
        u8 code = meta->insn.code;

        return BPF_CLASS(code) == BPF_LD &&
               (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
        u8 code = meta->insn.code;

        return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
        return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
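
/* Illustrative sketch (hypothetical helper): BPF_SIZE_MASK above covers
 * exactly the size bits of the opcode, which is what lets the
 * is_mbpf_load()/is_mbpf_store() checks match an access of any width.
 * The width itself can still be read with the BPF_SIZE() macro from the
 * UAPI headers pulled in via <linux/bpf.h>:
 */
static inline u8 mbpf_size_example(const struct nfp_insn_meta *meta)
{
        /* One of BPF_B, BPF_H, BPF_W or BPF_DW. */
        return BPF_SIZE(meta->insn.code);
}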

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_depth: max stack depth from the verifier
 * @adjust_head_location: insn number of the single adjust head call, if the
 *                        program has one
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
        struct nfp_app_bpf *bpf;

        u64 *prog;
        unsigned int prog_len;
        unsigned int __prog_alloc_len;

        struct nfp_insn_meta *verifier_meta;

        enum bpf_prog_type type;

        unsigned int last_bpf_off;
        unsigned int tgt_out;
        unsigned int tgt_abort;

        unsigned int n_translated;
        int error;

        unsigned int stack_depth;
        unsigned int adjust_head_location;

        struct list_head insns;
};
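
/* Illustrative sketch (hypothetical helper): the nfp_prog_first_meta() /
 * nfp_meta_next() accessors and the instr_cb_t callback type defined
 * earlier combine with @insns roughly like this in a translation pass.
 */
static inline int nfp_prog_walk_example(struct nfp_prog *nfp_prog,
                                        instr_cb_t cb)
{
        struct nfp_insn_meta *meta;
        int err;

        list_for_each_entry(meta, &nfp_prog->insns, l) {
                if (meta->skip) /* instruction optimized out */
                        continue;
                err = cb(nfp_prog, meta);
                if (err)
                        return err;
        }

        return 0;
}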

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog: currently loaded cls_bpf program
 * @start_off: address of the first instruction in memory
 * @tgt_done: jump target to get the next packet
 */
struct nfp_bpf_vnic {
        struct bpf_prog *tc_prog;
        unsigned int start_off;
        unsigned int tgt_done;
};

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
                struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  unsigned int insn_idx, unsigned int n_insns);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
                                void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
                               void *key, void *next_key);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif