/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* For the relocation logic, use the uppermost byte of the branch instruction
 * as a scratch area.  Remember to clear it before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL
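
/* An illustrative sketch (not a definitive description of the JIT internals):
 * the relocation type is typically encoded into the scratch byte with the
 * bitfield helpers and read back by the relocation pass, e.g.:
 *
 *	insn |= FIELD_PREP(OP_RELO_TYPE, RELO_BR_GO_OUT);
 *	...
 *	type = FIELD_GET(OP_RELO_TYPE, insn);
 */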

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO		15000

enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)
#define ret_reg(np)	imm_a(np)
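
/* Illustrative use (a sketch, assuming the emit helpers in jit.c): the JIT
 * refers to the fixed datapath registers only through the accessors above,
 * e.g. shrinking the packet length held in the packet vector:
 *
 *	emit_alu(nfp_prog, plen_reg(nfp_prog),
 *		 plen_reg(nfp_prog), ALU_OP_SUB, reg_imm(len));
 */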

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK	1

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 * @ccm:		common control message handler data
 *
 * @bpf_dev:		BPF offload device handle
 *
 * @cmsg_key_sz:	size of key in cmsg element array
 * @cmsg_val_sz:	size of value in cmsg element array
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (keyed on the map pointer)
 *
 * @abi_version:	global BPF ABI version
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer allowed
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery (CSRs)
 * @queue_select:	BPF can set the RX queue ID in packet vector
 * @adjust_tail:	BPF can simply truncate the packet size for adjust tail
 */
struct nfp_app_bpf {
	struct nfp_app *app;
	struct nfp_ccm ccm;

	struct bpf_offload_dev *bpf_dev;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
};

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

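/* Per 4-byte word usage record for an offloaded map value: @type holds an
 * enum nfp_bpf_map_use value, @non_zero_update flags words which have been
 * updated with a non-zero value.
 */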
struct nfp_bpf_map_word {
	unsigned char type		:4;
	unsigned char non_zero_update	:1;
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	struct nfp_bpf_map_word use_map[];
};
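
/* A minimal allocation sketch (the real code lives in offload.c): @use_map
 * records usage per 4-byte word of the map value, so the flexible array is
 * sized to one entry per value word, e.g.:
 *
 *	nfp_map = kzalloc(struct_size(nfp_map, use_map,
 *				      offmap->map.value_size / 4),
 *			  GFP_KERNEL);
 */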

struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)
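
/* Typical traversal sketch: the instruction wrappers live on nfp_prog->insns
 * in program order and are usually walked with the list helpers, e.g.:
 *
 *	struct nfp_insn_meta *meta;
 *
 *	list_for_each_entry(meta, &nfp_prog->insns, l)
 *		...;
 */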

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST			BIT(0)
#define FLAG_INSN_IS_SUBPROG_START		BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP			BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT		BIT(5)
/* Instruction needs to zero-extend its result into the high 32 bits */
#define FLAG_INSN_DO_ZEXT			BIT(6)

#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
					 FLAG_INSN_SKIP_PREC_DEPENDENT | \
					 FLAG_INSN_SKIP_VERIFIER_OPT)
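
/* A common pattern in the translator (an illustrative sketch, not the exact
 * code): instructions carrying any of the skip flags are not emitted, e.g.:
 *
 *	if (meta->flags & FLAG_INSN_SKIP_MASK)
 *		continue;
 */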

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br; /* only for BPF-to-BPF calls */
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations, e.g. the shift amount, or the multiplicand
		 * and multiplier.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;
	instr_cb_t double_cb;

	struct list_head l;
};

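/* Mask covering the BPF_SIZE() bits (BPF_B/BPF_H/BPF_W/BPF_DW) of an eBPF
 * opcode, so the load/store checks below can ignore the access width.
 */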
#define BPF_SIZE_MASK	0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP32;
}

static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP;
}

static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
{
	return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}

static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
}

static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}

static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
	u8 op;

	if (is_mbpf_jmp32(meta))
		return true;

	if (!is_mbpf_jmp64(meta))
		return false;

	op = mbpf_op(meta);
	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
		insn.src_reg != BPF_PSEUDO_CALL;
}

static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
		insn.src_reg == BPF_PSEUDO_CALL;
}
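
/* Illustrative sketch: only one member of the union in struct nfp_insn_meta
 * is valid for a given instruction, so callers are expected to classify the
 * instruction with the predicates above before touching it, e.g.:
 *
 *	if (is_mbpf_helper_call(meta))
 *		switch (meta->func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			...
 *		}
 */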

#define STACK_FRAME_ALIGN 64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth:	maximum stack depth used by this sub-program
 * @needs_reg_push:	whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif