1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2016-2018 Netronome Systems, Inc. */
3 
4 #define pr_fmt(fmt)	"NFP net bpf: " fmt
5 
6 #include <linux/bug.h>
7 #include <linux/bpf.h>
8 #include <linux/filter.h>
9 #include <linux/kernel.h>
10 #include <linux/pkt_cls.h>
11 #include <linux/reciprocal_div.h>
12 #include <linux/unistd.h>
13 
14 #include "main.h"
15 #include "../nfp_asm.h"
16 #include "../nfp_net_ctrl.h"
17 
18 /* --- NFP prog --- */
19 /* Foreach "multiple" entries macros provide pos and next<n> pointers.
20  * It's safe to modify the next pointers (but not pos).
21  */
22 #define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
23 	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
24 	     next = list_next_entry(pos, l);			\
25 	     &(nfp_prog)->insns != &pos->l &&			\
26 	     &(nfp_prog)->insns != &next->l;			\
27 	     pos = nfp_meta_next(pos),				\
28 	     next = nfp_meta_next(pos))
29 
30 #define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
31 	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
32 	     next = list_next_entry(pos, l),			\
33 	     next2 = list_next_entry(next, l);			\
34 	     &(nfp_prog)->insns != &pos->l &&			\
35 	     &(nfp_prog)->insns != &next->l &&			\
36 	     &(nfp_prog)->insns != &next2->l;			\
37 	     pos = nfp_meta_next(pos),				\
38 	     next = nfp_meta_next(pos),				\
39 	     next2 = nfp_meta_next(next))
40 
41 static bool
42 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
43 {
44 	return meta->l.prev != &nfp_prog->insns;
45 }
46 
47 static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
48 {
49 	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
50 		pr_warn("instruction limit reached (%u NFP instructions)\n",
51 			nfp_prog->prog_len);
52 		nfp_prog->error = -ENOSPC;
53 		return;
54 	}
55 
56 	nfp_prog->prog[nfp_prog->prog_len] = insn;
57 	nfp_prog->prog_len++;
58 }
59 
60 static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
61 {
62 	return nfp_prog->prog_len;
63 }
64 
65 static bool
66 nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
67 {
68 	/* If there is a recorded error we may have dropped instructions;
69 	 * that doesn't have to be due to translator bug, and the translation
70 	 * will fail anyway, so just return OK.
71 	 */
72 	if (nfp_prog->error)
73 		return true;
74 	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
75 }
76 
77 /* --- Emitters --- */
78 static void
79 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
80 	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
81 	   bool indir)
82 {
83 	u64 insn;
84 
85 	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
86 		FIELD_PREP(OP_CMD_CTX, ctx) |
87 		FIELD_PREP(OP_CMD_B_SRC, breg) |
88 		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
89 		FIELD_PREP(OP_CMD_XFER, xfer) |
90 		FIELD_PREP(OP_CMD_CNT, size) |
91 		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
92 		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
93 		FIELD_PREP(OP_CMD_INDIR, indir) |
94 		FIELD_PREP(OP_CMD_MODE, mode);
95 
96 	nfp_prog_push(nfp_prog, insn);
97 }
98 
99 static void
100 emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
101 	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
102 {
103 	struct nfp_insn_re_regs reg;
104 	int err;
105 
106 	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
107 	if (err) {
108 		nfp_prog->error = err;
109 		return;
110 	}
111 	if (reg.swap) {
112 		pr_err("cmd can't swap arguments\n");
113 		nfp_prog->error = -EFAULT;
114 		return;
115 	}
116 	if (reg.dst_lmextn || reg.src_lmextn) {
117 		pr_err("cmd can't use LMextn\n");
118 		nfp_prog->error = -EFAULT;
119 		return;
120 	}
121 
122 	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
123 		   indir);
124 }
125 
126 static void
127 emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
128 	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
129 {
130 	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
131 }
132 
133 static void
134 emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
135 	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
136 {
137 	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
138 }
139 
140 static void
141 __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
142 	  enum br_ctx_signal_state css, u16 addr, u8 defer)
143 {
144 	u16 addr_lo, addr_hi;
145 	u64 insn;
146 
147 	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
148 	addr_hi = addr != addr_lo;
149 
150 	insn = OP_BR_BASE |
151 		FIELD_PREP(OP_BR_MASK, mask) |
152 		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
153 		FIELD_PREP(OP_BR_CSS, css) |
154 		FIELD_PREP(OP_BR_DEFBR, defer) |
155 		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
156 		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
157 
158 	nfp_prog_push(nfp_prog, insn);
159 }
160 
161 static void
162 emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
163 	     enum nfp_relo_type relo)
164 {
165 	if (mask == BR_UNC && defer > 2) {
166 		pr_err("BUG: branch defer out of bounds %d\n", defer);
167 		nfp_prog->error = -EFAULT;
168 		return;
169 	}
170 
171 	__emit_br(nfp_prog, mask,
172 		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
173 		  BR_CSS_NONE, addr, defer);
174 
175 	nfp_prog->prog[nfp_prog->prog_len - 1] |=
176 		FIELD_PREP(OP_RELO_TYPE, relo);
177 }
178 
179 static void
180 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
181 {
182 	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
183 }
184 
185 static void
186 __emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
187 	      bool set, bool src_lmextn)
188 {
189 	u16 addr_lo, addr_hi;
190 	u64 insn;
191 
192 	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
193 	addr_hi = addr != addr_lo;
194 
195 	insn = OP_BR_BIT_BASE |
196 		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
197 		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
198 		FIELD_PREP(OP_BR_BIT_BV, set) |
199 		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
200 		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
201 		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
202 		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);
203 
204 	nfp_prog_push(nfp_prog, insn);
205 }
206 
207 static void
208 emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
209 		 u8 defer, bool set, enum nfp_relo_type relo)
210 {
211 	struct nfp_insn_re_regs reg;
212 	int err;
213 
214 	/* NOTE: The bit to test is specified as an rotation amount, such that
215 	 *	 the bit to test will be placed on the MSB of the result when
216 	 *	 doing a rotate right. For bit X, we need right rotate X + 1.
217 	 */
218 	bit += 1;
219 
220 	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
221 	if (err) {
222 		nfp_prog->error = err;
223 		return;
224 	}
225 
226 	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
227 		      reg.src_lmextn);
228 
229 	nfp_prog->prog[nfp_prog->prog_len - 1] |=
230 		FIELD_PREP(OP_RELO_TYPE, relo);
231 }
232 
233 static void
234 emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
235 {
236 	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
237 }
238 
239 static void
240 __emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
241 	      u8 defer, bool dst_lmextn, bool src_lmextn)
242 {
243 	u64 insn;
244 
245 	insn = OP_BR_ALU_BASE |
246 		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
247 		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
248 		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
249 		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
250 		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
251 		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);
252 
253 	nfp_prog_push(nfp_prog, insn);
254 }
255 
256 static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
257 {
258 	struct nfp_insn_ur_regs reg;
259 	int err;
260 
261 	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
262 	if (err) {
263 		nfp_prog->error = err;
264 		return;
265 	}
266 
267 	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
268 		      reg.src_lmextn);
269 }
270 
271 static void
272 __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
273 	     enum immed_width width, bool invert,
274 	     enum immed_shift shift, bool wr_both,
275 	     bool dst_lmextn, bool src_lmextn)
276 {
277 	u64 insn;
278 
279 	insn = OP_IMMED_BASE |
280 		FIELD_PREP(OP_IMMED_A_SRC, areg) |
281 		FIELD_PREP(OP_IMMED_B_SRC, breg) |
282 		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
283 		FIELD_PREP(OP_IMMED_WIDTH, width) |
284 		FIELD_PREP(OP_IMMED_INV, invert) |
285 		FIELD_PREP(OP_IMMED_SHIFT, shift) |
286 		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
287 		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
288 		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);
289 
290 	nfp_prog_push(nfp_prog, insn);
291 }
292 
293 static void
294 emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
295 	   enum immed_width width, bool invert, enum immed_shift shift)
296 {
297 	struct nfp_insn_ur_regs reg;
298 	int err;
299 
300 	if (swreg_type(dst) == NN_REG_IMM) {
301 		nfp_prog->error = -EFAULT;
302 		return;
303 	}
304 
305 	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
306 	if (err) {
307 		nfp_prog->error = err;
308 		return;
309 	}
310 
311 	/* Use reg.dst when destination is No-Dest. */
312 	__emit_immed(nfp_prog,
313 		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
314 		     reg.breg, imm >> 8, width, invert, shift,
315 		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
316 }
317 
318 static void
319 __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
320 	   enum shf_sc sc, u8 shift,
321 	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
322 	   bool dst_lmextn, bool src_lmextn)
323 {
324 	u64 insn;
325 
326 	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
327 		nfp_prog->error = -EFAULT;
328 		return;
329 	}
330 
331 	if (sc == SHF_SC_L_SHF)
332 		shift = 32 - shift;
333 
334 	insn = OP_SHF_BASE |
335 		FIELD_PREP(OP_SHF_A_SRC, areg) |
336 		FIELD_PREP(OP_SHF_SC, sc) |
337 		FIELD_PREP(OP_SHF_B_SRC, breg) |
338 		FIELD_PREP(OP_SHF_I8, i8) |
339 		FIELD_PREP(OP_SHF_SW, sw) |
340 		FIELD_PREP(OP_SHF_DST, dst) |
341 		FIELD_PREP(OP_SHF_SHIFT, shift) |
342 		FIELD_PREP(OP_SHF_OP, op) |
343 		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
344 		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
345 		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
346 		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);
347 
348 	nfp_prog_push(nfp_prog, insn);
349 }
350 
351 static void
352 emit_shf(struct nfp_prog *nfp_prog, swreg dst,
353 	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
354 {
355 	struct nfp_insn_re_regs reg;
356 	int err;
357 
358 	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
359 	if (err) {
360 		nfp_prog->error = err;
361 		return;
362 	}
363 
364 	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
365 		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
366 		   reg.dst_lmextn, reg.src_lmextn);
367 }
368 
369 static void
370 emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
371 	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
372 {
373 	if (sc == SHF_SC_R_ROT) {
374 		pr_err("indirect shift is not allowed on rotation\n");
375 		nfp_prog->error = -EFAULT;
376 		return;
377 	}
378 
379 	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
380 }
381 
382 static void
383 __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
384 	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
385 	   bool dst_lmextn, bool src_lmextn)
386 {
387 	u64 insn;
388 
389 	insn = OP_ALU_BASE |
390 		FIELD_PREP(OP_ALU_A_SRC, areg) |
391 		FIELD_PREP(OP_ALU_B_SRC, breg) |
392 		FIELD_PREP(OP_ALU_DST, dst) |
393 		FIELD_PREP(OP_ALU_SW, swap) |
394 		FIELD_PREP(OP_ALU_OP, op) |
395 		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
396 		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
397 		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
398 		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);
399 
400 	nfp_prog_push(nfp_prog, insn);
401 }
402 
403 static void
404 emit_alu(struct nfp_prog *nfp_prog, swreg dst,
405 	 swreg lreg, enum alu_op op, swreg rreg)
406 {
407 	struct nfp_insn_ur_regs reg;
408 	int err;
409 
410 	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
411 	if (err) {
412 		nfp_prog->error = err;
413 		return;
414 	}
415 
416 	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
417 		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
418 		   reg.dst_lmextn, reg.src_lmextn);
419 }
420 
421 static void
422 __emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
423 	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
424 	   bool wr_both, bool dst_lmextn, bool src_lmextn)
425 {
426 	u64 insn;
427 
428 	insn = OP_MUL_BASE |
429 		FIELD_PREP(OP_MUL_A_SRC, areg) |
430 		FIELD_PREP(OP_MUL_B_SRC, breg) |
431 		FIELD_PREP(OP_MUL_STEP, step) |
432 		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
433 		FIELD_PREP(OP_MUL_SW, swap) |
434 		FIELD_PREP(OP_MUL_TYPE, type) |
435 		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
436 		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
437 		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);
438 
439 	nfp_prog_push(nfp_prog, insn);
440 }
441 
442 static void
443 emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
444 	 enum mul_step step, swreg rreg)
445 {
446 	struct nfp_insn_ur_regs reg;
447 	u16 areg;
448 	int err;
449 
450 	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
451 		nfp_prog->error = -EINVAL;
452 		return;
453 	}
454 
455 	if (step == MUL_LAST || step == MUL_LAST_2) {
456 		/* When type is step and step Number is LAST or LAST2, left
457 		 * source is used as destination.
458 		 */
459 		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
460 		areg = reg.dst;
461 	} else {
462 		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
463 		areg = reg.areg;
464 	}
465 
466 	if (err) {
467 		nfp_prog->error = err;
468 		return;
469 	}
470 
471 	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
472 		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
473 }
474 
475 static void
476 __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
477 		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
478 		bool zero, bool swap, bool wr_both,
479 		bool dst_lmextn, bool src_lmextn)
480 {
481 	u64 insn;
482 
483 	insn = OP_LDF_BASE |
484 		FIELD_PREP(OP_LDF_A_SRC, areg) |
485 		FIELD_PREP(OP_LDF_SC, sc) |
486 		FIELD_PREP(OP_LDF_B_SRC, breg) |
487 		FIELD_PREP(OP_LDF_I8, imm8) |
488 		FIELD_PREP(OP_LDF_SW, swap) |
489 		FIELD_PREP(OP_LDF_ZF, zero) |
490 		FIELD_PREP(OP_LDF_BMASK, bmask) |
491 		FIELD_PREP(OP_LDF_SHF, shift) |
492 		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
493 		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
494 		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);
495 
496 	nfp_prog_push(nfp_prog, insn);
497 }
498 
499 static void
500 emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
501 		  enum shf_sc sc, u8 shift, bool zero)
502 {
503 	struct nfp_insn_re_regs reg;
504 	int err;
505 
506 	/* Note: ld_field is special as it uses one of the src regs as dst */
507 	err = swreg_to_restricted(dst, dst, src, &reg, true);
508 	if (err) {
509 		nfp_prog->error = err;
510 		return;
511 	}
512 
513 	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
514 			reg.i8, zero, reg.swap, reg.wr_both,
515 			reg.dst_lmextn, reg.src_lmextn);
516 }
517 
518 static void
519 emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
520 	      enum shf_sc sc, u8 shift)
521 {
522 	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
523 }
524 
525 static void
526 __emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
527 	    bool dst_lmextn, bool src_lmextn)
528 {
529 	u64 insn;
530 
531 	insn = OP_LCSR_BASE |
532 		FIELD_PREP(OP_LCSR_A_SRC, areg) |
533 		FIELD_PREP(OP_LCSR_B_SRC, breg) |
534 		FIELD_PREP(OP_LCSR_WRITE, wr) |
535 		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
536 		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
537 		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);
538 
539 	nfp_prog_push(nfp_prog, insn);
540 }
541 
542 static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
543 {
544 	struct nfp_insn_ur_regs reg;
545 	int err;
546 
547 	/* This instruction takes immeds instead of reg_none() for the ignored
548 	 * operand, but we can't encode 2 immeds in one instr with our normal
549 	 * swreg infra so if param is an immed, we encode as reg_none() and
550 	 * copy the immed to both operands.
551 	 */
552 	if (swreg_type(src) == NN_REG_IMM) {
553 		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
554 		reg.breg = reg.areg;
555 	} else {
556 		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
557 	}
558 	if (err) {
559 		nfp_prog->error = err;
560 		return;
561 	}
562 
563 	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
564 		    false, reg.src_lmextn);
565 }
566 
567 /* CSR value is read in following immed[gpr, 0] */
568 static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
569 {
570 	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
571 }
572 
573 static void emit_nop(struct nfp_prog *nfp_prog)
574 {
575 	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
576 }
577 
578 /* --- Wrappers --- */
579 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
580 {
581 	if (!(imm & 0xffff0000)) {
582 		*val = imm;
583 		*shift = IMMED_SHIFT_0B;
584 	} else if (!(imm & 0xff0000ff)) {
585 		*val = imm >> 8;
586 		*shift = IMMED_SHIFT_1B;
587 	} else if (!(imm & 0x0000ffff)) {
588 		*val = imm >> 16;
589 		*shift = IMMED_SHIFT_2B;
590 	} else {
591 		return false;
592 	}
593 
594 	return true;
595 }
596 
597 static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
598 {
599 	enum immed_shift shift;
600 	u16 val;
601 
602 	if (pack_immed(imm, &val, &shift)) {
603 		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
604 	} else if (pack_immed(~imm, &val, &shift)) {
605 		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
606 	} else {
607 		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
608 			   false, IMMED_SHIFT_0B);
609 		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
610 			   false, IMMED_SHIFT_2B);
611 	}
612 }
613 
614 static void
615 wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
616 	       enum nfp_relo_type relo)
617 {
618 	if (imm > 0xffff) {
619 		pr_err("relocation of a large immediate!\n");
620 		nfp_prog->error = -EFAULT;
621 		return;
622 	}
623 	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
624 
625 	nfp_prog->prog[nfp_prog->prog_len - 1] |=
626 		FIELD_PREP(OP_RELO_TYPE, relo);
627 }
628 
629 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
630  * If the @imm is small enough encode it directly in operand and return
631  * otherwise load @imm to a spare register and return its encoding.
632  */
633 static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
634 {
635 	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
636 		return reg_imm(imm);
637 
638 	wrp_immed(nfp_prog, tmp_reg, imm);
639 	return tmp_reg;
640 }
641 
642 /* re_load_imm_any() - encode immediate or use tmp register (restricted)
643  * If the @imm is small enough encode it directly in operand and return
644  * otherwise load @imm to a spare register and return its encoding.
645  */
646 static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
647 {
648 	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
649 		return reg_imm(imm);
650 
651 	wrp_immed(nfp_prog, tmp_reg, imm);
652 	return tmp_reg;
653 }
654 
655 static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
656 {
657 	while (count--)
658 		emit_nop(nfp_prog);
659 }
660 
661 static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
662 {
663 	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
664 }
665 
666 static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
667 {
668 	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
669 }
670 
671 /* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
672  * result to @dst from low end.
673  */
674 static void
675 wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
676 		u8 offset)
677 {
678 	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
679 	u8 mask = (1 << field_len) - 1;
680 
681 	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
682 }
683 
684 /* wrp_reg_or_subpart() - load @field_len bytes from low end of @src, or the
685  * result to @dst from offset, there is no change on the other bits of @dst.
686  */
687 static void
688 wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
689 		   u8 field_len, u8 offset)
690 {
691 	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
692 	u8 mask = ((1 << field_len) - 1) << offset;
693 
694 	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
695 }
696 
697 static void
698 addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
699 	      swreg *rega, swreg *regb)
700 {
701 	if (offset == reg_imm(0)) {
702 		*rega = reg_a(src_gpr);
703 		*regb = reg_b(src_gpr + 1);
704 		return;
705 	}
706 
707 	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
708 	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
709 		 reg_imm(0));
710 	*rega = imm_a(nfp_prog);
711 	*regb = imm_b(nfp_prog);
712 }
713 
714 /* NFP has Command Push Pull bus which supports bluk memory operations. */
715 static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
716 {
717 	bool descending_seq = meta->ldst_gather_len < 0;
718 	s16 len = abs(meta->ldst_gather_len);
719 	swreg src_base, off;
720 	bool src_40bit_addr;
721 	unsigned int i;
722 	u8 xfer_num;
723 
724 	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
725 	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
726 	src_base = reg_a(meta->insn.src_reg * 2);
727 	xfer_num = round_up(len, 4) / 4;
728 
729 	if (src_40bit_addr)
730 		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
731 			      &off);
732 
733 	/* Setup PREV_ALU fields to override memory read length. */
734 	if (len > 32)
735 		wrp_immed(nfp_prog, reg_none(),
736 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
737 
738 	/* Memory read from source addr into transfer-in registers. */
739 	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
740 		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
741 		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);
742 
743 	/* Move from transfer-in to transfer-out. */
744 	for (i = 0; i < xfer_num; i++)
745 		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
746 
747 	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
748 
749 	if (len <= 8) {
750 		/* Use single direct_ref write8. */
751 		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
752 			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
753 			 CMD_CTX_SWAP);
754 	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
755 		/* Use single direct_ref write32. */
756 		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
757 			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
758 			 CMD_CTX_SWAP);
759 	} else if (len <= 32) {
760 		/* Use single indirect_ref write8. */
761 		wrp_immed(nfp_prog, reg_none(),
762 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
763 		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
764 			       reg_a(meta->paired_st->dst_reg * 2), off,
765 			       len - 1, CMD_CTX_SWAP);
766 	} else if (IS_ALIGNED(len, 4)) {
767 		/* Use single indirect_ref write32. */
768 		wrp_immed(nfp_prog, reg_none(),
769 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
770 		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
771 			       reg_a(meta->paired_st->dst_reg * 2), off,
772 			       xfer_num - 1, CMD_CTX_SWAP);
773 	} else if (len <= 40) {
774 		/* Use one direct_ref write32 to write the first 32-bytes, then
775 		 * another direct_ref write8 to write the remaining bytes.
776 		 */
777 		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
778 			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
779 			 CMD_CTX_SWAP);
780 
781 		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
782 				      imm_b(nfp_prog));
783 		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
784 			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
785 			 CMD_CTX_SWAP);
786 	} else {
787 		/* Use one indirect_ref write32 to write 4-bytes aligned length,
788 		 * then another direct_ref write8 to write the remaining bytes.
789 		 */
790 		u8 new_off;
791 
792 		wrp_immed(nfp_prog, reg_none(),
793 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
794 		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
795 			       reg_a(meta->paired_st->dst_reg * 2), off,
796 			       xfer_num - 2, CMD_CTX_SWAP);
797 		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
798 		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
799 		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
800 			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
801 			 (len & 0x3) - 1, CMD_CTX_SWAP);
802 	}
803 
804 	/* TODO: The following extra load is to make sure data flow be identical
805 	 *  before and after we do memory copy optimization.
806 	 *
807 	 *  The load destination register is not guaranteed to be dead, so we
808 	 *  need to make sure it is loaded with the value the same as before
809 	 *  this transformation.
810 	 *
811 	 *  These extra loads could be removed once we have accurate register
812 	 *  usage information.
813 	 */
814 	if (descending_seq)
815 		xfer_num = 0;
816 	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
817 		xfer_num = xfer_num - 1;
818 	else
819 		xfer_num = xfer_num - 2;
820 
821 	switch (BPF_SIZE(meta->insn.code)) {
822 	case BPF_B:
823 		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
824 				reg_xfer(xfer_num), 1,
825 				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
826 		break;
827 	case BPF_H:
828 		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
829 				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
830 		break;
831 	case BPF_W:
832 		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
833 			reg_xfer(0));
834 		break;
835 	case BPF_DW:
836 		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
837 			reg_xfer(xfer_num));
838 		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
839 			reg_xfer(xfer_num + 1));
840 		break;
841 	}
842 
843 	if (BPF_SIZE(meta->insn.code) != BPF_DW)
844 		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
845 
846 	return 0;
847 }
848 
849 static int
850 data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
851 {
852 	unsigned int i;
853 	u16 shift, sz;
854 
855 	/* We load the value from the address indicated in @offset and then
856 	 * shift out the data we don't need.  Note: this is big endian!
857 	 */
858 	sz = max(size, 4);
859 	shift = size < 4 ? 4 - size : 0;
860 
861 	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
862 		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);
863 
864 	i = 0;
865 	if (shift)
866 		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
867 			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
868 	else
869 		for (; i * 4 < size; i++)
870 			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
871 
872 	if (i < 2)
873 		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
874 
875 	return 0;
876 }
877 
878 static int
879 data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
880 		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
881 {
882 	unsigned int i;
883 	u8 mask, sz;
884 
885 	/* We load the value from the address indicated in rreg + lreg and then
886 	 * mask out the data we don't need.  Note: this is little endian!
887 	 */
888 	sz = max(size, 4);
889 	mask = size < 4 ? GENMASK(size - 1, 0) : 0;
890 
891 	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
892 		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);
893 
894 	i = 0;
895 	if (mask)
896 		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
897 				  reg_xfer(0), SHF_SC_NONE, 0, true);
898 	else
899 		for (; i * 4 < size; i++)
900 			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
901 
902 	if (i < 2)
903 		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
904 
905 	return 0;
906 }
907 
908 static int
909 data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
910 			  u8 dst_gpr, u8 size)
911 {
912 	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
913 				  size, CMD_MODE_32b);
914 }
915 
916 static int
917 data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
918 			  u8 dst_gpr, u8 size)
919 {
920 	swreg rega, regb;
921 
922 	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
923 
924 	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
925 				  size, CMD_MODE_40b_BA);
926 }
927 
928 static int
929 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
930 {
931 	swreg tmp_reg;
932 
933 	/* Calculate the true offset (src_reg + imm) */
934 	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
935 	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);
936 
937 	/* Check packet length (size guaranteed to fit b/c it's u8) */
938 	emit_alu(nfp_prog, imm_a(nfp_prog),
939 		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
940 	emit_alu(nfp_prog, reg_none(),
941 		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
942 	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
943 
944 	/* Load data */
945 	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
946 }
947 
948 static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
949 {
950 	swreg tmp_reg;
951 
952 	/* Check packet length */
953 	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
954 	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
955 	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
956 
957 	/* Load data */
958 	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
959 	return data_ld(nfp_prog, tmp_reg, 0, size);
960 }
961 
962 static int
963 data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
964 		    u8 src_gpr, u8 size)
965 {
966 	unsigned int i;
967 
968 	for (i = 0; i * 4 < size; i++)
969 		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));
970 
971 	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
972 		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);
973 
974 	return 0;
975 }
976 
977 static int
978 data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
979 		   u64 imm, u8 size)
980 {
981 	wrp_immed(nfp_prog, reg_xfer(0), imm);
982 	if (size == 8)
983 		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);
984 
985 	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
986 		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);
987 
988 	return 0;
989 }
990 
991 typedef int
992 (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
993 	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
994 	     bool needs_inc);
995 
996 static int
997 wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
998 	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
999 	      bool needs_inc)
1000 {
1001 	bool should_inc = needs_inc && new_gpr && !last;
1002 	u32 idx, src_byte;
1003 	enum shf_sc sc;
1004 	swreg reg;
1005 	int shf;
1006 	u8 mask;
1007 
1008 	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
1009 		return -EOPNOTSUPP;
1010 
1011 	idx = off / 4;
1012 
1013 	/* Move the entire word */
1014 	if (size == 4) {
1015 		wrp_mov(nfp_prog, reg_both(dst),
1016 			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
1017 		return 0;
1018 	}
1019 
1020 	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
1021 		return -EOPNOTSUPP;
1022 
1023 	src_byte = off % 4;
1024 
1025 	mask = (1 << size) - 1;
1026 	mask <<= dst_byte;
1027 
1028 	if (WARN_ON_ONCE(mask > 0xf))
1029 		return -EOPNOTSUPP;
1030 
1031 	shf = abs(src_byte - dst_byte) * 8;
1032 	if (src_byte == dst_byte) {
1033 		sc = SHF_SC_NONE;
1034 	} else if (src_byte < dst_byte) {
1035 		shf = 32 - shf;
1036 		sc = SHF_SC_L_SHF;
1037 	} else {
1038 		sc = SHF_SC_R_SHF;
1039 	}
1040 
1041 	/* ld_field can address fewer indexes, if offset too large do RMW.
1042 	 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes.
1043 	 */
1044 	if (idx <= RE_REG_LM_IDX_MAX) {
1045 		reg = reg_lm(lm3 ? 3 : 0, idx);
1046 	} else {
1047 		reg = imm_a(nfp_prog);
1048 		/* If it's not the first part of the load and we start a new GPR
1049 		 * that means we are loading a second part of the LMEM word into
1050 		 * a new GPR.  IOW we've already looked that LMEM word and
1051 		 * therefore it has been loaded into imm_a().
1052 		 */
1053 		if (first || !new_gpr)
1054 			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
1055 	}
1056 
1057 	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);
1058 
1059 	if (should_inc)
1060 		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
1061 
1062 	return 0;
1063 }
1064 
1065 static int
1066 wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
1067 	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
1068 	       bool needs_inc)
1069 {
1070 	bool should_inc = needs_inc && new_gpr && !last;
1071 	u32 idx, dst_byte;
1072 	enum shf_sc sc;
1073 	swreg reg;
1074 	int shf;
1075 	u8 mask;
1076 
1077 	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
1078 		return -EOPNOTSUPP;
1079 
1080 	idx = off / 4;
1081 
1082 	/* Move the entire word */
1083 	if (size == 4) {
1084 		wrp_mov(nfp_prog,
1085 			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
1086 			reg_b(src));
1087 		return 0;
1088 	}
1089 
1090 	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
1091 		return -EOPNOTSUPP;
1092 
1093 	dst_byte = off % 4;
1094 
1095 	mask = (1 << size) - 1;
1096 	mask <<= dst_byte;
1097 
1098 	if (WARN_ON_ONCE(mask > 0xf))
1099 		return -EOPNOTSUPP;
1100 
1101 	shf = abs(src_byte - dst_byte) * 8;
1102 	if (src_byte == dst_byte) {
1103 		sc = SHF_SC_NONE;
1104 	} else if (src_byte < dst_byte) {
1105 		shf = 32 - shf;
1106 		sc = SHF_SC_L_SHF;
1107 	} else {
1108 		sc = SHF_SC_R_SHF;
1109 	}
1110 
1111 	/* ld_field can address fewer indexes, if offset too large do RMW.
1112 	 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes.
1113 	 */
1114 	if (idx <= RE_REG_LM_IDX_MAX) {
1115 		reg = reg_lm(lm3 ? 3 : 0, idx);
1116 	} else {
1117 		reg = imm_a(nfp_prog);
1118 		/* Only first and last LMEM locations are going to need RMW,
1119 		 * the middle location will be overwritten fully.
1120 		 */
1121 		if (first || last)
1122 			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
1123 	}
1124 
1125 	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);
1126 
1127 	if (new_gpr || last) {
1128 		if (idx > RE_REG_LM_IDX_MAX)
1129 			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
1130 		if (should_inc)
1131 			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
1132 	}
1133 
1134 	return 0;
1135 }
1136 
1137 static int
1138 mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1139 	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
1140 	     bool clr_gpr, lmem_step step)
1141 {
1142 	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
1143 	bool first = true, last;
1144 	bool needs_inc = false;
1145 	swreg stack_off_reg;
1146 	u8 prev_gpr = 255;
1147 	u32 gpr_byte = 0;
1148 	bool lm3 = true;
1149 	int ret;
1150 
1151 	if (meta->ptr_not_const ||
1152 	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
1153 		/* Use of the last encountered ptr_off is OK, they all have
1154 		 * the same alignment.  Depend on low bits of value being
1155 		 * discarded when written to LMaddr register.
1156 		 */
1157 		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
1158 						stack_imm(nfp_prog));
1159 
1160 		emit_alu(nfp_prog, imm_b(nfp_prog),
1161 			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);
1162 
1163 		needs_inc = true;
1164 	} else if (off + size <= 64) {
1165 		/* We can reach bottom 64B with LMaddr0 */
1166 		lm3 = false;
1167 	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
1168 		/* We have to set up a new pointer.  If we know the offset
1169 		 * and the entire access falls into a single 32 byte aligned
1170 		 * window we won't have to increment the LM pointer.
1171 		 * The 32 byte alignment is imporant because offset is ORed in
1172 		 * not added when doing *l$indexN[off].
1173 		 */
1174 		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
1175 						stack_imm(nfp_prog));
1176 		emit_alu(nfp_prog, imm_b(nfp_prog),
1177 			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
1178 
1179 		off %= 32;
1180 	} else {
1181 		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
1182 						stack_imm(nfp_prog));
1183 
1184 		emit_alu(nfp_prog, imm_b(nfp_prog),
1185 			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
1186 
1187 		needs_inc = true;
1188 	}
1189 	if (lm3) {
1190 		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
1191 		/* For size < 4 one slot will be filled by zeroing of upper. */
1192 		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
1193 	}
1194 
1195 	if (clr_gpr && size < 8)
1196 		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
1197 
1198 	while (size) {
1199 		u32 slice_end;
1200 		u8 slice_size;
1201 
1202 		slice_size = min(size, 4 - gpr_byte);
1203 		slice_end = min(off + slice_size, round_up(off + 1, 4));
1204 		slice_size = slice_end - off;
1205 
1206 		last = slice_size == size;
1207 
1208 		if (needs_inc)
1209 			off %= 4;
1210 
1211 		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
1212 			   first, gpr != prev_gpr, last, lm3, needs_inc);
1213 		if (ret)
1214 			return ret;
1215 
1216 		prev_gpr = gpr;
1217 		first = false;
1218 
1219 		gpr_byte += slice_size;
1220 		if (gpr_byte >= 4) {
1221 			gpr_byte -= 4;
1222 			gpr++;
1223 		}
1224 
1225 		size -= slice_size;
1226 		off += slice_size;
1227 	}
1228 
1229 	return 0;
1230 }
1231 
1232 static void
1233 wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
1234 {
1235 	swreg tmp_reg;
1236 
1237 	if (alu_op == ALU_OP_AND) {
1238 		if (!imm)
1239 			wrp_immed(nfp_prog, reg_both(dst), 0);
1240 		if (!imm || !~imm)
1241 			return;
1242 	}
1243 	if (alu_op == ALU_OP_OR) {
1244 		if (!~imm)
1245 			wrp_immed(nfp_prog, reg_both(dst), ~0U);
1246 		if (!imm || !~imm)
1247 			return;
1248 	}
1249 	if (alu_op == ALU_OP_XOR) {
1250 		if (!~imm)
1251 			emit_alu(nfp_prog, reg_both(dst), reg_none(),
1252 				 ALU_OP_NOT, reg_b(dst));
1253 		if (!imm || !~imm)
1254 			return;
1255 	}
1256 
1257 	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
1258 	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
1259 }
1260 
1261 static int
1262 wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1263 	      enum alu_op alu_op, bool skip)
1264 {
1265 	const struct bpf_insn *insn = &meta->insn;
1266 	u64 imm = insn->imm; /* sign extend */
1267 
1268 	if (skip) {
1269 		meta->skip = true;
1270 		return 0;
1271 	}
1272 
1273 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
1274 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
1275 
1276 	return 0;
1277 }
1278 
1279 static int
1280 wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1281 	      enum alu_op alu_op)
1282 {
1283 	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
1284 
1285 	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
1286 	emit_alu(nfp_prog, reg_both(dst + 1),
1287 		 reg_a(dst + 1), alu_op, reg_b(src + 1));
1288 
1289 	return 0;
1290 }
1291 
1292 static int
1293 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1294 	      enum alu_op alu_op, bool skip)
1295 {
1296 	const struct bpf_insn *insn = &meta->insn;
1297 
1298 	if (skip) {
1299 		meta->skip = true;
1300 		return 0;
1301 	}
1302 
1303 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
1304 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1305 
1306 	return 0;
1307 }
1308 
1309 static int
1310 wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1311 	      enum alu_op alu_op)
1312 {
1313 	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
1314 
1315 	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
1316 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
1317 
1318 	return 0;
1319 }
1320 
1321 static void
1322 wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
1323 		 enum br_mask br_mask, u16 off)
1324 {
1325 	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
1326 	emit_br(nfp_prog, br_mask, off, 0);
1327 }
1328 
1329 static int
1330 wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1331 	     enum alu_op alu_op, enum br_mask br_mask)
1332 {
1333 	const struct bpf_insn *insn = &meta->insn;
1334 
1335 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
1336 			 insn->src_reg * 2, br_mask, insn->off);
1337 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
1338 			 insn->src_reg * 2 + 1, br_mask, insn->off);
1339 
1340 	return 0;
1341 }
1342 
1343 static const struct jmp_code_map {
1344 	enum br_mask br_mask;
1345 	bool swap;
1346 } jmp_code_map[] = {
1347 	[BPF_JGT >> 4]	= { BR_BLO, true },
1348 	[BPF_JGE >> 4]	= { BR_BHS, false },
1349 	[BPF_JLT >> 4]	= { BR_BLO, false },
1350 	[BPF_JLE >> 4]	= { BR_BHS, true },
1351 	[BPF_JSGT >> 4]	= { BR_BLT, true },
1352 	[BPF_JSGE >> 4]	= { BR_BGE, false },
1353 	[BPF_JSLT >> 4]	= { BR_BLT, false },
1354 	[BPF_JSLE >> 4]	= { BR_BGE, true },
1355 };
1356 
1357 static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
1358 {
1359 	unsigned int op;
1360 
1361 	op = BPF_OP(meta->insn.code) >> 4;
1362 	/* br_mask of 0 is BR_BEQ which we don't use in jump code table */
1363 	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
1364 		      !jmp_code_map[op].br_mask,
1365 		      "no code found for jump instruction"))
1366 		return NULL;
1367 
1368 	return &jmp_code_map[op];
1369 }
1370 
1371 static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1372 {
1373 	const struct bpf_insn *insn = &meta->insn;
1374 	u64 imm = insn->imm; /* sign extend */
1375 	const struct jmp_code_map *code;
1376 	enum alu_op alu_op, carry_op;
1377 	u8 reg = insn->dst_reg * 2;
1378 	swreg tmp_reg;
1379 
1380 	code = nfp_jmp_code_get(meta);
1381 	if (!code)
1382 		return -EINVAL;
1383 
1384 	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
1385 	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
1386 
1387 	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
1388 	if (!code->swap)
1389 		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
1390 	else
1391 		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
1392 
1393 	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
1394 	if (!code->swap)
1395 		emit_alu(nfp_prog, reg_none(),
1396 			 reg_a(reg + 1), carry_op, tmp_reg);
1397 	else
1398 		emit_alu(nfp_prog, reg_none(),
1399 			 tmp_reg, carry_op, reg_a(reg + 1));
1400 
1401 	emit_br(nfp_prog, code->br_mask, insn->off, 0);
1402 
1403 	return 0;
1404 }
1405 
1406 static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1407 {
1408 	const struct bpf_insn *insn = &meta->insn;
1409 	const struct jmp_code_map *code;
1410 	u8 areg, breg;
1411 
1412 	code = nfp_jmp_code_get(meta);
1413 	if (!code)
1414 		return -EINVAL;
1415 
1416 	areg = insn->dst_reg * 2;
1417 	breg = insn->src_reg * 2;
1418 
1419 	if (code->swap) {
1420 		areg ^= breg;
1421 		breg ^= areg;
1422 		areg ^= breg;
1423 	}
1424 
1425 	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
1426 	emit_alu(nfp_prog, reg_none(),
1427 		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
1428 	emit_br(nfp_prog, code->br_mask, insn->off, 0);
1429 
1430 	return 0;
1431 }
1432 
1433 static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
1434 {
1435 	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
1436 		      SHF_SC_R_ROT, 8);
1437 	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
1438 		      SHF_SC_R_ROT, 16);
1439 }
1440 
1441 static void
1442 wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
1443 	    swreg rreg, bool gen_high_half)
1444 {
1445 	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
1446 	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
1447 	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
1448 	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
1449 	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
1450 	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
1451 	if (gen_high_half)
1452 		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
1453 			 reg_none());
1454 	else
1455 		wrp_immed(nfp_prog, dst_hi, 0);
1456 }
1457 
1458 static void
1459 wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
1460 	    swreg rreg)
1461 {
1462 	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
1463 	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
1464 	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
1465 	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
1466 }
1467 
1468 static int
1469 wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1470 	bool gen_high_half, bool ropnd_from_reg)
1471 {
1472 	swreg multiplier, multiplicand, dst_hi, dst_lo;
1473 	const struct bpf_insn *insn = &meta->insn;
1474 	u32 lopnd_max, ropnd_max;
1475 	u8 dst_reg;
1476 
1477 	dst_reg = insn->dst_reg;
1478 	multiplicand = reg_a(dst_reg * 2);
1479 	dst_hi = reg_both(dst_reg * 2 + 1);
1480 	dst_lo = reg_both(dst_reg * 2);
1481 	lopnd_max = meta->umax_dst;
1482 	if (ropnd_from_reg) {
1483 		multiplier = reg_b(insn->src_reg * 2);
1484 		ropnd_max = meta->umax_src;
1485 	} else {
1486 		u32 imm = insn->imm;
1487 
1488 		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
1489 		ropnd_max = imm;
1490 	}
1491 	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
1492 		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
1493 			    gen_high_half);
1494 	else
1495 		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);
1496 
1497 	return 0;
1498 }
1499 
1500 static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
1501 {
1502 	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
1503 	struct reciprocal_value_adv rvalue;
1504 	u8 pre_shift, exp;
1505 	swreg magic;
1506 
1507 	if (imm > U32_MAX) {
1508 		wrp_immed(nfp_prog, dst_both, 0);
1509 		return 0;
1510 	}
1511 
1512 	/* NOTE: because we are using "reciprocal_value_adv" which doesn't
1513 	 * support "divisor > (1u << 31)", we need to JIT separate NFP sequence
1514 	 * to handle such case which actually equals to the result of unsigned
1515 	 * comparison "dst >= imm" which could be calculated using the following
1516 	 * NFP sequence:
1517 	 *
1518 	 *  alu[--, dst, -, imm]
1519 	 *  immed[imm, 0]
1520 	 *  alu[dst, imm, +carry, 0]
1521 	 *
1522 	 */
1523 	if (imm > 1U << 31) {
1524 		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
1525 
1526 		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
1527 		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
1528 		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
1529 			 reg_imm(0));
1530 		return 0;
1531 	}
1532 
1533 	rvalue = reciprocal_value_adv(imm, 32);
1534 	exp = rvalue.exp;
1535 	if (rvalue.is_wide_m && !(imm & 1)) {
1536 		pre_shift = fls(imm & -imm) - 1;
1537 		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
1538 	} else {
1539 		pre_shift = 0;
1540 	}
1541 	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
1542 	if (imm == 1U << exp) {
1543 		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
1544 			 SHF_SC_R_SHF, exp);
1545 	} else if (rvalue.is_wide_m) {
1546 		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
1547 			    magic, true);
1548 		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
1549 			 imm_b(nfp_prog));
1550 		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
1551 			 SHF_SC_R_SHF, 1);
1552 		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
1553 			 imm_b(nfp_prog));
1554 		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
1555 			 SHF_SC_R_SHF, rvalue.sh - 1);
1556 	} else {
1557 		if (pre_shift)
1558 			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
1559 				 dst_b, SHF_SC_R_SHF, pre_shift);
1560 		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
1561 		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
1562 			 dst_b, SHF_SC_R_SHF, rvalue.sh);
1563 	}
1564 
1565 	return 0;
1566 }
1567 
1568 static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1569 {
1570 	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
1571 	struct nfp_bpf_cap_adjust_head *adjust_head;
1572 	u32 ret_einval, end;
1573 
1574 	adjust_head = &nfp_prog->bpf->adjust_head;
1575 
1576 	/* Optimized version - 5 vs 14 cycles */
1577 	if (nfp_prog->adjust_head_location != UINT_MAX) {
1578 		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
1579 			return -EINVAL;
1580 
1581 		emit_alu(nfp_prog, pptr_reg(nfp_prog),
1582 			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
1583 		emit_alu(nfp_prog, plen_reg(nfp_prog),
1584 			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1585 		emit_alu(nfp_prog, pv_len(nfp_prog),
1586 			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1587 
1588 		wrp_immed(nfp_prog, reg_both(0), 0);
1589 		wrp_immed(nfp_prog, reg_both(1), 0);
1590 
1591 		/* TODO: when adjust head is guaranteed to succeed we can
1592 		 * also eliminate the following if (r0 == 0) branch.
1593 		 */
1594 
1595 		return 0;
1596 	}
1597 
1598 	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
1599 	end = ret_einval + 2;
1600 
1601 	/* We need to use a temp because offset is just a part of the pkt ptr */
1602 	emit_alu(nfp_prog, tmp,
1603 		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
1604 
1605 	/* Validate result will fit within FW datapath constraints */
1606 	emit_alu(nfp_prog, reg_none(),
1607 		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
1608 	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
1609 	emit_alu(nfp_prog, reg_none(),
1610 		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
1611 	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
1612 
1613 	/* Validate the length is at least ETH_HLEN */
1614 	emit_alu(nfp_prog, tmp_len,
1615 		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1616 	emit_alu(nfp_prog, reg_none(),
1617 		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
1618 	emit_br(nfp_prog, BR_BMI, ret_einval, 0);
1619 
1620 	/* Load the ret code */
1621 	wrp_immed(nfp_prog, reg_both(0), 0);
1622 	wrp_immed(nfp_prog, reg_both(1), 0);
1623 
1624 	/* Modify the packet metadata */
1625 	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
1626 
1627 	/* Skip over the -EINVAL ret code (defer 2) */
1628 	emit_br(nfp_prog, BR_UNC, end, 2);
1629 
1630 	emit_alu(nfp_prog, plen_reg(nfp_prog),
1631 		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1632 	emit_alu(nfp_prog, pv_len(nfp_prog),
1633 		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1634 
1635 	/* return -EINVAL target */
1636 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
1637 		return -EINVAL;
1638 
1639 	wrp_immed(nfp_prog, reg_both(0), -22);
1640 	wrp_immed(nfp_prog, reg_both(1), ~0);
1641 
1642 	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
1643 		return -EINVAL;
1644 
1645 	return 0;
1646 }
1647 
1648 static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1649 {
1650 	u32 ret_einval, end;
1651 	swreg plen, delta;
1652 
1653 	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));
1654 
1655 	plen = imm_a(nfp_prog);
1656 	delta = reg_a(2 * 2);
1657 
1658 	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
1659 	end = nfp_prog_current_offset(nfp_prog) + 11;
1660 
1661 	/* Calculate resulting length */
1662 	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
1663 	/* delta == 0 is not allowed by the kernel, add must overflow to make
1664 	 * length smaller.
1665 	 */
1666 	emit_br(nfp_prog, BR_BCC, ret_einval, 0);
1667 
1668 	/* if (new_len < 14) then -EINVAL */
1669 	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
1670 	emit_br(nfp_prog, BR_BMI, ret_einval, 0);
1671 
1672 	emit_alu(nfp_prog, plen_reg(nfp_prog),
1673 		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
1674 	emit_alu(nfp_prog, pv_len(nfp_prog),
1675 		 pv_len(nfp_prog), ALU_OP_ADD, delta);
1676 
1677 	emit_br(nfp_prog, BR_UNC, end, 2);
1678 	wrp_immed(nfp_prog, reg_both(0), 0);
1679 	wrp_immed(nfp_prog, reg_both(1), 0);
1680 
1681 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
1682 		return -EINVAL;
1683 
1684 	wrp_immed(nfp_prog, reg_both(0), -22);
1685 	wrp_immed(nfp_prog, reg_both(1), ~0);
1686 
1687 	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
1688 		return -EINVAL;
1689 
1690 	return 0;
1691 }
1692 
1693 static int
1694 map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1695 {
1696 	bool load_lm_ptr;
1697 	u32 ret_tgt;
1698 	s64 lm_off;
1699 
1700 	/* We only have to reload LM0 if the key is not at start of stack */
1701 	lm_off = nfp_prog->stack_frame_depth;
1702 	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
1703 	load_lm_ptr = meta->arg2.var_off || lm_off;
1704 
1705 	/* Set LM0 to start of key */
1706 	if (load_lm_ptr)
1707 		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
1708 	if (meta->func_id == BPF_FUNC_map_update_elem)
1709 		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);
1710 
1711 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
1712 		     2, RELO_BR_HELPER);
1713 	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
1714 
1715 	/* Load map ID into A0 */
1716 	wrp_mov(nfp_prog, reg_a(0), reg_a(2));
1717 
1718 	/* Load the return address into B0 */
1719 	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
1720 
1721 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
1722 		return -EINVAL;
1723 
1724 	/* Reset the LM0 pointer */
1725 	if (!load_lm_ptr)
1726 		return 0;
1727 
1728 	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
1729 	wrp_nops(nfp_prog, 3);
1730 
1731 	return 0;
1732 }
1733 
1734 static int
1735 nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1736 {
1737 	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
1738 	/* CSR value is read in following immed[gpr, 0] */
1739 	emit_immed(nfp_prog, reg_both(0), 0,
1740 		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
1741 	emit_immed(nfp_prog, reg_both(1), 0,
1742 		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
1743 	return 0;
1744 }
1745 
1746 static int
1747 nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1748 {
1749 	swreg ptr_type;
1750 	u32 ret_tgt;
1751 
1752 	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));
1753 
1754 	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
1755 
1756 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
1757 		     2, RELO_BR_HELPER);
1758 
1759 	/* Load ptr type into A1 */
1760 	wrp_mov(nfp_prog, reg_a(1), ptr_type);
1761 
1762 	/* Load the return address into B0 */
1763 	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
1764 
1765 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
1766 		return -EINVAL;
1767 
1768 	return 0;
1769 }
1770 
1771 static int
1772 nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1773 {
1774 	u32 jmp_tgt;
1775 
1776 	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;
1777 
1778 	/* Make sure the queue id fits into FW field */
1779 	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
1780 		 ALU_OP_AND_NOT_B, reg_imm(0xff));
1781 	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);
1782 
1783 	/* Set the 'queue selected' bit and the queue value */
1784 	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
1785 		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
1786 		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
1787 	emit_ld_field(nfp_prog,
1788 		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
1789 		      SHF_SC_NONE, 0);
1790 	/* Delay slots end here, we will jump over next instruction if queue
1791 	 * value fits into the field.
1792 	 */
1793 	emit_ld_field(nfp_prog,
1794 		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
1795 		      SHF_SC_NONE, 0);
1796 
1797 	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
1798 		return -EINVAL;
1799 
1800 	return 0;
1801 }
1802 
1803 /* --- Callbacks --- */
1804 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1805 {
1806 	const struct bpf_insn *insn = &meta->insn;
1807 	u8 dst = insn->dst_reg * 2;
1808 	u8 src = insn->src_reg * 2;
1809 
1810 	if (insn->src_reg == BPF_REG_10) {
1811 		swreg stack_depth_reg;
1812 
1813 		stack_depth_reg = ur_load_imm_any(nfp_prog,
1814 						  nfp_prog->stack_frame_depth,
1815 						  stack_imm(nfp_prog));
1816 		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
1817 			 ALU_OP_ADD, stack_depth_reg);
1818 		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
1819 	} else {
1820 		wrp_reg_mov(nfp_prog, dst, src);
1821 		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
1822 	}
1823 
1824 	return 0;
1825 }
1826 
1827 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1828 {
1829 	u64 imm = meta->insn.imm; /* sign extend */
1830 
1831 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
1832 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
1833 
1834 	return 0;
1835 }
1836 
1837 static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1838 {
1839 	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
1840 }
1841 
1842 static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1843 {
1844 	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
1845 }
1846 
1847 static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1848 {
1849 	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
1850 }
1851 
1852 static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1853 {
1854 	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
1855 }
1856 
1857 static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1858 {
1859 	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
1860 }
1861 
1862 static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1863 {
1864 	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
1865 }
1866 
1867 static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1868 {
1869 	const struct bpf_insn *insn = &meta->insn;
1870 
1871 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1872 		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
1873 		 reg_b(insn->src_reg * 2));
1874 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1875 		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
1876 		 reg_b(insn->src_reg * 2 + 1));
1877 
1878 	return 0;
1879 }
1880 
1881 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1882 {
1883 	const struct bpf_insn *insn = &meta->insn;
1884 	u64 imm = insn->imm; /* sign extend */
1885 
1886 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
1887 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
1888 
1889 	return 0;
1890 }
1891 
1892 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1893 {
1894 	const struct bpf_insn *insn = &meta->insn;
1895 
1896 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1897 		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
1898 		 reg_b(insn->src_reg * 2));
1899 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1900 		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
1901 		 reg_b(insn->src_reg * 2 + 1));
1902 
1903 	return 0;
1904 }
1905 
1906 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1907 {
1908 	const struct bpf_insn *insn = &meta->insn;
1909 	u64 imm = insn->imm; /* sign extend */
1910 
1911 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
1912 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
1913 
1914 	return 0;
1915 }
1916 
1917 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1918 {
1919 	return wrp_mul(nfp_prog, meta, true, true);
1920 }
1921 
1922 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1923 {
1924 	return wrp_mul(nfp_prog, meta, true, false);
1925 }
1926 
1927 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1928 {
1929 	const struct bpf_insn *insn = &meta->insn;
1930 
1931 	return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
1932 }
1933 
1934 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1935 {
1936 	/* NOTE: verifier hook has rejected cases for which verifier doesn't
1937 	 * know whether the source operand is constant or not.
1938 	 */
1939 	return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
1940 }
1941 
1942 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1943 {
1944 	const struct bpf_insn *insn = &meta->insn;
1945 
1946 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
1947 		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
1948 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
1949 		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));
1950 
1951 	return 0;
1952 }
1953 
1954 /* Pseudo code:
1955  *   if shift_amt >= 32
1956  *     dst_high = dst_low << shift_amt[4:0]
1957  *     dst_low = 0;
1958  *   else
1959  *     dst_high = (dst_high, dst_low) >> (32 - shift_amt)
1960  *     dst_low = dst_low << shift_amt
1961  *
1962  * The indirect shift will use the same logic at runtime.
1963  */
1964 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
1965 {
1966 	if (shift_amt < 32) {
1967 		emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
1968 			 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
1969 			 32 - shift_amt);
1970 		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
1971 			 reg_b(dst), SHF_SC_L_SHF, shift_amt);
1972 	} else if (shift_amt == 32) {
1973 		wrp_reg_mov(nfp_prog, dst + 1, dst);
1974 		wrp_immed(nfp_prog, reg_both(dst), 0);
1975 	} else if (shift_amt > 32) {
1976 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
1977 			 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32);
1978 		wrp_immed(nfp_prog, reg_both(dst), 0);
1979 	}
1980 
1981 	return 0;
1982 }
1983 
1984 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1985 {
1986 	const struct bpf_insn *insn = &meta->insn;
1987 	u8 dst = insn->dst_reg * 2;
1988 
1989 	return __shl_imm64(nfp_prog, dst, insn->imm);
1990 }
1991 
1992 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
1993 {
1994 	emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB,
1995 		 reg_b(src));
1996 	emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0));
1997 	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE,
1998 		       reg_b(dst), SHF_SC_R_DSHF);
1999 }
2000 
2001 /* NOTE: for indirect left shift, HIGH part should be calculated first. */
2002 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2003 {
2004 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2005 	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2006 		       reg_b(dst), SHF_SC_L_SHF);
2007 }
2008 
2009 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2010 {
2011 	shl_reg64_lt32_high(nfp_prog, dst, src);
2012 	shl_reg64_lt32_low(nfp_prog, dst, src);
2013 }
2014 
2015 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2016 {
2017 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2018 	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2019 		       reg_b(dst), SHF_SC_L_SHF);
2020 	wrp_immed(nfp_prog, reg_both(dst), 0);
2021 }
2022 
2023 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2024 {
2025 	const struct bpf_insn *insn = &meta->insn;
2026 	u64 umin, umax;
2027 	u8 dst, src;
2028 
2029 	dst = insn->dst_reg * 2;
2030 	umin = meta->umin_src;
2031 	umax = meta->umax_src;
2032 	if (umin == umax)
2033 		return __shl_imm64(nfp_prog, dst, umin);
2034 
2035 	src = insn->src_reg * 2;
2036 	if (umax < 32) {
2037 		shl_reg64_lt32(nfp_prog, dst, src);
2038 	} else if (umin >= 32) {
2039 		shl_reg64_ge32(nfp_prog, dst, src);
2040 	} else {
2041 		/* Generate different instruction sequences depending on runtime
2042 		 * value of shift amount.
2043 		 */
2044 		u16 label_ge32, label_end;
2045 
2046 		label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
2047 		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2048 
2049 		shl_reg64_lt32_high(nfp_prog, dst, src);
2050 		label_end = nfp_prog_current_offset(nfp_prog) + 6;
2051 		emit_br(nfp_prog, BR_UNC, label_end, 2);
2052 		/* shl_reg64_lt32_low packed in delay slot. */
2053 		shl_reg64_lt32_low(nfp_prog, dst, src);
2054 
2055 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2056 			return -EINVAL;
2057 		shl_reg64_ge32(nfp_prog, dst, src);
2058 
2059 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2060 			return -EINVAL;
2061 	}
2062 
2063 	return 0;
2064 }
2065 
2066 /* Pseudo code:
2067  *   if shift_amt >= 32
2068  *     dst_high = 0;
2069  *     dst_low = dst_high >> shift_amt[4:0]
2070  *   else
2071  *     dst_high = dst_high >> shift_amt
2072  *     dst_low = (dst_high, dst_low) >> shift_amt
2073  *
2074  * The indirect shift will use the same logic at runtime.
2075  */
2076 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2077 {
2078 	if (shift_amt < 32) {
2079 		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2080 			 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
2081 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2082 			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2083 	} else if (shift_amt == 32) {
2084 		wrp_reg_mov(nfp_prog, dst, dst + 1);
2085 		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2086 	} else if (shift_amt > 32) {
2087 		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2088 			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
2089 		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2096 {
2097 	const struct bpf_insn *insn = &meta->insn;
2098 	u8 dst = insn->dst_reg * 2;
2099 
2100 	return __shr_imm64(nfp_prog, dst, insn->imm);
2101 }
2102 
2103 /* NOTE: for indirect right shift, LOW part should be calculated first. */
2104 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2105 {
2106 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2107 	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2108 		       reg_b(dst + 1), SHF_SC_R_SHF);
2109 }
2110 
2111 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2112 {
2113 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2114 	emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2115 		       reg_b(dst), SHF_SC_R_DSHF);
2116 }
2117 
2118 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2119 {
2120 	shr_reg64_lt32_low(nfp_prog, dst, src);
2121 	shr_reg64_lt32_high(nfp_prog, dst, src);
2122 }
2123 
2124 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2125 {
2126 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2127 	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2128 		       reg_b(dst + 1), SHF_SC_R_SHF);
2129 	wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2130 }
2131 
2132 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2133 {
2134 	const struct bpf_insn *insn = &meta->insn;
2135 	u64 umin, umax;
2136 	u8 dst, src;
2137 
2138 	dst = insn->dst_reg * 2;
2139 	umin = meta->umin_src;
2140 	umax = meta->umax_src;
2141 	if (umin == umax)
2142 		return __shr_imm64(nfp_prog, dst, umin);
2143 
2144 	src = insn->src_reg * 2;
2145 	if (umax < 32) {
2146 		shr_reg64_lt32(nfp_prog, dst, src);
2147 	} else if (umin >= 32) {
2148 		shr_reg64_ge32(nfp_prog, dst, src);
2149 	} else {
2150 		/* Generate different instruction sequences depending on runtime
2151 		 * value of shift amount.
2152 		 */
2153 		u16 label_ge32, label_end;
2154 
2155 		label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2156 		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2157 		shr_reg64_lt32_low(nfp_prog, dst, src);
2158 		label_end = nfp_prog_current_offset(nfp_prog) + 6;
2159 		emit_br(nfp_prog, BR_UNC, label_end, 2);
2160 		/* shr_reg64_lt32_high packed in delay slot. */
2161 		shr_reg64_lt32_high(nfp_prog, dst, src);
2162 
2163 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2164 			return -EINVAL;
2165 		shr_reg64_ge32(nfp_prog, dst, src);
2166 
2167 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2168 			return -EINVAL;
2169 	}
2170 
2171 	return 0;
2172 }
2173 
2174 /* Code logic is the same as __shr_imm64 except ashr requires signedness bit
2175  * told through PREV_ALU result.
2176  */
2177 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2178 {
2179 	if (shift_amt < 32) {
2180 		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2181 			 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
2182 		/* Set signedness bit. */
2183 		emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2184 			 reg_imm(0));
2185 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2186 			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2187 	} else if (shift_amt == 32) {
2188 		/* NOTE: this also helps setting signedness bit. */
2189 		wrp_reg_mov(nfp_prog, dst, dst + 1);
2190 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2191 			 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2192 	} else if (shift_amt > 32) {
2193 		emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2194 			 reg_imm(0));
2195 		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2196 			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
2197 		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2198 			 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2199 	}
2200 
2201 	return 0;
2202 }
2203 
2204 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2205 {
2206 	const struct bpf_insn *insn = &meta->insn;
2207 	u8 dst = insn->dst_reg * 2;
2208 
2209 	return __ashr_imm64(nfp_prog, dst, insn->imm);
2210 }
2211 
2212 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2213 {
2214 	/* NOTE: the first insn will set both indirect shift amount (source A)
2215 	 * and signedness bit (MSB of result).
2216 	 */
2217 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2218 	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2219 		       reg_b(dst + 1), SHF_SC_R_SHF);
2220 }
2221 
2222 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2223 {
2224 	/* NOTE: it is the same as logic shift because we don't need to shift in
2225 	 * signedness bit when the shift amount is less than 32.
2226 	 */
2227 	return shr_reg64_lt32_low(nfp_prog, dst, src);
2228 }
2229 
2230 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2231 {
2232 	ashr_reg64_lt32_low(nfp_prog, dst, src);
2233 	ashr_reg64_lt32_high(nfp_prog, dst, src);
2234 }
2235 
2236 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2237 {
2238 	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2239 	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2240 		       reg_b(dst + 1), SHF_SC_R_SHF);
2241 	emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2242 		 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2243 }
2244 
2245 /* Like ashr_imm64, but need to use indirect shift. */
2246 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2247 {
2248 	const struct bpf_insn *insn = &meta->insn;
2249 	u64 umin, umax;
2250 	u8 dst, src;
2251 
2252 	dst = insn->dst_reg * 2;
2253 	umin = meta->umin_src;
2254 	umax = meta->umax_src;
2255 	if (umin == umax)
2256 		return __ashr_imm64(nfp_prog, dst, umin);
2257 
2258 	src = insn->src_reg * 2;
2259 	if (umax < 32) {
2260 		ashr_reg64_lt32(nfp_prog, dst, src);
2261 	} else if (umin >= 32) {
2262 		ashr_reg64_ge32(nfp_prog, dst, src);
2263 	} else {
2264 		u16 label_ge32, label_end;
2265 
2266 		label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2267 		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2268 		ashr_reg64_lt32_low(nfp_prog, dst, src);
2269 		label_end = nfp_prog_current_offset(nfp_prog) + 6;
2270 		emit_br(nfp_prog, BR_UNC, label_end, 2);
2271 		/* ashr_reg64_lt32_high packed in delay slot. */
2272 		ashr_reg64_lt32_high(nfp_prog, dst, src);
2273 
2274 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2275 			return -EINVAL;
2276 		ashr_reg64_ge32(nfp_prog, dst, src);
2277 
2278 		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2279 			return -EINVAL;
2280 	}
2281 
2282 	return 0;
2283 }
2284 
2285 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2286 {
2287 	const struct bpf_insn *insn = &meta->insn;
2288 
2289 	wrp_reg_mov(nfp_prog, insn->dst_reg * 2,  insn->src_reg * 2);
2290 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2291 
2292 	return 0;
2293 }
2294 
2295 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2296 {
2297 	const struct bpf_insn *insn = &meta->insn;
2298 
2299 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
2300 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2301 
2302 	return 0;
2303 }
2304 
2305 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2306 {
2307 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
2308 }
2309 
2310 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2311 {
2312 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
2313 }
2314 
2315 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2316 {
2317 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
2318 }
2319 
2320 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2321 {
2322 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
2323 }
2324 
2325 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2326 {
2327 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
2328 }
2329 
2330 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2331 {
2332 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
2333 }
2334 
2335 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2336 {
2337 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
2338 }
2339 
2340 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2341 {
2342 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
2343 }
2344 
2345 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2346 {
2347 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
2348 }
2349 
2350 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2351 {
2352 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
2353 }
2354 
2355 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2356 {
2357 	return wrp_mul(nfp_prog, meta, false, true);
2358 }
2359 
2360 static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2361 {
2362 	return wrp_mul(nfp_prog, meta, false, false);
2363 }
2364 
2365 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2366 {
2367 	return div_reg64(nfp_prog, meta);
2368 }
2369 
2370 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2371 {
2372 	return div_imm64(nfp_prog, meta);
2373 }
2374 
2375 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2376 {
2377 	u8 dst = meta->insn.dst_reg * 2;
2378 
2379 	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
2380 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2381 
2382 	return 0;
2383 }
2384 
2385 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2386 {
2387 	const struct bpf_insn *insn = &meta->insn;
2388 
2389 	if (!insn->imm)
2390 		return 1; /* TODO: zero shift means indirect */
2391 
2392 	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
2393 		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
2394 		 SHF_SC_L_SHF, insn->imm);
2395 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2396 
2397 	return 0;
2398 }
2399 
2400 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2401 {
2402 	const struct bpf_insn *insn = &meta->insn;
2403 	u8 gpr = insn->dst_reg * 2;
2404 
2405 	switch (insn->imm) {
2406 	case 16:
2407 		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
2408 			      SHF_SC_R_ROT, 8);
2409 		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
2410 			      SHF_SC_R_SHF, 16);
2411 
2412 		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2413 		break;
2414 	case 32:
2415 		wrp_end32(nfp_prog, reg_a(gpr), gpr);
2416 		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2417 		break;
2418 	case 64:
2419 		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
2420 
2421 		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
2422 		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
2423 		break;
2424 	}
2425 
2426 	return 0;
2427 }
2428 
2429 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2430 {
2431 	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
2432 	u32 imm_lo, imm_hi;
2433 	u8 dst;
2434 
2435 	dst = prev->insn.dst_reg * 2;
2436 	imm_lo = prev->insn.imm;
2437 	imm_hi = meta->insn.imm;
2438 
2439 	wrp_immed(nfp_prog, reg_both(dst), imm_lo);
2440 
2441 	/* mov is always 1 insn, load imm may be two, so try to use mov */
2442 	if (imm_hi == imm_lo)
2443 		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
2444 	else
2445 		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
2446 
2447 	return 0;
2448 }
2449 
2450 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2451 {
2452 	meta->double_cb = imm_ld8_part2;
2453 	return 0;
2454 }
2455 
2456 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2457 {
2458 	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
2459 }
2460 
2461 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2462 {
2463 	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
2464 }
2465 
2466 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2467 {
2468 	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
2469 }
2470 
2471 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2472 {
2473 	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2474 				     meta->insn.src_reg * 2, 1);
2475 }
2476 
2477 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2478 {
2479 	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2480 				     meta->insn.src_reg * 2, 2);
2481 }
2482 
2483 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2484 {
2485 	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2486 				     meta->insn.src_reg * 2, 4);
2487 }
2488 
2489 static int
2490 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2491 	      unsigned int size, unsigned int ptr_off)
2492 {
2493 	return mem_op_stack(nfp_prog, meta, size, ptr_off,
2494 			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
2495 			    true, wrp_lmem_load);
2496 }
2497 
2498 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2499 		       u8 size)
2500 {
2501 	swreg dst = reg_both(meta->insn.dst_reg * 2);
2502 
2503 	switch (meta->insn.off) {
2504 	case offsetof(struct __sk_buff, len):
2505 		if (size != FIELD_SIZEOF(struct __sk_buff, len))
2506 			return -EOPNOTSUPP;
2507 		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
2508 		break;
2509 	case offsetof(struct __sk_buff, data):
2510 		if (size != FIELD_SIZEOF(struct __sk_buff, data))
2511 			return -EOPNOTSUPP;
2512 		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2513 		break;
2514 	case offsetof(struct __sk_buff, data_end):
2515 		if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
2516 			return -EOPNOTSUPP;
2517 		emit_alu(nfp_prog, dst,
2518 			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
2519 		break;
2520 	default:
2521 		return -EOPNOTSUPP;
2522 	}
2523 
2524 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2525 
2526 	return 0;
2527 }
2528 
2529 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2530 		       u8 size)
2531 {
2532 	swreg dst = reg_both(meta->insn.dst_reg * 2);
2533 
2534 	switch (meta->insn.off) {
2535 	case offsetof(struct xdp_md, data):
2536 		if (size != FIELD_SIZEOF(struct xdp_md, data))
2537 			return -EOPNOTSUPP;
2538 		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2539 		break;
2540 	case offsetof(struct xdp_md, data_end):
2541 		if (size != FIELD_SIZEOF(struct xdp_md, data_end))
2542 			return -EOPNOTSUPP;
2543 		emit_alu(nfp_prog, dst,
2544 			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
2545 		break;
2546 	default:
2547 		return -EOPNOTSUPP;
2548 	}
2549 
2550 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2551 
2552 	return 0;
2553 }
2554 
2555 static int
2556 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2557 	     unsigned int size)
2558 {
2559 	swreg tmp_reg;
2560 
2561 	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2562 
2563 	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
2564 					 tmp_reg, meta->insn.dst_reg * 2, size);
2565 }
2566 
2567 static int
2568 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2569 	     unsigned int size)
2570 {
2571 	swreg tmp_reg;
2572 
2573 	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2574 
2575 	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
2576 					 tmp_reg, meta->insn.dst_reg * 2, size);
2577 }
2578 
2579 static void
2580 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
2581 			   struct nfp_insn_meta *meta)
2582 {
2583 	s16 range_start = meta->pkt_cache.range_start;
2584 	s16 range_end = meta->pkt_cache.range_end;
2585 	swreg src_base, off;
2586 	u8 xfer_num, len;
2587 	bool indir;
2588 
2589 	off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
2590 	src_base = reg_a(meta->insn.src_reg * 2);
2591 	len = range_end - range_start;
2592 	xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;
2593 
2594 	indir = len > 8 * REG_WIDTH;
2595 	/* Setup PREV_ALU for indirect mode. */
2596 	if (indir)
2597 		wrp_immed(nfp_prog, reg_none(),
2598 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
2599 
2600 	/* Cache memory into transfer-in registers. */
2601 	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
2602 		     off, xfer_num - 1, CMD_CTX_SWAP, indir);
2603 }
2604 
2605 static int
2606 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
2607 				     struct nfp_insn_meta *meta,
2608 				     unsigned int size)
2609 {
2610 	s16 range_start = meta->pkt_cache.range_start;
2611 	s16 insn_off = meta->insn.off - range_start;
2612 	swreg dst_lo, dst_hi, src_lo, src_mid;
2613 	u8 dst_gpr = meta->insn.dst_reg * 2;
2614 	u8 len_lo = size, len_mid = 0;
2615 	u8 idx = insn_off / REG_WIDTH;
2616 	u8 off = insn_off % REG_WIDTH;
2617 
2618 	dst_hi = reg_both(dst_gpr + 1);
2619 	dst_lo = reg_both(dst_gpr);
2620 	src_lo = reg_xfer(idx);
2621 
2622 	/* The read length could involve as many as three registers. */
2623 	if (size > REG_WIDTH - off) {
2624 		/* Calculate the part in the second register. */
2625 		len_lo = REG_WIDTH - off;
2626 		len_mid = size - len_lo;
2627 
2628 		/* Calculate the part in the third register. */
2629 		if (size > 2 * REG_WIDTH - off)
2630 			len_mid = REG_WIDTH;
2631 	}
2632 
2633 	wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
2634 
2635 	if (!len_mid) {
2636 		wrp_immed(nfp_prog, dst_hi, 0);
2637 		return 0;
2638 	}
2639 
2640 	src_mid = reg_xfer(idx + 1);
2641 
2642 	if (size <= REG_WIDTH) {
2643 		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
2644 		wrp_immed(nfp_prog, dst_hi, 0);
2645 	} else {
2646 		swreg src_hi = reg_xfer(idx + 2);
2647 
2648 		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
2649 				   REG_WIDTH - len_lo, len_lo);
2650 		wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
2651 				REG_WIDTH - len_lo);
2652 		wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
2653 				   len_lo);
2654 	}
2655 
2656 	return 0;
2657 }
2658 
2659 static int
2660 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
2661 				   struct nfp_insn_meta *meta,
2662 				   unsigned int size)
2663 {
2664 	swreg dst_lo, dst_hi, src_lo;
2665 	u8 dst_gpr, idx;
2666 
2667 	idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
2668 	dst_gpr = meta->insn.dst_reg * 2;
2669 	dst_hi = reg_both(dst_gpr + 1);
2670 	dst_lo = reg_both(dst_gpr);
2671 	src_lo = reg_xfer(idx);
2672 
2673 	if (size < REG_WIDTH) {
2674 		wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
2675 		wrp_immed(nfp_prog, dst_hi, 0);
2676 	} else if (size == REG_WIDTH) {
2677 		wrp_mov(nfp_prog, dst_lo, src_lo);
2678 		wrp_immed(nfp_prog, dst_hi, 0);
2679 	} else {
2680 		swreg src_hi = reg_xfer(idx + 1);
2681 
2682 		wrp_mov(nfp_prog, dst_lo, src_lo);
2683 		wrp_mov(nfp_prog, dst_hi, src_hi);
2684 	}
2685 
2686 	return 0;
2687 }
2688 
2689 static int
2690 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
2691 			   struct nfp_insn_meta *meta, unsigned int size)
2692 {
2693 	u8 off = meta->insn.off - meta->pkt_cache.range_start;
2694 
2695 	if (IS_ALIGNED(off, REG_WIDTH))
2696 		return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);
2697 
2698 	return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
2699 }
2700 
2701 static int
2702 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2703 	unsigned int size)
2704 {
2705 	if (meta->ldst_gather_len)
2706 		return nfp_cpp_memcpy(nfp_prog, meta);
2707 
2708 	if (meta->ptr.type == PTR_TO_CTX) {
2709 		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2710 			return mem_ldx_xdp(nfp_prog, meta, size);
2711 		else
2712 			return mem_ldx_skb(nfp_prog, meta, size);
2713 	}
2714 
2715 	if (meta->ptr.type == PTR_TO_PACKET) {
2716 		if (meta->pkt_cache.range_end) {
2717 			if (meta->pkt_cache.do_init)
2718 				mem_ldx_data_init_pktcache(nfp_prog, meta);
2719 
2720 			return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
2721 		} else {
2722 			return mem_ldx_data(nfp_prog, meta, size);
2723 		}
2724 	}
2725 
2726 	if (meta->ptr.type == PTR_TO_STACK)
2727 		return mem_ldx_stack(nfp_prog, meta, size,
2728 				     meta->ptr.off + meta->ptr.var_off.value);
2729 
2730 	if (meta->ptr.type == PTR_TO_MAP_VALUE)
2731 		return mem_ldx_emem(nfp_prog, meta, size);
2732 
2733 	return -EOPNOTSUPP;
2734 }
2735 
2736 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2737 {
2738 	return mem_ldx(nfp_prog, meta, 1);
2739 }
2740 
2741 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2742 {
2743 	return mem_ldx(nfp_prog, meta, 2);
2744 }
2745 
2746 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2747 {
2748 	return mem_ldx(nfp_prog, meta, 4);
2749 }
2750 
2751 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2752 {
2753 	return mem_ldx(nfp_prog, meta, 8);
2754 }
2755 
2756 static int
2757 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2758 	    unsigned int size)
2759 {
2760 	u64 imm = meta->insn.imm; /* sign extend */
2761 	swreg off_reg;
2762 
2763 	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2764 
2765 	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2766 				  imm, size);
2767 }
2768 
2769 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2770 		  unsigned int size)
2771 {
2772 	if (meta->ptr.type == PTR_TO_PACKET)
2773 		return mem_st_data(nfp_prog, meta, size);
2774 
2775 	return -EOPNOTSUPP;
2776 }
2777 
2778 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2779 {
2780 	return mem_st(nfp_prog, meta, 1);
2781 }
2782 
2783 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2784 {
2785 	return mem_st(nfp_prog, meta, 2);
2786 }
2787 
2788 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2789 {
2790 	return mem_st(nfp_prog, meta, 4);
2791 }
2792 
2793 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2794 {
2795 	return mem_st(nfp_prog, meta, 8);
2796 }
2797 
2798 static int
2799 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2800 	     unsigned int size)
2801 {
2802 	swreg off_reg;
2803 
2804 	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2805 
2806 	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2807 				   meta->insn.src_reg * 2, size);
2808 }
2809 
2810 static int
2811 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2812 	      unsigned int size, unsigned int ptr_off)
2813 {
2814 	return mem_op_stack(nfp_prog, meta, size, ptr_off,
2815 			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
2816 			    false, wrp_lmem_store);
2817 }
2818 
2819 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2820 {
2821 	switch (meta->insn.off) {
2822 	case offsetof(struct xdp_md, rx_queue_index):
2823 		return nfp_queue_select(nfp_prog, meta);
2824 	}
2825 
2826 	WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */
2827 	return -EOPNOTSUPP;
2828 }
2829 
2830 static int
2831 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2832 	unsigned int size)
2833 {
2834 	if (meta->ptr.type == PTR_TO_PACKET)
2835 		return mem_stx_data(nfp_prog, meta, size);
2836 
2837 	if (meta->ptr.type == PTR_TO_STACK)
2838 		return mem_stx_stack(nfp_prog, meta, size,
2839 				     meta->ptr.off + meta->ptr.var_off.value);
2840 
2841 	return -EOPNOTSUPP;
2842 }
2843 
2844 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2845 {
2846 	return mem_stx(nfp_prog, meta, 1);
2847 }
2848 
2849 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2850 {
2851 	return mem_stx(nfp_prog, meta, 2);
2852 }
2853 
2854 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2855 {
2856 	if (meta->ptr.type == PTR_TO_CTX)
2857 		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2858 			return mem_stx_xdp(nfp_prog, meta);
2859 	return mem_stx(nfp_prog, meta, 4);
2860 }
2861 
2862 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2863 {
2864 	return mem_stx(nfp_prog, meta, 8);
2865 }
2866 
2867 static int
2868 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
2869 {
2870 	u8 dst_gpr = meta->insn.dst_reg * 2;
2871 	u8 src_gpr = meta->insn.src_reg * 2;
2872 	unsigned int full_add, out;
2873 	swreg addra, addrb, off;
2874 
2875 	off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2876 
2877 	/* We can fit 16 bits into command immediate, if we know the immediate
2878 	 * is guaranteed to either always or never fit into 16 bit we only
2879 	 * generate code to handle that particular case, otherwise generate
2880 	 * code for both.
2881 	 */
2882 	out = nfp_prog_current_offset(nfp_prog);
2883 	full_add = nfp_prog_current_offset(nfp_prog);
2884 
2885 	if (meta->insn.off) {
2886 		out += 2;
2887 		full_add += 2;
2888 	}
2889 	if (meta->xadd_maybe_16bit) {
2890 		out += 3;
2891 		full_add += 3;
2892 	}
2893 	if (meta->xadd_over_16bit)
2894 		out += 2 + is64;
2895 	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2896 		out += 5;
2897 		full_add += 5;
2898 	}
2899 
2900 	/* Generate the branch for choosing add_imm vs add */
2901 	if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2902 		swreg max_imm = imm_a(nfp_prog);
2903 
2904 		wrp_immed(nfp_prog, max_imm, 0xffff);
2905 		emit_alu(nfp_prog, reg_none(),
2906 			 max_imm, ALU_OP_SUB, reg_b(src_gpr));
2907 		emit_alu(nfp_prog, reg_none(),
2908 			 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
2909 		emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
2910 		/* defer for add */
2911 	}
2912 
2913 	/* If insn has an offset add to the address */
2914 	if (!meta->insn.off) {
2915 		addra = reg_a(dst_gpr);
2916 		addrb = reg_b(dst_gpr + 1);
2917 	} else {
2918 		emit_alu(nfp_prog, imma_a(nfp_prog),
2919 			 reg_a(dst_gpr), ALU_OP_ADD, off);
2920 		emit_alu(nfp_prog, imma_b(nfp_prog),
2921 			 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
2922 		addra = imma_a(nfp_prog);
2923 		addrb = imma_b(nfp_prog);
2924 	}
2925 
2926 	/* Generate the add_imm if 16 bits are possible */
2927 	if (meta->xadd_maybe_16bit) {
2928 		swreg prev_alu = imm_a(nfp_prog);
2929 
2930 		wrp_immed(nfp_prog, prev_alu,
2931 			  FIELD_PREP(CMD_OVE_DATA, 2) |
2932 			  CMD_OVE_LEN |
2933 			  FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
2934 		wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
2935 		emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
2936 			       addra, addrb, 0, CMD_CTX_NO_SWAP);
2937 
2938 		if (meta->xadd_over_16bit)
2939 			emit_br(nfp_prog, BR_UNC, out, 0);
2940 	}
2941 
2942 	if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
2943 		return -EINVAL;
2944 
2945 	/* Generate the add if 16 bits are not guaranteed */
2946 	if (meta->xadd_over_16bit) {
2947 		emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
2948 			 addra, addrb, is64 << 2,
2949 			 is64 ? CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1);
2950 
2951 		wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr));
2952 		if (is64)
2953 			wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1));
2954 	}
2955 
2956 	if (!nfp_prog_confirm_current_offset(nfp_prog, out))
2957 		return -EINVAL;
2958 
2959 	return 0;
2960 }
2961 
2962 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2963 {
2964 	return mem_xadd(nfp_prog, meta, false);
2965 }
2966 
2967 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2968 {
2969 	return mem_xadd(nfp_prog, meta, true);
2970 }
2971 
2972 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2973 {
2974 	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
2975 
2976 	return 0;
2977 }
2978 
2979 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2980 {
2981 	const struct bpf_insn *insn = &meta->insn;
2982 	u64 imm = insn->imm; /* sign extend */
2983 	swreg or1, or2, tmp_reg;
2984 
2985 	or1 = reg_a(insn->dst_reg * 2);
2986 	or2 = reg_b(insn->dst_reg * 2 + 1);
2987 
2988 	if (imm & ~0U) {
2989 		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
2990 		emit_alu(nfp_prog, imm_a(nfp_prog),
2991 			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
2992 		or1 = imm_a(nfp_prog);
2993 	}
2994 
2995 	if (imm >> 32) {
2996 		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
2997 		emit_alu(nfp_prog, imm_b(nfp_prog),
2998 			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
2999 		or2 = imm_b(nfp_prog);
3000 	}
3001 
3002 	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
3003 	emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3004 
3005 	return 0;
3006 }
3007 
3008 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3009 {
3010 	const struct bpf_insn *insn = &meta->insn;
3011 	u64 imm = insn->imm; /* sign extend */
3012 	swreg tmp_reg;
3013 
3014 	if (!imm) {
3015 		meta->skip = true;
3016 		return 0;
3017 	}
3018 
3019 	if (imm & ~0U) {
3020 		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3021 		emit_alu(nfp_prog, reg_none(),
3022 			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
3023 		emit_br(nfp_prog, BR_BNE, insn->off, 0);
3024 	}
3025 
3026 	if (imm >> 32) {
3027 		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3028 		emit_alu(nfp_prog, reg_none(),
3029 			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
3030 		emit_br(nfp_prog, BR_BNE, insn->off, 0);
3031 	}
3032 
3033 	return 0;
3034 }
3035 
3036 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3037 {
3038 	const struct bpf_insn *insn = &meta->insn;
3039 	u64 imm = insn->imm; /* sign extend */
3040 	swreg tmp_reg;
3041 
3042 	if (!imm) {
3043 		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
3044 			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
3045 		emit_br(nfp_prog, BR_BNE, insn->off, 0);
3046 		return 0;
3047 	}
3048 
3049 	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3050 	emit_alu(nfp_prog, reg_none(),
3051 		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3052 	emit_br(nfp_prog, BR_BNE, insn->off, 0);
3053 
3054 	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3055 	emit_alu(nfp_prog, reg_none(),
3056 		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
3057 	emit_br(nfp_prog, BR_BNE, insn->off, 0);
3058 
3059 	return 0;
3060 }
3061 
3062 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3063 {
3064 	const struct bpf_insn *insn = &meta->insn;
3065 
3066 	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
3067 		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
3068 	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
3069 		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
3070 	emit_alu(nfp_prog, reg_none(),
3071 		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
3072 	emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3073 
3074 	return 0;
3075 }
3076 
3077 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3078 {
3079 	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
3080 }
3081 
3082 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3083 {
3084 	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
3085 }
3086 
3087 static int
3088 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3089 {
3090 	u32 ret_tgt, stack_depth, offset_br;
3091 	swreg tmp_reg;
3092 
3093 	stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN);
3094 	/* Space for saving the return address is accounted for by the callee,
3095 	 * so stack_depth can be zero for the main function.
3096 	 */
3097 	if (stack_depth) {
3098 		tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3099 					  stack_imm(nfp_prog));
3100 		emit_alu(nfp_prog, stack_reg(nfp_prog),
3101 			 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg);
3102 		emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3103 			    NFP_CSR_ACT_LM_ADDR0);
3104 	}
3105 
3106 	/* Two cases for jumping to the callee:
3107 	 *
3108 	 * - If callee uses and needs to save R6~R9 then:
3109 	 *     1. Put the start offset of the callee into imm_b(). This will
3110 	 *        require a fixup step, as we do not necessarily know this
3111 	 *        address yet.
3112 	 *     2. Put the return address from the callee to the caller into
3113 	 *        register ret_reg().
3114 	 *     3. (After defer slots are consumed) Jump to the subroutine that
3115 	 *        pushes the registers to the stack.
3116 	 *   The subroutine acts as a trampoline, and returns to the address in
3117 	 *   imm_b(), i.e. jumps to the callee.
3118 	 *
3119 	 * - If callee does not need to save R6~R9 then just load return
3120 	 *   address to the caller in ret_reg(), and jump to the callee
3121 	 *   directly.
3122 	 *
3123 	 * Using ret_reg() to pass the return address to the callee is set here
3124 	 * as a convention. The callee can then push this address onto its
3125 	 * stack frame in its prologue. The advantages of passing the return
3126 	 * address through ret_reg(), instead of pushing it to the stack right
3127 	 * here, are the following:
3128 	 * - It looks cleaner.
3129 	 * - If the called function is called multiple time, we get a lower
3130 	 *   program size.
3131 	 * - We save two no-op instructions that should be added just before
3132 	 *   the emit_br() when stack depth is not null otherwise.
3133 	 * - If we ever find a register to hold the return address during whole
3134 	 *   execution of the callee, we will not have to push the return
3135 	 *   address to the stack for leaf functions.
3136 	 */
3137 	if (!meta->jmp_dst) {
3138 		pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
3139 		return -ELOOP;
3140 	}
3141 	if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
3142 		ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
3143 		emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
3144 			     RELO_BR_GO_CALL_PUSH_REGS);
3145 		offset_br = nfp_prog_current_offset(nfp_prog);
3146 		wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
3147 	} else {
3148 		ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
3149 		emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
3150 		offset_br = nfp_prog_current_offset(nfp_prog);
3151 	}
3152 	wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
3153 
3154 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
3155 		return -EINVAL;
3156 
3157 	if (stack_depth) {
3158 		tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3159 					  stack_imm(nfp_prog));
3160 		emit_alu(nfp_prog, stack_reg(nfp_prog),
3161 			 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
3162 		emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3163 			    NFP_CSR_ACT_LM_ADDR0);
3164 		wrp_nops(nfp_prog, 3);
3165 	}
3166 
3167 	meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
3168 	meta->num_insns_after_br -= offset_br;
3169 
3170 	return 0;
3171 }
3172 
3173 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3174 {
3175 	switch (meta->insn.imm) {
3176 	case BPF_FUNC_xdp_adjust_head:
3177 		return adjust_head(nfp_prog, meta);
3178 	case BPF_FUNC_xdp_adjust_tail:
3179 		return adjust_tail(nfp_prog, meta);
3180 	case BPF_FUNC_map_lookup_elem:
3181 	case BPF_FUNC_map_update_elem:
3182 	case BPF_FUNC_map_delete_elem:
3183 		return map_call_stack_common(nfp_prog, meta);
3184 	case BPF_FUNC_get_prandom_u32:
3185 		return nfp_get_prandom_u32(nfp_prog, meta);
3186 	case BPF_FUNC_perf_event_output:
3187 		return nfp_perf_event_output(nfp_prog, meta);
3188 	default:
3189 		WARN_ONCE(1, "verifier allowed unsupported function\n");
3190 		return -EOPNOTSUPP;
3191 	}
3192 }
3193 
3194 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3195 {
3196 	if (is_mbpf_pseudo_call(meta))
3197 		return bpf_to_bpf_call(nfp_prog, meta);
3198 	else
3199 		return helper_call(nfp_prog, meta);
3200 }
3201 
3202 static bool nfp_is_main_function(struct nfp_insn_meta *meta)
3203 {
3204 	return meta->subprog_idx == 0;
3205 }
3206 
3207 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3208 {
3209 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
3210 
3211 	return 0;
3212 }
3213 
3214 static int
3215 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3216 {
3217 	if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
3218 		/* Pop R6~R9 to the stack via related subroutine.
3219 		 * We loaded the return address to the caller into ret_reg().
3220 		 * This means that the subroutine does not come back here, we
3221 		 * make it jump back to the subprogram caller directly!
3222 		 */
3223 		emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
3224 			     RELO_BR_GO_CALL_POP_REGS);
3225 		/* Pop return address from the stack. */
3226 		wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3227 	} else {
3228 		/* Pop return address from the stack. */
3229 		wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3230 		/* Jump back to caller if no callee-saved registers were used
3231 		 * by the subprogram.
3232 		 */
3233 		emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
3234 	}
3235 
3236 	return 0;
3237 }
3238 
3239 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3240 {
3241 	if (nfp_is_main_function(meta))
3242 		return goto_out(nfp_prog, meta);
3243 	else
3244 		return nfp_subprog_epilogue(nfp_prog, meta);
3245 }
3246 
3247 static const instr_cb_t instr_cb[256] = {
3248 	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
3249 	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
3250 	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
3251 	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
3252 	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
3253 	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
3254 	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
3255 	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
3256 	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
3257 	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
3258 	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
3259 	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
3260 	[BPF_ALU64 | BPF_MUL | BPF_X] =	mul_reg64,
3261 	[BPF_ALU64 | BPF_MUL | BPF_K] =	mul_imm64,
3262 	[BPF_ALU64 | BPF_DIV | BPF_X] =	div_reg64,
3263 	[BPF_ALU64 | BPF_DIV | BPF_K] =	div_imm64,
3264 	[BPF_ALU64 | BPF_NEG] =		neg_reg64,
3265 	[BPF_ALU64 | BPF_LSH | BPF_X] =	shl_reg64,
3266 	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
3267 	[BPF_ALU64 | BPF_RSH | BPF_X] =	shr_reg64,
3268 	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
3269 	[BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64,
3270 	[BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64,
3271 	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
3272 	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
3273 	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
3274 	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
3275 	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
3276 	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
3277 	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
3278 	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
3279 	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
3280 	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
3281 	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
3282 	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
3283 	[BPF_ALU | BPF_MUL | BPF_X] =	mul_reg,
3284 	[BPF_ALU | BPF_MUL | BPF_K] =	mul_imm,
3285 	[BPF_ALU | BPF_DIV | BPF_X] =	div_reg,
3286 	[BPF_ALU | BPF_DIV | BPF_K] =	div_imm,
3287 	[BPF_ALU | BPF_NEG] =		neg_reg,
3288 	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
3289 	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
3290 	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
3291 	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
3292 	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
3293 	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
3294 	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
3295 	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
3296 	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
3297 	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
3298 	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
3299 	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
3300 	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
3301 	[BPF_STX | BPF_MEM | BPF_B] =	mem_stx1,
3302 	[BPF_STX | BPF_MEM | BPF_H] =	mem_stx2,
3303 	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
3304 	[BPF_STX | BPF_MEM | BPF_DW] =	mem_stx8,
3305 	[BPF_STX | BPF_XADD | BPF_W] =	mem_xadd4,
3306 	[BPF_STX | BPF_XADD | BPF_DW] =	mem_xadd8,
3307 	[BPF_ST | BPF_MEM | BPF_B] =	mem_st1,
3308 	[BPF_ST | BPF_MEM | BPF_H] =	mem_st2,
3309 	[BPF_ST | BPF_MEM | BPF_W] =	mem_st4,
3310 	[BPF_ST | BPF_MEM | BPF_DW] =	mem_st8,
3311 	[BPF_JMP | BPF_JA | BPF_K] =	jump,
3312 	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
3313 	[BPF_JMP | BPF_JGT | BPF_K] =	cmp_imm,
3314 	[BPF_JMP | BPF_JGE | BPF_K] =	cmp_imm,
3315 	[BPF_JMP | BPF_JLT | BPF_K] =	cmp_imm,
3316 	[BPF_JMP | BPF_JLE | BPF_K] =	cmp_imm,
3317 	[BPF_JMP | BPF_JSGT | BPF_K] =  cmp_imm,
3318 	[BPF_JMP | BPF_JSGE | BPF_K] =  cmp_imm,
3319 	[BPF_JMP | BPF_JSLT | BPF_K] =  cmp_imm,
3320 	[BPF_JMP | BPF_JSLE | BPF_K] =  cmp_imm,
3321 	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
3322 	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
3323 	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
3324 	[BPF_JMP | BPF_JGT | BPF_X] =	cmp_reg,
3325 	[BPF_JMP | BPF_JGE | BPF_X] =	cmp_reg,
3326 	[BPF_JMP | BPF_JLT | BPF_X] =	cmp_reg,
3327 	[BPF_JMP | BPF_JLE | BPF_X] =	cmp_reg,
3328 	[BPF_JMP | BPF_JSGT | BPF_X] =  cmp_reg,
3329 	[BPF_JMP | BPF_JSGE | BPF_X] =  cmp_reg,
3330 	[BPF_JMP | BPF_JSLT | BPF_X] =  cmp_reg,
3331 	[BPF_JMP | BPF_JSLE | BPF_X] =  cmp_reg,
3332 	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
3333 	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
3334 	[BPF_JMP | BPF_CALL] =		call,
3335 	[BPF_JMP | BPF_EXIT] =		jmp_exit,
3336 };
3337 
3338 /* --- Assembler logic --- */
3339 static int
3340 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
3341 		     struct nfp_insn_meta *jmp_dst, u32 br_idx)
3342 {
3343 	if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
3344 		pr_err("BUG: failed to fix up callee register saving\n");
3345 		return -EINVAL;
3346 	}
3347 
3348 	immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
3349 
3350 	return 0;
3351 }
3352 
3353 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
3354 {
3355 	struct nfp_insn_meta *meta, *jmp_dst;
3356 	u32 idx, br_idx;
3357 	int err;
3358 
3359 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3360 		if (meta->skip)
3361 			continue;
3362 		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
3363 			continue;
3364 		if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
3365 		    !nfp_is_main_function(meta))
3366 			continue;
3367 		if (is_mbpf_helper_call(meta))
3368 			continue;
3369 
3370 		if (list_is_last(&meta->l, &nfp_prog->insns))
3371 			br_idx = nfp_prog->last_bpf_off;
3372 		else
3373 			br_idx = list_next_entry(meta, l)->off - 1;
3374 
3375 		/* For BPF-to-BPF function call, a stack adjustment sequence is
3376 		 * generated after the return instruction. Therefore, we must
3377 		 * withdraw the length of this sequence to have br_idx pointing
3378 		 * to where the "branch" NFP instruction is expected to be.
3379 		 */
3380 		if (is_mbpf_pseudo_call(meta))
3381 			br_idx -= meta->num_insns_after_br;
3382 
3383 		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
3384 			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
3385 			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
3386 			return -ELOOP;
3387 		}
3388 
3389 		if (meta->insn.code == (BPF_JMP | BPF_EXIT))
3390 			continue;
3391 
3392 		/* Leave special branches for later */
3393 		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3394 		    RELO_BR_REL && !is_mbpf_pseudo_call(meta))
3395 			continue;
3396 
3397 		if (!meta->jmp_dst) {
3398 			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
3399 			return -ELOOP;
3400 		}
3401 
3402 		jmp_dst = meta->jmp_dst;
3403 
3404 		if (jmp_dst->skip) {
3405 			pr_err("Branch landing on removed instruction!!\n");
3406 			return -ELOOP;
3407 		}
3408 
3409 		if (is_mbpf_pseudo_call(meta) &&
3410 		    nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) {
3411 			err = nfp_fixup_immed_relo(nfp_prog, meta,
3412 						   jmp_dst, br_idx);
3413 			if (err)
3414 				return err;
3415 		}
3416 
3417 		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3418 		    RELO_BR_REL)
3419 			continue;
3420 
3421 		for (idx = meta->off; idx <= br_idx; idx++) {
3422 			if (!nfp_is_br(nfp_prog->prog[idx]))
3423 				continue;
3424 			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
3425 		}
3426 	}
3427 
3428 	return 0;
3429 }
3430 
3431 static void nfp_intro(struct nfp_prog *nfp_prog)
3432 {
3433 	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
3434 	emit_alu(nfp_prog, plen_reg(nfp_prog),
3435 		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
3436 }
3437 
3438 static void
3439 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3440 {
3441 	/* Save return address into the stack. */
3442 	wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
3443 }
3444 
3445 static void
3446 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3447 {
3448 	unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;
3449 
3450 	nfp_prog->stack_frame_depth = round_up(depth, 4);
3451 	nfp_subprog_prologue(nfp_prog, meta);
3452 }
3453 
3454 bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
3455 {
3456 	return meta->flags & FLAG_INSN_IS_SUBPROG_START;
3457 }
3458 
3459 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
3460 {
3461 	/* TC direct-action mode:
3462 	 *   0,1   ok        NOT SUPPORTED[1]
3463 	 *   2   drop  0x22 -> drop,  count as stat1
3464 	 *   4,5 nuke  0x02 -> drop
3465 	 *   7  redir  0x44 -> redir, count as stat2
3466 	 *   * unspec  0x11 -> pass,  count as stat0
3467 	 *
3468 	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
3469 	 *     the exact decision made.  We are forced to support UNSPEC
3470 	 *     to handle aborts so that's the only one we handle for passing
3471 	 *     packets up the stack.
3472 	 */
3473 	/* Target for aborts */
3474 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
3475 
3476 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3477 
3478 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3479 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
3480 
3481 	/* Target for normal exits */
3482 	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
3483 
3484 	/* if R0 > 7 jump to abort */
3485 	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
3486 	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
3487 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3488 
3489 	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
3490 	wrp_immed(nfp_prog, reg_b(3), 0x41001211);
3491 
3492 	emit_shf(nfp_prog, reg_a(1),
3493 		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
3494 
3495 	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3496 	emit_shf(nfp_prog, reg_a(2),
3497 		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
3498 
3499 	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3500 	emit_shf(nfp_prog, reg_b(2),
3501 		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
3502 
3503 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3504 
3505 	emit_shf(nfp_prog, reg_b(2),
3506 		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
3507 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
3508 }
3509 
3510 static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
3511 {
3512 	/* XDP return codes:
3513 	 *   0 aborted  0x82 -> drop,  count as stat3
3514 	 *   1    drop  0x22 -> drop,  count as stat1
3515 	 *   2    pass  0x11 -> pass,  count as stat0
3516 	 *   3      tx  0x44 -> redir, count as stat2
3517 	 *   * unknown  0x82 -> drop,  count as stat3
3518 	 */
3519 	/* Target for aborts */
3520 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
3521 
3522 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3523 
3524 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3525 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
3526 
3527 	/* Target for normal exits */
3528 	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
3529 
3530 	/* if R0 > 3 jump to abort */
3531 	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
3532 	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
3533 
3534 	wrp_immed(nfp_prog, reg_b(2), 0x44112282);
3535 
3536 	emit_shf(nfp_prog, reg_a(1),
3537 		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);
3538 
3539 	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3540 	emit_shf(nfp_prog, reg_b(2),
3541 		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
3542 
3543 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3544 
3545 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3546 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
3547 }
3548 
3549 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
3550 {
3551 	unsigned int idx;
3552 
3553 	for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
3554 		if (nfp_prog->subprog[idx].needs_reg_push)
3555 			return true;
3556 
3557 	return false;
3558 }
3559 
3560 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
3561 {
3562 	u8 reg;
3563 
3564 	/* Subroutine: Save all callee saved registers (R6 ~ R9).
3565 	 * imm_b() holds the return address.
3566 	 */
3567 	nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
3568 	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3569 		u8 adj = (reg - BPF_REG_0) * 2;
3570 		u8 idx = (reg - BPF_REG_6) * 2;
3571 
3572 		/* The first slot in the stack frame is used to push the return
3573 		 * address in bpf_to_bpf_call(), start just after.
3574 		 */
3575 		wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));
3576 
3577 		if (reg == BPF_REG_8)
3578 			/* Prepare to jump back, last 3 insns use defer slots */
3579 			emit_rtn(nfp_prog, imm_b(nfp_prog), 3);
3580 
3581 		wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
3582 	}
3583 }
3584 
3585 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
3586 {
3587 	u8 reg;
3588 
3589 	/* Subroutine: Restore all callee saved registers (R6 ~ R9).
3590 	 * ret_reg() holds the return address.
3591 	 */
3592 	nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
3593 	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3594 		u8 adj = (reg - BPF_REG_0) * 2;
3595 		u8 idx = (reg - BPF_REG_6) * 2;
3596 
3597 		/* The first slot in the stack frame holds the return address,
3598 		 * start popping just after that.
3599 		 */
3600 		wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));
3601 
3602 		if (reg == BPF_REG_8)
3603 			/* Prepare to jump back, last 3 insns use defer slots */
3604 			emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);
3605 
3606 		wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
3607 	}
3608 }
3609 
3610 static void nfp_outro(struct nfp_prog *nfp_prog)
3611 {
3612 	switch (nfp_prog->type) {
3613 	case BPF_PROG_TYPE_SCHED_CLS:
3614 		nfp_outro_tc_da(nfp_prog);
3615 		break;
3616 	case BPF_PROG_TYPE_XDP:
3617 		nfp_outro_xdp(nfp_prog);
3618 		break;
3619 	default:
3620 		WARN_ON(1);
3621 	}
3622 
3623 	if (!nfp_prog_needs_callee_reg_save(nfp_prog))
3624 		return;
3625 
3626 	nfp_push_callee_registers(nfp_prog);
3627 	nfp_pop_callee_registers(nfp_prog);
3628 }
3629 
3630 static int nfp_translate(struct nfp_prog *nfp_prog)
3631 {
3632 	struct nfp_insn_meta *meta;
3633 	unsigned int depth;
3634 	int err;
3635 
3636 	depth = nfp_prog->subprog[0].stack_depth;
3637 	nfp_prog->stack_frame_depth = round_up(depth, 4);
3638 
3639 	nfp_intro(nfp_prog);
3640 	if (nfp_prog->error)
3641 		return nfp_prog->error;
3642 
3643 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3644 		instr_cb_t cb = instr_cb[meta->insn.code];
3645 
3646 		meta->off = nfp_prog_current_offset(nfp_prog);
3647 
3648 		if (nfp_is_subprog_start(meta)) {
3649 			nfp_start_subprog(nfp_prog, meta);
3650 			if (nfp_prog->error)
3651 				return nfp_prog->error;
3652 		}
3653 
3654 		if (meta->skip) {
3655 			nfp_prog->n_translated++;
3656 			continue;
3657 		}
3658 
3659 		if (nfp_meta_has_prev(nfp_prog, meta) &&
3660 		    nfp_meta_prev(meta)->double_cb)
3661 			cb = nfp_meta_prev(meta)->double_cb;
3662 		if (!cb)
3663 			return -ENOENT;
3664 		err = cb(nfp_prog, meta);
3665 		if (err)
3666 			return err;
3667 		if (nfp_prog->error)
3668 			return nfp_prog->error;
3669 
3670 		nfp_prog->n_translated++;
3671 	}
3672 
3673 	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
3674 
3675 	nfp_outro(nfp_prog);
3676 	if (nfp_prog->error)
3677 		return nfp_prog->error;
3678 
3679 	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
3680 	if (nfp_prog->error)
3681 		return nfp_prog->error;
3682 
3683 	return nfp_fixup_branches(nfp_prog);
3684 }
3685 
3686 /* --- Optimizations --- */
3687 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
3688 {
3689 	struct nfp_insn_meta *meta;
3690 
3691 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3692 		struct bpf_insn insn = meta->insn;
3693 
3694 		/* Programs converted from cBPF start with register xoring */
3695 		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
3696 		    insn.src_reg == insn.dst_reg)
3697 			continue;
3698 
3699 		/* Programs start with R6 = R1 but we ignore the skb pointer */
3700 		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
3701 		    insn.src_reg == 1 && insn.dst_reg == 6)
3702 			meta->skip = true;
3703 
3704 		/* Return as soon as something doesn't match */
3705 		if (!meta->skip)
3706 			return;
3707 	}
3708 }
3709 
3710 /* abs(insn.imm) will fit better into unrestricted reg immediate -
3711  * convert add/sub of a negative number into a sub/add of a positive one.
3712  */
3713 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
3714 {
3715 	struct nfp_insn_meta *meta;
3716 
3717 	list_for_each_entry(meta, &nfp_prog->insns, l) {
3718 		struct bpf_insn insn = meta->insn;
3719 
3720 		if (meta->skip)
3721 			continue;
3722 
3723 		if (BPF_CLASS(insn.code) != BPF_ALU &&
3724 		    BPF_CLASS(insn.code) != BPF_ALU64 &&
3725 		    BPF_CLASS(insn.code) != BPF_JMP)
3726 			continue;
3727 		if (BPF_SRC(insn.code) != BPF_K)
3728 			continue;
3729 		if (insn.imm >= 0)
3730 			continue;
3731 
3732 		if (BPF_CLASS(insn.code) == BPF_JMP) {
3733 			switch (BPF_OP(insn.code)) {
3734 			case BPF_JGE:
3735 			case BPF_JSGE:
3736 			case BPF_JLT:
3737 			case BPF_JSLT:
3738 				meta->jump_neg_op = true;
3739 				break;
3740 			default:
3741 				continue;
3742 			}
3743 		} else {
3744 			if (BPF_OP(insn.code) == BPF_ADD)
3745 				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
3746 			else if (BPF_OP(insn.code) == BPF_SUB)
3747 				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
3748 			else
3749 				continue;
3750 
3751 			meta->insn.code = insn.code | BPF_K;
3752 		}
3753 
3754 		meta->insn.imm = -insn.imm;
3755 	}
3756 }
3757 
3758 /* Remove masking after load since our load guarantees this is not needed */
3759 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
3760 {
3761 	struct nfp_insn_meta *meta1, *meta2;
3762 	const s32 exp_mask[] = {
3763 		[BPF_B] = 0x000000ffU,
3764 		[BPF_H] = 0x0000ffffU,
3765 		[BPF_W] = 0xffffffffU,
3766 	};
3767 
3768 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
3769 		struct bpf_insn insn, next;
3770 
3771 		insn = meta1->insn;
3772 		next = meta2->insn;
3773 
3774 		if (BPF_CLASS(insn.code) != BPF_LD)
3775 			continue;
3776 		if (BPF_MODE(insn.code) != BPF_ABS &&
3777 		    BPF_MODE(insn.code) != BPF_IND)
3778 			continue;
3779 
3780 		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
3781 			continue;
3782 
3783 		if (!exp_mask[BPF_SIZE(insn.code)])
3784 			continue;
3785 		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
3786 			continue;
3787 
3788 		if (next.src_reg || next.dst_reg)
3789 			continue;
3790 
3791 		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
3792 			continue;
3793 
3794 		meta2->skip = true;
3795 	}
3796 }
3797 
3798 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
3799 {
3800 	struct nfp_insn_meta *meta1, *meta2, *meta3;
3801 
3802 	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
3803 		struct bpf_insn insn, next1, next2;
3804 
3805 		insn = meta1->insn;
3806 		next1 = meta2->insn;
3807 		next2 = meta3->insn;
3808 
3809 		if (BPF_CLASS(insn.code) != BPF_LD)
3810 			continue;
3811 		if (BPF_MODE(insn.code) != BPF_ABS &&
3812 		    BPF_MODE(insn.code) != BPF_IND)
3813 			continue;
3814 		if (BPF_SIZE(insn.code) != BPF_W)
3815 			continue;
3816 
3817 		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
3818 		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
3819 		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
3820 		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
3821 			continue;
3822 
3823 		if (next1.src_reg || next1.dst_reg ||
3824 		    next2.src_reg || next2.dst_reg)
3825 			continue;
3826 
3827 		if (next1.imm != 0x20 || next2.imm != 0x20)
3828 			continue;
3829 
3830 		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
3831 		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
3832 			continue;
3833 
3834 		meta2->skip = true;
3835 		meta3->skip = true;
3836 	}
3837 }
3838 
3839 /* load/store pair that forms memory copy sould look like the following:
3840  *
3841  *   ld_width R, [addr_src + offset_src]
3842  *   st_width [addr_dest + offset_dest], R
3843  *
3844  * The destination register of load and source register of store should
3845  * be the same, load and store should also perform at the same width.
3846  * If either of addr_src or addr_dest is stack pointer, we don't do the
3847  * CPP optimization as stack is modelled by registers on NFP.
3848  */
3849 static bool
3850 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
3851 		    struct nfp_insn_meta *st_meta)
3852 {
3853 	struct bpf_insn *ld = &ld_meta->insn;
3854 	struct bpf_insn *st = &st_meta->insn;
3855 
3856 	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
3857 		return false;
3858 
3859 	if (ld_meta->ptr.type != PTR_TO_PACKET &&
3860 	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
3861 		return false;
3862 
3863 	if (st_meta->ptr.type != PTR_TO_PACKET)
3864 		return false;
3865 
3866 	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
3867 		return false;
3868 
3869 	if (ld->dst_reg != st->src_reg)
3870 		return false;
3871 
3872 	/* There is jump to the store insn in this pair. */
3873 	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
3874 		return false;
3875 
3876 	return true;
3877 }
3878 
3879 /* Currently, we only support chaining load/store pairs if:
3880  *
3881  *  - Their address base registers are the same.
3882  *  - Their address offsets are in the same order.
3883  *  - They operate at the same memory width.
3884  *  - There is no jump into the middle of them.
3885  */
3886 static bool
3887 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
3888 			      struct nfp_insn_meta *st_meta,
3889 			      struct bpf_insn *prev_ld,
3890 			      struct bpf_insn *prev_st)
3891 {
3892 	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
3893 	struct bpf_insn *ld = &ld_meta->insn;
3894 	struct bpf_insn *st = &st_meta->insn;
3895 	s16 prev_ld_off, prev_st_off;
3896 
3897 	/* This pair is the start pair. */
3898 	if (!prev_ld)
3899 		return true;
3900 
3901 	prev_size = BPF_LDST_BYTES(prev_ld);
3902 	curr_size = BPF_LDST_BYTES(ld);
3903 	prev_ld_base = prev_ld->src_reg;
3904 	prev_st_base = prev_st->dst_reg;
3905 	prev_ld_dst = prev_ld->dst_reg;
3906 	prev_ld_off = prev_ld->off;
3907 	prev_st_off = prev_st->off;
3908 
3909 	if (ld->dst_reg != prev_ld_dst)
3910 		return false;
3911 
3912 	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
3913 		return false;
3914 
3915 	if (curr_size != prev_size)
3916 		return false;
3917 
3918 	/* There is jump to the head of this pair. */
3919 	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
3920 		return false;
3921 
3922 	/* Both in ascending order. */
3923 	if (prev_ld_off + prev_size == ld->off &&
3924 	    prev_st_off + prev_size == st->off)
3925 		return true;
3926 
3927 	/* Both in descending order. */
3928 	if (ld->off + curr_size == prev_ld_off &&
3929 	    st->off + curr_size == prev_st_off)
3930 		return true;
3931 
3932 	return false;
3933 }
3934 
3935 /* Return TRUE if cross memory access happens. Cross memory access means
3936  * store area is overlapping with load area that a later load might load
3937  * the value from previous store, for this case we can't treat the sequence
3938  * as an memory copy.
3939  */
3940 static bool
3941 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
3942 		 struct nfp_insn_meta *head_st_meta)
3943 {
3944 	s16 head_ld_off, head_st_off, ld_off;
3945 
3946 	/* Different pointer types does not overlap. */
3947 	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
3948 		return false;
3949 
3950 	/* load and store are both PTR_TO_PACKET, check ID info.  */
3951 	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
3952 		return true;
3953 
3954 	/* Canonicalize the offsets. Turn all of them against the original
3955 	 * base register.
3956 	 */
3957 	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
3958 	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
3959 	ld_off = ld->off + head_ld_meta->ptr.off;
3960 
3961 	/* Ascending order cross. */
3962 	if (ld_off > head_ld_off &&
3963 	    head_ld_off < head_st_off && ld_off >= head_st_off)
3964 		return true;
3965 
3966 	/* Descending order cross. */
3967 	if (ld_off < head_ld_off &&
3968 	    head_ld_off > head_st_off && ld_off <= head_st_off)
3969 		return true;
3970 
3971 	return false;
3972 }
3973 
3974 /* This pass try to identify the following instructoin sequences.
3975  *
3976  *   load R, [regA + offA]
3977  *   store [regB + offB], R
3978  *   load R, [regA + offA + const_imm_A]
3979  *   store [regB + offB + const_imm_A], R
3980  *   load R, [regA + offA + 2 * const_imm_A]
3981  *   store [regB + offB + 2 * const_imm_A], R
3982  *   ...
3983  *
3984  * Above sequence is typically generated by compiler when lowering
3985  * memcpy. NFP prefer using CPP instructions to accelerate it.
3986  */
3987 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
3988 {
3989 	struct nfp_insn_meta *head_ld_meta = NULL;
3990 	struct nfp_insn_meta *head_st_meta = NULL;
3991 	struct nfp_insn_meta *meta1, *meta2;
3992 	struct bpf_insn *prev_ld = NULL;
3993 	struct bpf_insn *prev_st = NULL;
3994 	u8 count = 0;
3995 
3996 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
3997 		struct bpf_insn *ld = &meta1->insn;
3998 		struct bpf_insn *st = &meta2->insn;
3999 
4000 		/* Reset record status if any of the following if true:
4001 		 *   - The current insn pair is not load/store.
4002 		 *   - The load/store pair doesn't chain with previous one.
4003 		 *   - The chained load/store pair crossed with previous pair.
4004 		 *   - The chained load/store pair has a total size of memory
4005 		 *     copy beyond 128 bytes which is the maximum length a
4006 		 *     single NFP CPP command can transfer.
4007 		 */
4008 		if (!curr_pair_is_memcpy(meta1, meta2) ||
4009 		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
4010 						   prev_st) ||
4011 		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
4012 						       head_st_meta) ||
4013 				      head_ld_meta->ldst_gather_len >= 128))) {
4014 			if (!count)
4015 				continue;
4016 
4017 			if (count > 1) {
4018 				s16 prev_ld_off = prev_ld->off;
4019 				s16 prev_st_off = prev_st->off;
4020 				s16 head_ld_off = head_ld_meta->insn.off;
4021 
4022 				if (prev_ld_off < head_ld_off) {
4023 					head_ld_meta->insn.off = prev_ld_off;
4024 					head_st_meta->insn.off = prev_st_off;
4025 					head_ld_meta->ldst_gather_len =
4026 						-head_ld_meta->ldst_gather_len;
4027 				}
4028 
4029 				head_ld_meta->paired_st = &head_st_meta->insn;
4030 				head_st_meta->skip = true;
4031 			} else {
4032 				head_ld_meta->ldst_gather_len = 0;
4033 			}
4034 
4035 			/* If the chain is ended by an load/store pair then this
4036 			 * could serve as the new head of the the next chain.
4037 			 */
4038 			if (curr_pair_is_memcpy(meta1, meta2)) {
4039 				head_ld_meta = meta1;
4040 				head_st_meta = meta2;
4041 				head_ld_meta->ldst_gather_len =
4042 					BPF_LDST_BYTES(ld);
4043 				meta1 = nfp_meta_next(meta1);
4044 				meta2 = nfp_meta_next(meta2);
4045 				prev_ld = ld;
4046 				prev_st = st;
4047 				count = 1;
4048 			} else {
4049 				head_ld_meta = NULL;
4050 				head_st_meta = NULL;
4051 				prev_ld = NULL;
4052 				prev_st = NULL;
4053 				count = 0;
4054 			}
4055 
4056 			continue;
4057 		}
4058 
4059 		if (!head_ld_meta) {
4060 			head_ld_meta = meta1;
4061 			head_st_meta = meta2;
4062 		} else {
4063 			meta1->skip = true;
4064 			meta2->skip = true;
4065 		}
4066 
4067 		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
4068 		meta1 = nfp_meta_next(meta1);
4069 		meta2 = nfp_meta_next(meta2);
4070 		prev_ld = ld;
4071 		prev_st = st;
4072 		count++;
4073 	}
4074 }
4075 
4076 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
4077 {
4078 	struct nfp_insn_meta *meta, *range_node = NULL;
4079 	s16 range_start = 0, range_end = 0;
4080 	bool cache_avail = false;
4081 	struct bpf_insn *insn;
4082 	s32 range_ptr_off = 0;
4083 	u32 range_ptr_id = 0;
4084 
4085 	list_for_each_entry(meta, &nfp_prog->insns, l) {
4086 		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
4087 			cache_avail = false;
4088 
4089 		if (meta->skip)
4090 			continue;
4091 
4092 		insn = &meta->insn;
4093 
4094 		if (is_mbpf_store_pkt(meta) ||
4095 		    insn->code == (BPF_JMP | BPF_CALL) ||
4096 		    is_mbpf_classic_store_pkt(meta) ||
4097 		    is_mbpf_classic_load(meta)) {
4098 			cache_avail = false;
4099 			continue;
4100 		}
4101 
4102 		if (!is_mbpf_load(meta))
4103 			continue;
4104 
4105 		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
4106 			cache_avail = false;
4107 			continue;
4108 		}
4109 
4110 		if (!cache_avail) {
4111 			cache_avail = true;
4112 			if (range_node)
4113 				goto end_current_then_start_new;
4114 			goto start_new;
4115 		}
4116 
4117 		/* Check ID to make sure two reads share the same
4118 		 * variable offset against PTR_TO_PACKET, and check OFF
4119 		 * to make sure they also share the same constant
4120 		 * offset.
4121 		 *
4122 		 * OFFs don't really need to be the same, because they
4123 		 * are the constant offsets against PTR_TO_PACKET, so
4124 		 * for different OFFs, we could canonicalize them to
4125 		 * offsets against original packet pointer. We don't
4126 		 * support this.
4127 		 */
4128 		if (meta->ptr.id == range_ptr_id &&
4129 		    meta->ptr.off == range_ptr_off) {
4130 			s16 new_start = range_start;
4131 			s16 end, off = insn->off;
4132 			s16 new_end = range_end;
4133 			bool changed = false;
4134 
4135 			if (off < range_start) {
4136 				new_start = off;
4137 				changed = true;
4138 			}
4139 
4140 			end = off + BPF_LDST_BYTES(insn);
4141 			if (end > range_end) {
4142 				new_end = end;
4143 				changed = true;
4144 			}
4145 
4146 			if (!changed)
4147 				continue;
4148 
4149 			if (new_end - new_start <= 64) {
4150 				/* Install new range. */
4151 				range_start = new_start;
4152 				range_end = new_end;
4153 				continue;
4154 			}
4155 		}
4156 
4157 end_current_then_start_new:
4158 		range_node->pkt_cache.range_start = range_start;
4159 		range_node->pkt_cache.range_end = range_end;
4160 start_new:
4161 		range_node = meta;
4162 		range_node->pkt_cache.do_init = true;
4163 		range_ptr_id = range_node->ptr.id;
4164 		range_ptr_off = range_node->ptr.off;
4165 		range_start = insn->off;
4166 		range_end = insn->off + BPF_LDST_BYTES(insn);
4167 	}
4168 
4169 	if (range_node) {
4170 		range_node->pkt_cache.range_start = range_start;
4171 		range_node->pkt_cache.range_end = range_end;
4172 	}
4173 
4174 	list_for_each_entry(meta, &nfp_prog->insns, l) {
4175 		if (meta->skip)
4176 			continue;
4177 
4178 		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
4179 			if (meta->pkt_cache.do_init) {
4180 				range_start = meta->pkt_cache.range_start;
4181 				range_end = meta->pkt_cache.range_end;
4182 			} else {
4183 				meta->pkt_cache.range_start = range_start;
4184 				meta->pkt_cache.range_end = range_end;
4185 			}
4186 		}
4187 	}
4188 }
4189 
4190 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
4191 {
4192 	nfp_bpf_opt_reg_init(nfp_prog);
4193 
4194 	nfp_bpf_opt_neg_add_sub(nfp_prog);
4195 	nfp_bpf_opt_ld_mask(nfp_prog);
4196 	nfp_bpf_opt_ld_shift(nfp_prog);
4197 	nfp_bpf_opt_ldst_gather(nfp_prog);
4198 	nfp_bpf_opt_pkt_cache(nfp_prog);
4199 
4200 	return 0;
4201 }
4202 
4203 static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
4204 {
4205 	struct nfp_insn_meta *meta1, *meta2;
4206 	struct nfp_bpf_map *nfp_map;
4207 	struct bpf_map *map;
4208 	u32 id;
4209 
4210 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
4211 		if (meta1->skip || meta2->skip)
4212 			continue;
4213 
4214 		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
4215 		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
4216 			continue;
4217 
4218 		map = (void *)(unsigned long)((u32)meta1->insn.imm |
4219 					      (u64)meta2->insn.imm << 32);
4220 		if (bpf_map_offload_neutral(map)) {
4221 			id = map->id;
4222 		} else {
4223 			nfp_map = map_to_offmap(map)->dev_priv;
4224 			id = nfp_map->tid;
4225 		}
4226 
4227 		meta1->insn.imm = id;
4228 		meta2->insn.imm = 0;
4229 	}
4230 
4231 	return 0;
4232 }
4233 
4234 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
4235 {
4236 	__le64 *ustore = (__force __le64 *)prog;
4237 	int i;
4238 
4239 	for (i = 0; i < len; i++) {
4240 		int err;
4241 
4242 		err = nfp_ustore_check_valid_no_ecc(prog[i]);
4243 		if (err)
4244 			return err;
4245 
4246 		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
4247 	}
4248 
4249 	return 0;
4250 }
4251 
4252 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
4253 {
4254 	void *prog;
4255 
4256 	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
4257 	if (!prog)
4258 		return;
4259 
4260 	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
4261 	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
4262 	kvfree(nfp_prog->prog);
4263 	nfp_prog->prog = prog;
4264 }
4265 
4266 int nfp_bpf_jit(struct nfp_prog *nfp_prog)
4267 {
4268 	int ret;
4269 
4270 	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
4271 	if (ret)
4272 		return ret;
4273 
4274 	ret = nfp_bpf_optimize(nfp_prog);
4275 	if (ret)
4276 		return ret;
4277 
4278 	ret = nfp_translate(nfp_prog);
4279 	if (ret) {
4280 		pr_err("Translation failed with error %d (translated: %u)\n",
4281 		       ret, nfp_prog->n_translated);
4282 		return -EINVAL;
4283 	}
4284 
4285 	nfp_bpf_prog_trim(nfp_prog);
4286 
4287 	return ret;
4288 }
4289 
4290 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
4291 {
4292 	struct nfp_insn_meta *meta;
4293 
4294 	/* Another pass to record jump information. */
4295 	list_for_each_entry(meta, &nfp_prog->insns, l) {
4296 		struct nfp_insn_meta *dst_meta;
4297 		u64 code = meta->insn.code;
4298 		unsigned int dst_idx;
4299 		bool pseudo_call;
4300 
4301 		if (BPF_CLASS(code) != BPF_JMP)
4302 			continue;
4303 		if (BPF_OP(code) == BPF_EXIT)
4304 			continue;
4305 		if (is_mbpf_helper_call(meta))
4306 			continue;
4307 
4308 		/* If opcode is BPF_CALL at this point, this can only be a
4309 		 * BPF-to-BPF call (a.k.a pseudo call).
4310 		 */
4311 		pseudo_call = BPF_OP(code) == BPF_CALL;
4312 
4313 		if (pseudo_call)
4314 			dst_idx = meta->n + 1 + meta->insn.imm;
4315 		else
4316 			dst_idx = meta->n + 1 + meta->insn.off;
4317 
4318 		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
4319 
4320 		if (pseudo_call)
4321 			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
4322 
4323 		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
4324 		meta->jmp_dst = dst_meta;
4325 	}
4326 }
4327 
4328 bool nfp_bpf_supported_opcode(u8 code)
4329 {
4330 	return !!instr_cb[code];
4331 }
4332 
4333 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
4334 {
4335 	unsigned int i;
4336 	u64 *prog;
4337 	int err;
4338 
4339 	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
4340 		       GFP_KERNEL);
4341 	if (!prog)
4342 		return ERR_PTR(-ENOMEM);
4343 
4344 	for (i = 0; i < nfp_prog->prog_len; i++) {
4345 		enum nfp_relo_type special;
4346 		u32 val;
4347 		u16 off;
4348 
4349 		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
4350 		switch (special) {
4351 		case RELO_NONE:
4352 			continue;
4353 		case RELO_BR_REL:
4354 			br_add_offset(&prog[i], bv->start_off);
4355 			break;
4356 		case RELO_BR_GO_OUT:
4357 			br_set_offset(&prog[i],
4358 				      nfp_prog->tgt_out + bv->start_off);
4359 			break;
4360 		case RELO_BR_GO_ABORT:
4361 			br_set_offset(&prog[i],
4362 				      nfp_prog->tgt_abort + bv->start_off);
4363 			break;
4364 		case RELO_BR_GO_CALL_PUSH_REGS:
4365 			if (!nfp_prog->tgt_call_push_regs) {
4366 				pr_err("BUG: failed to detect subprogram registers needs\n");
4367 				err = -EINVAL;
4368 				goto err_free_prog;
4369 			}
4370 			off = nfp_prog->tgt_call_push_regs + bv->start_off;
4371 			br_set_offset(&prog[i], off);
4372 			break;
4373 		case RELO_BR_GO_CALL_POP_REGS:
4374 			if (!nfp_prog->tgt_call_pop_regs) {
4375 				pr_err("BUG: failed to detect subprogram registers needs\n");
4376 				err = -EINVAL;
4377 				goto err_free_prog;
4378 			}
4379 			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
4380 			br_set_offset(&prog[i], off);
4381 			break;
4382 		case RELO_BR_NEXT_PKT:
4383 			br_set_offset(&prog[i], bv->tgt_done);
4384 			break;
4385 		case RELO_BR_HELPER:
4386 			val = br_get_offset(prog[i]);
4387 			val -= BR_OFF_RELO;
4388 			switch (val) {
4389 			case BPF_FUNC_map_lookup_elem:
4390 				val = nfp_prog->bpf->helpers.map_lookup;
4391 				break;
4392 			case BPF_FUNC_map_update_elem:
4393 				val = nfp_prog->bpf->helpers.map_update;
4394 				break;
4395 			case BPF_FUNC_map_delete_elem:
4396 				val = nfp_prog->bpf->helpers.map_delete;
4397 				break;
4398 			case BPF_FUNC_perf_event_output:
4399 				val = nfp_prog->bpf->helpers.perf_event_output;
4400 				break;
4401 			default:
4402 				pr_err("relocation of unknown helper %d\n",
4403 				       val);
4404 				err = -EINVAL;
4405 				goto err_free_prog;
4406 			}
4407 			br_set_offset(&prog[i], val);
4408 			break;
4409 		case RELO_IMMED_REL:
4410 			immed_add_value(&prog[i], bv->start_off);
4411 			break;
4412 		}
4413 
4414 		prog[i] &= ~OP_RELO_TYPE;
4415 	}
4416 
4417 	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
4418 	if (err)
4419 		goto err_free_prog;
4420 
4421 	return prog;
4422 
4423 err_free_prog:
4424 	kfree(prog);
4425 	return ERR_PTR(err);
4426 }
4427