1 /*
2  * Copyright (C) 2016-2017 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #define pr_fmt(fmt)	"NFP net bpf: " fmt
35 
36 #include <linux/bug.h>
37 #include <linux/kernel.h>
38 #include <linux/bpf.h>
39 #include <linux/filter.h>
40 #include <linux/pkt_cls.h>
41 #include <linux/unistd.h>
42 
43 #include "main.h"
44 #include "../nfp_asm.h"
45 
46 /* --- NFP prog --- */
47 /* Foreach "multiple" entries macros provide pos and next<n> pointers.
48  * It's safe to modify the next pointers (but not pos).
49  */
50 #define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
51 	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
52 	     next = list_next_entry(pos, l);			\
53 	     &(nfp_prog)->insns != &pos->l &&			\
54 	     &(nfp_prog)->insns != &next->l;			\
55 	     pos = nfp_meta_next(pos),				\
56 	     next = nfp_meta_next(pos))
57 
58 #define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
59 	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
60 	     next = list_next_entry(pos, l),			\
61 	     next2 = list_next_entry(next, l);			\
62 	     &(nfp_prog)->insns != &pos->l &&			\
63 	     &(nfp_prog)->insns != &next->l &&			\
64 	     &(nfp_prog)->insns != &next2->l;			\
65 	     pos = nfp_meta_next(pos),				\
66 	     next = nfp_meta_next(pos),				\
67 	     next2 = nfp_meta_next(next))
68 
69 static bool
70 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
71 {
72 	return meta->l.prev != &nfp_prog->insns;
73 }
74 
75 static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
76 {
77 	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
78 		nfp_prog->error = -ENOSPC;
79 		return;
80 	}
81 
82 	nfp_prog->prog[nfp_prog->prog_len] = insn;
83 	nfp_prog->prog_len++;
84 }
85 
86 static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
87 {
88 	return nfp_prog->prog_len;
89 }
90 
91 static bool
92 nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
93 {
94 	/* If there is a recorded error we may have dropped instructions;
95 	 * that doesn't have to be due to translator bug, and the translation
96 	 * will fail anyway, so just return OK.
97 	 */
98 	if (nfp_prog->error)
99 		return true;
100 	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
101 }
102 
103 /* --- Emitters --- */
104 static void
105 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
106 	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
107 {
108 	enum cmd_ctx_swap ctx;
109 	u64 insn;
110 
111 	if (sync)
112 		ctx = CMD_CTX_SWAP;
113 	else
114 		ctx = CMD_CTX_NO_SWAP;
115 
116 	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
117 		FIELD_PREP(OP_CMD_CTX, ctx) |
118 		FIELD_PREP(OP_CMD_B_SRC, breg) |
119 		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
120 		FIELD_PREP(OP_CMD_XFER, xfer) |
121 		FIELD_PREP(OP_CMD_CNT, size) |
122 		FIELD_PREP(OP_CMD_SIG, sync) |
123 		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
124 		FIELD_PREP(OP_CMD_INDIR, indir) |
125 		FIELD_PREP(OP_CMD_MODE, mode);
126 
127 	nfp_prog_push(nfp_prog, insn);
128 }
129 
130 static void
131 emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
132 	     swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
133 {
134 	struct nfp_insn_re_regs reg;
135 	int err;
136 
137 	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
138 	if (err) {
139 		nfp_prog->error = err;
140 		return;
141 	}
142 	if (reg.swap) {
143 		pr_err("cmd can't swap arguments\n");
144 		nfp_prog->error = -EFAULT;
145 		return;
146 	}
147 	if (reg.dst_lmextn || reg.src_lmextn) {
148 		pr_err("cmd can't use LMextn\n");
149 		nfp_prog->error = -EFAULT;
150 		return;
151 	}
152 
153 	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
154 		   indir);
155 }
156 
157 static void
158 emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
159 	 swreg lreg, swreg rreg, u8 size, bool sync)
160 {
161 	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
162 }
163 
164 static void
165 emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
166 	       swreg lreg, swreg rreg, u8 size, bool sync)
167 {
168 	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
169 }
170 
171 static void
172 __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
173 	  enum br_ctx_signal_state css, u16 addr, u8 defer)
174 {
175 	u16 addr_lo, addr_hi;
176 	u64 insn;
177 
178 	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
179 	addr_hi = addr != addr_lo;
180 
181 	insn = OP_BR_BASE |
182 		FIELD_PREP(OP_BR_MASK, mask) |
183 		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
184 		FIELD_PREP(OP_BR_CSS, css) |
185 		FIELD_PREP(OP_BR_DEFBR, defer) |
186 		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
187 		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
188 
189 	nfp_prog_push(nfp_prog, insn);
190 }
191 
192 static void
193 emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
194 	     enum nfp_relo_type relo)
195 {
196 	if (mask == BR_UNC && defer > 2) {
197 		pr_err("BUG: branch defer out of bounds %d\n", defer);
198 		nfp_prog->error = -EFAULT;
199 		return;
200 	}
201 
202 	__emit_br(nfp_prog, mask,
203 		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
204 		  BR_CSS_NONE, addr, defer);
205 
206 	nfp_prog->prog[nfp_prog->prog_len - 1] |=
207 		FIELD_PREP(OP_RELO_TYPE, relo);
208 }
209 
210 static void
211 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
212 {
213 	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
214 }
215 
216 static void
217 __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
218 	     enum immed_width width, bool invert,
219 	     enum immed_shift shift, bool wr_both,
220 	     bool dst_lmextn, bool src_lmextn)
221 {
222 	u64 insn;
223 
224 	insn = OP_IMMED_BASE |
225 		FIELD_PREP(OP_IMMED_A_SRC, areg) |
226 		FIELD_PREP(OP_IMMED_B_SRC, breg) |
227 		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
228 		FIELD_PREP(OP_IMMED_WIDTH, width) |
229 		FIELD_PREP(OP_IMMED_INV, invert) |
230 		FIELD_PREP(OP_IMMED_SHIFT, shift) |
231 		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
232 		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
233 		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);
234 
235 	nfp_prog_push(nfp_prog, insn);
236 }
237 
238 static void
239 emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
240 	   enum immed_width width, bool invert, enum immed_shift shift)
241 {
242 	struct nfp_insn_ur_regs reg;
243 	int err;
244 
245 	if (swreg_type(dst) == NN_REG_IMM) {
246 		nfp_prog->error = -EFAULT;
247 		return;
248 	}
249 
250 	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
251 	if (err) {
252 		nfp_prog->error = err;
253 		return;
254 	}
255 
256 	/* Use reg.dst when destination is No-Dest. */
257 	__emit_immed(nfp_prog,
258 		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
259 		     reg.breg, imm >> 8, width, invert, shift,
260 		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
261 }
262 
263 static void
264 __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
265 	   enum shf_sc sc, u8 shift,
266 	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
267 	   bool dst_lmextn, bool src_lmextn)
268 {
269 	u64 insn;
270 
271 	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
272 		nfp_prog->error = -EFAULT;
273 		return;
274 	}
275 
276 	if (sc == SHF_SC_L_SHF)
277 		shift = 32 - shift;
278 
279 	insn = OP_SHF_BASE |
280 		FIELD_PREP(OP_SHF_A_SRC, areg) |
281 		FIELD_PREP(OP_SHF_SC, sc) |
282 		FIELD_PREP(OP_SHF_B_SRC, breg) |
283 		FIELD_PREP(OP_SHF_I8, i8) |
284 		FIELD_PREP(OP_SHF_SW, sw) |
285 		FIELD_PREP(OP_SHF_DST, dst) |
286 		FIELD_PREP(OP_SHF_SHIFT, shift) |
287 		FIELD_PREP(OP_SHF_OP, op) |
288 		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
289 		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
290 		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
291 		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);
292 
293 	nfp_prog_push(nfp_prog, insn);
294 }
295 
296 static void
297 emit_shf(struct nfp_prog *nfp_prog, swreg dst,
298 	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
299 {
300 	struct nfp_insn_re_regs reg;
301 	int err;
302 
303 	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
304 	if (err) {
305 		nfp_prog->error = err;
306 		return;
307 	}
308 
309 	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
310 		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
311 		   reg.dst_lmextn, reg.src_lmextn);
312 }
313 
314 static void
315 __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
316 	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
317 	   bool dst_lmextn, bool src_lmextn)
318 {
319 	u64 insn;
320 
321 	insn = OP_ALU_BASE |
322 		FIELD_PREP(OP_ALU_A_SRC, areg) |
323 		FIELD_PREP(OP_ALU_B_SRC, breg) |
324 		FIELD_PREP(OP_ALU_DST, dst) |
325 		FIELD_PREP(OP_ALU_SW, swap) |
326 		FIELD_PREP(OP_ALU_OP, op) |
327 		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
328 		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
329 		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
330 		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);
331 
332 	nfp_prog_push(nfp_prog, insn);
333 }
334 
335 static void
336 emit_alu(struct nfp_prog *nfp_prog, swreg dst,
337 	 swreg lreg, enum alu_op op, swreg rreg)
338 {
339 	struct nfp_insn_ur_regs reg;
340 	int err;
341 
342 	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
343 	if (err) {
344 		nfp_prog->error = err;
345 		return;
346 	}
347 
348 	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
349 		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
350 		   reg.dst_lmextn, reg.src_lmextn);
351 }
352 
353 static void
354 __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
355 		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
356 		bool zero, bool swap, bool wr_both,
357 		bool dst_lmextn, bool src_lmextn)
358 {
359 	u64 insn;
360 
361 	insn = OP_LDF_BASE |
362 		FIELD_PREP(OP_LDF_A_SRC, areg) |
363 		FIELD_PREP(OP_LDF_SC, sc) |
364 		FIELD_PREP(OP_LDF_B_SRC, breg) |
365 		FIELD_PREP(OP_LDF_I8, imm8) |
366 		FIELD_PREP(OP_LDF_SW, swap) |
367 		FIELD_PREP(OP_LDF_ZF, zero) |
368 		FIELD_PREP(OP_LDF_BMASK, bmask) |
369 		FIELD_PREP(OP_LDF_SHF, shift) |
370 		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
371 		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
372 		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);
373 
374 	nfp_prog_push(nfp_prog, insn);
375 }
376 
377 static void
378 emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
379 		  enum shf_sc sc, u8 shift, bool zero)
380 {
381 	struct nfp_insn_re_regs reg;
382 	int err;
383 
384 	/* Note: ld_field is special as it uses one of the src regs as dst */
385 	err = swreg_to_restricted(dst, dst, src, &reg, true);
386 	if (err) {
387 		nfp_prog->error = err;
388 		return;
389 	}
390 
391 	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
392 			reg.i8, zero, reg.swap, reg.wr_both,
393 			reg.dst_lmextn, reg.src_lmextn);
394 }
395 
396 static void
397 emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
398 	      enum shf_sc sc, u8 shift)
399 {
400 	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
401 }
402 
403 static void
404 __emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
405 	    bool dst_lmextn, bool src_lmextn)
406 {
407 	u64 insn;
408 
409 	insn = OP_LCSR_BASE |
410 		FIELD_PREP(OP_LCSR_A_SRC, areg) |
411 		FIELD_PREP(OP_LCSR_B_SRC, breg) |
412 		FIELD_PREP(OP_LCSR_WRITE, wr) |
413 		FIELD_PREP(OP_LCSR_ADDR, addr) |
414 		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
415 		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);
416 
417 	nfp_prog_push(nfp_prog, insn);
418 }
419 
420 static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
421 {
422 	struct nfp_insn_ur_regs reg;
423 	int err;
424 
425 	/* This instruction takes immeds instead of reg_none() for the ignored
426 	 * operand, but we can't encode 2 immeds in one instr with our normal
427 	 * swreg infra so if param is an immed, we encode as reg_none() and
428 	 * copy the immed to both operands.
429 	 */
430 	if (swreg_type(src) == NN_REG_IMM) {
431 		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
432 		reg.breg = reg.areg;
433 	} else {
434 		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
435 	}
436 	if (err) {
437 		nfp_prog->error = err;
438 		return;
439 	}
440 
441 	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
442 		    false, reg.src_lmextn);
443 }
444 
445 static void emit_nop(struct nfp_prog *nfp_prog)
446 {
447 	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
448 }
449 
450 /* --- Wrappers --- */
451 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
452 {
453 	if (!(imm & 0xffff0000)) {
454 		*val = imm;
455 		*shift = IMMED_SHIFT_0B;
456 	} else if (!(imm & 0xff0000ff)) {
457 		*val = imm >> 8;
458 		*shift = IMMED_SHIFT_1B;
459 	} else if (!(imm & 0x0000ffff)) {
460 		*val = imm >> 16;
461 		*shift = IMMED_SHIFT_2B;
462 	} else {
463 		return false;
464 	}
465 
466 	return true;
467 }
468 
469 static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
470 {
471 	enum immed_shift shift;
472 	u16 val;
473 
474 	if (pack_immed(imm, &val, &shift)) {
475 		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
476 	} else if (pack_immed(~imm, &val, &shift)) {
477 		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
478 	} else {
479 		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
480 			   false, IMMED_SHIFT_0B);
481 		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
482 			   false, IMMED_SHIFT_2B);
483 	}
484 }
485 
486 static void
487 wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
488 	       enum nfp_relo_type relo)
489 {
490 	if (imm > 0xffff) {
491 		pr_err("relocation of a large immediate!\n");
492 		nfp_prog->error = -EFAULT;
493 		return;
494 	}
495 	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
496 
497 	nfp_prog->prog[nfp_prog->prog_len - 1] |=
498 		FIELD_PREP(OP_RELO_TYPE, relo);
499 }
500 
501 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
502  * If the @imm is small enough encode it directly in operand and return
503  * otherwise load @imm to a spare register and return its encoding.
504  */
505 static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
506 {
507 	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
508 		return reg_imm(imm);
509 
510 	wrp_immed(nfp_prog, tmp_reg, imm);
511 	return tmp_reg;
512 }
513 
514 /* re_load_imm_any() - encode immediate or use tmp register (restricted)
515  * If the @imm is small enough encode it directly in operand and return
516  * otherwise load @imm to a spare register and return its encoding.
517  */
518 static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
519 {
520 	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
521 		return reg_imm(imm);
522 
523 	wrp_immed(nfp_prog, tmp_reg, imm);
524 	return tmp_reg;
525 }
526 
527 static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
528 {
529 	while (count--)
530 		emit_nop(nfp_prog);
531 }
532 
533 static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
534 {
535 	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
536 }
537 
538 static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
539 {
540 	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
541 }
542 
543 /* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
544  * result to @dst from low end.
545  */
546 static void
547 wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
548 		u8 offset)
549 {
550 	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
551 	u8 mask = (1 << field_len) - 1;
552 
553 	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
554 }
555 
556 static void
557 addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
558 	      swreg *rega, swreg *regb)
559 {
560 	if (offset == reg_imm(0)) {
561 		*rega = reg_a(src_gpr);
562 		*regb = reg_b(src_gpr + 1);
563 		return;
564 	}
565 
566 	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
567 	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
568 		 reg_imm(0));
569 	*rega = imm_a(nfp_prog);
570 	*regb = imm_b(nfp_prog);
571 }
572 
573 /* NFP has Command Push Pull bus which supports bluk memory operations. */
574 static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
575 {
576 	bool descending_seq = meta->ldst_gather_len < 0;
577 	s16 len = abs(meta->ldst_gather_len);
578 	swreg src_base, off;
579 	bool src_40bit_addr;
580 	unsigned int i;
581 	u8 xfer_num;
582 
583 	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
584 	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
585 	src_base = reg_a(meta->insn.src_reg * 2);
586 	xfer_num = round_up(len, 4) / 4;
587 
588 	if (src_40bit_addr)
589 		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
590 			      &off);
591 
592 	/* Setup PREV_ALU fields to override memory read length. */
593 	if (len > 32)
594 		wrp_immed(nfp_prog, reg_none(),
595 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
596 
597 	/* Memory read from source addr into transfer-in registers. */
598 	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
599 		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
600 		     src_base, off, xfer_num - 1, true, len > 32);
601 
602 	/* Move from transfer-in to transfer-out. */
603 	for (i = 0; i < xfer_num; i++)
604 		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
605 
606 	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
607 
608 	if (len <= 8) {
609 		/* Use single direct_ref write8. */
610 		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
611 			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
612 			 true);
613 	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
614 		/* Use single direct_ref write32. */
615 		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
616 			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
617 			 true);
618 	} else if (len <= 32) {
619 		/* Use single indirect_ref write8. */
620 		wrp_immed(nfp_prog, reg_none(),
621 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
622 		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
623 			       reg_a(meta->paired_st->dst_reg * 2), off,
624 			       len - 1, true);
625 	} else if (IS_ALIGNED(len, 4)) {
626 		/* Use single indirect_ref write32. */
627 		wrp_immed(nfp_prog, reg_none(),
628 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
629 		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
630 			       reg_a(meta->paired_st->dst_reg * 2), off,
631 			       xfer_num - 1, true);
632 	} else if (len <= 40) {
633 		/* Use one direct_ref write32 to write the first 32-bytes, then
634 		 * another direct_ref write8 to write the remaining bytes.
635 		 */
636 		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
637 			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
638 			 true);
639 
640 		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
641 				      imm_b(nfp_prog));
642 		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
643 			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
644 			 true);
645 	} else {
646 		/* Use one indirect_ref write32 to write 4-bytes aligned length,
647 		 * then another direct_ref write8 to write the remaining bytes.
648 		 */
649 		u8 new_off;
650 
651 		wrp_immed(nfp_prog, reg_none(),
652 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
653 		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
654 			       reg_a(meta->paired_st->dst_reg * 2), off,
655 			       xfer_num - 2, true);
656 		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
657 		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
658 		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
659 			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
660 			 (len & 0x3) - 1, true);
661 	}
662 
663 	/* TODO: The following extra load is to make sure data flow be identical
664 	 *  before and after we do memory copy optimization.
665 	 *
666 	 *  The load destination register is not guaranteed to be dead, so we
667 	 *  need to make sure it is loaded with the value the same as before
668 	 *  this transformation.
669 	 *
670 	 *  These extra loads could be removed once we have accurate register
671 	 *  usage information.
672 	 */
673 	if (descending_seq)
674 		xfer_num = 0;
675 	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
676 		xfer_num = xfer_num - 1;
677 	else
678 		xfer_num = xfer_num - 2;
679 
680 	switch (BPF_SIZE(meta->insn.code)) {
681 	case BPF_B:
682 		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
683 				reg_xfer(xfer_num), 1,
684 				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
685 		break;
686 	case BPF_H:
687 		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
688 				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
689 		break;
690 	case BPF_W:
691 		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
692 			reg_xfer(0));
693 		break;
694 	case BPF_DW:
695 		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
696 			reg_xfer(xfer_num));
697 		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
698 			reg_xfer(xfer_num + 1));
699 		break;
700 	}
701 
702 	if (BPF_SIZE(meta->insn.code) != BPF_DW)
703 		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
704 
705 	return 0;
706 }
707 
708 static int
709 data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
710 {
711 	unsigned int i;
712 	u16 shift, sz;
713 
714 	/* We load the value from the address indicated in @offset and then
715 	 * shift out the data we don't need.  Note: this is big endian!
716 	 */
717 	sz = max(size, 4);
718 	shift = size < 4 ? 4 - size : 0;
719 
720 	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
721 		 pptr_reg(nfp_prog), offset, sz - 1, true);
722 
723 	i = 0;
724 	if (shift)
725 		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
726 			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
727 	else
728 		for (; i * 4 < size; i++)
729 			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
730 
731 	if (i < 2)
732 		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
733 
734 	return 0;
735 }
736 
737 static int
738 data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
739 		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
740 {
741 	unsigned int i;
742 	u8 mask, sz;
743 
744 	/* We load the value from the address indicated in rreg + lreg and then
745 	 * mask out the data we don't need.  Note: this is little endian!
746 	 */
747 	sz = max(size, 4);
748 	mask = size < 4 ? GENMASK(size - 1, 0) : 0;
749 
750 	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
751 		 lreg, rreg, sz / 4 - 1, true);
752 
753 	i = 0;
754 	if (mask)
755 		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
756 				  reg_xfer(0), SHF_SC_NONE, 0, true);
757 	else
758 		for (; i * 4 < size; i++)
759 			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
760 
761 	if (i < 2)
762 		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
763 
764 	return 0;
765 }
766 
767 static int
768 data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
769 			  u8 dst_gpr, u8 size)
770 {
771 	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
772 				  size, CMD_MODE_32b);
773 }
774 
775 static int
776 data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
777 			  u8 dst_gpr, u8 size)
778 {
779 	swreg rega, regb;
780 
781 	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
782 
783 	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
784 				  size, CMD_MODE_40b_BA);
785 }
786 
787 static int
788 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
789 {
790 	swreg tmp_reg;
791 
792 	/* Calculate the true offset (src_reg + imm) */
793 	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
794 	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);
795 
796 	/* Check packet length (size guaranteed to fit b/c it's u8) */
797 	emit_alu(nfp_prog, imm_a(nfp_prog),
798 		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
799 	emit_alu(nfp_prog, reg_none(),
800 		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
801 	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
802 
803 	/* Load data */
804 	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
805 }
806 
807 static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
808 {
809 	swreg tmp_reg;
810 
811 	/* Check packet length */
812 	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
813 	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
814 	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
815 
816 	/* Load data */
817 	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
818 	return data_ld(nfp_prog, tmp_reg, 0, size);
819 }
820 
821 static int
822 data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
823 		    u8 src_gpr, u8 size)
824 {
825 	unsigned int i;
826 
827 	for (i = 0; i * 4 < size; i++)
828 		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));
829 
830 	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
831 		 reg_a(dst_gpr), offset, size - 1, true);
832 
833 	return 0;
834 }
835 
836 static int
837 data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
838 		   u64 imm, u8 size)
839 {
840 	wrp_immed(nfp_prog, reg_xfer(0), imm);
841 	if (size == 8)
842 		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);
843 
844 	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
845 		 reg_a(dst_gpr), offset, size - 1, true);
846 
847 	return 0;
848 }
849 
850 typedef int
851 (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
852 	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
853 	     bool needs_inc);
854 
855 static int
856 wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
857 	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
858 	      bool needs_inc)
859 {
860 	bool should_inc = needs_inc && new_gpr && !last;
861 	u32 idx, src_byte;
862 	enum shf_sc sc;
863 	swreg reg;
864 	int shf;
865 	u8 mask;
866 
867 	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
868 		return -EOPNOTSUPP;
869 
870 	idx = off / 4;
871 
872 	/* Move the entire word */
873 	if (size == 4) {
874 		wrp_mov(nfp_prog, reg_both(dst),
875 			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
876 		return 0;
877 	}
878 
879 	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
880 		return -EOPNOTSUPP;
881 
882 	src_byte = off % 4;
883 
884 	mask = (1 << size) - 1;
885 	mask <<= dst_byte;
886 
887 	if (WARN_ON_ONCE(mask > 0xf))
888 		return -EOPNOTSUPP;
889 
890 	shf = abs(src_byte - dst_byte) * 8;
891 	if (src_byte == dst_byte) {
892 		sc = SHF_SC_NONE;
893 	} else if (src_byte < dst_byte) {
894 		shf = 32 - shf;
895 		sc = SHF_SC_L_SHF;
896 	} else {
897 		sc = SHF_SC_R_SHF;
898 	}
899 
900 	/* ld_field can address fewer indexes, if offset too large do RMW.
901 	 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes.
902 	 */
903 	if (idx <= RE_REG_LM_IDX_MAX) {
904 		reg = reg_lm(lm3 ? 3 : 0, idx);
905 	} else {
906 		reg = imm_a(nfp_prog);
907 		/* If it's not the first part of the load and we start a new GPR
908 		 * that means we are loading a second part of the LMEM word into
909 		 * a new GPR.  IOW we've already looked that LMEM word and
910 		 * therefore it has been loaded into imm_a().
911 		 */
912 		if (first || !new_gpr)
913 			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
914 	}
915 
916 	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);
917 
918 	if (should_inc)
919 		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
920 
921 	return 0;
922 }
923 
924 static int
925 wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
926 	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
927 	       bool needs_inc)
928 {
929 	bool should_inc = needs_inc && new_gpr && !last;
930 	u32 idx, dst_byte;
931 	enum shf_sc sc;
932 	swreg reg;
933 	int shf;
934 	u8 mask;
935 
936 	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
937 		return -EOPNOTSUPP;
938 
939 	idx = off / 4;
940 
941 	/* Move the entire word */
942 	if (size == 4) {
943 		wrp_mov(nfp_prog,
944 			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
945 			reg_b(src));
946 		return 0;
947 	}
948 
949 	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
950 		return -EOPNOTSUPP;
951 
952 	dst_byte = off % 4;
953 
954 	mask = (1 << size) - 1;
955 	mask <<= dst_byte;
956 
957 	if (WARN_ON_ONCE(mask > 0xf))
958 		return -EOPNOTSUPP;
959 
960 	shf = abs(src_byte - dst_byte) * 8;
961 	if (src_byte == dst_byte) {
962 		sc = SHF_SC_NONE;
963 	} else if (src_byte < dst_byte) {
964 		shf = 32 - shf;
965 		sc = SHF_SC_L_SHF;
966 	} else {
967 		sc = SHF_SC_R_SHF;
968 	}
969 
970 	/* ld_field can address fewer indexes, if offset too large do RMW.
971 	 * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes.
972 	 */
973 	if (idx <= RE_REG_LM_IDX_MAX) {
974 		reg = reg_lm(lm3 ? 3 : 0, idx);
975 	} else {
976 		reg = imm_a(nfp_prog);
977 		/* Only first and last LMEM locations are going to need RMW,
978 		 * the middle location will be overwritten fully.
979 		 */
980 		if (first || last)
981 			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
982 	}
983 
984 	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);
985 
986 	if (new_gpr || last) {
987 		if (idx > RE_REG_LM_IDX_MAX)
988 			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
989 		if (should_inc)
990 			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
991 	}
992 
993 	return 0;
994 }
995 
996 static int
997 mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
998 	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
999 	     bool clr_gpr, lmem_step step)
1000 {
1001 	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
1002 	bool first = true, last;
1003 	bool needs_inc = false;
1004 	swreg stack_off_reg;
1005 	u8 prev_gpr = 255;
1006 	u32 gpr_byte = 0;
1007 	bool lm3 = true;
1008 	int ret;
1009 
1010 	if (meta->ptr_not_const) {
1011 		/* Use of the last encountered ptr_off is OK, they all have
1012 		 * the same alignment.  Depend on low bits of value being
1013 		 * discarded when written to LMaddr register.
1014 		 */
1015 		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
1016 						stack_imm(nfp_prog));
1017 
1018 		emit_alu(nfp_prog, imm_b(nfp_prog),
1019 			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);
1020 
1021 		needs_inc = true;
1022 	} else if (off + size <= 64) {
1023 		/* We can reach bottom 64B with LMaddr0 */
1024 		lm3 = false;
1025 	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
1026 		/* We have to set up a new pointer.  If we know the offset
1027 		 * and the entire access falls into a single 32 byte aligned
1028 		 * window we won't have to increment the LM pointer.
1029 		 * The 32 byte alignment is imporant because offset is ORed in
1030 		 * not added when doing *l$indexN[off].
1031 		 */
1032 		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
1033 						stack_imm(nfp_prog));
1034 		emit_alu(nfp_prog, imm_b(nfp_prog),
1035 			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
1036 
1037 		off %= 32;
1038 	} else {
1039 		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
1040 						stack_imm(nfp_prog));
1041 
1042 		emit_alu(nfp_prog, imm_b(nfp_prog),
1043 			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
1044 
1045 		needs_inc = true;
1046 	}
1047 	if (lm3) {
1048 		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
1049 		/* For size < 4 one slot will be filled by zeroing of upper. */
1050 		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
1051 	}
1052 
1053 	if (clr_gpr && size < 8)
1054 		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
1055 
1056 	while (size) {
1057 		u32 slice_end;
1058 		u8 slice_size;
1059 
1060 		slice_size = min(size, 4 - gpr_byte);
1061 		slice_end = min(off + slice_size, round_up(off + 1, 4));
1062 		slice_size = slice_end - off;
1063 
1064 		last = slice_size == size;
1065 
1066 		if (needs_inc)
1067 			off %= 4;
1068 
1069 		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
1070 			   first, gpr != prev_gpr, last, lm3, needs_inc);
1071 		if (ret)
1072 			return ret;
1073 
1074 		prev_gpr = gpr;
1075 		first = false;
1076 
1077 		gpr_byte += slice_size;
1078 		if (gpr_byte >= 4) {
1079 			gpr_byte -= 4;
1080 			gpr++;
1081 		}
1082 
1083 		size -= slice_size;
1084 		off += slice_size;
1085 	}
1086 
1087 	return 0;
1088 }
1089 
1090 static void
1091 wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
1092 {
1093 	swreg tmp_reg;
1094 
1095 	if (alu_op == ALU_OP_AND) {
1096 		if (!imm)
1097 			wrp_immed(nfp_prog, reg_both(dst), 0);
1098 		if (!imm || !~imm)
1099 			return;
1100 	}
1101 	if (alu_op == ALU_OP_OR) {
1102 		if (!~imm)
1103 			wrp_immed(nfp_prog, reg_both(dst), ~0U);
1104 		if (!imm || !~imm)
1105 			return;
1106 	}
1107 	if (alu_op == ALU_OP_XOR) {
1108 		if (!~imm)
1109 			emit_alu(nfp_prog, reg_both(dst), reg_none(),
1110 				 ALU_OP_NOT, reg_b(dst));
1111 		if (!imm || !~imm)
1112 			return;
1113 	}
1114 
1115 	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
1116 	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
1117 }
1118 
1119 static int
1120 wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1121 	      enum alu_op alu_op, bool skip)
1122 {
1123 	const struct bpf_insn *insn = &meta->insn;
1124 	u64 imm = insn->imm; /* sign extend */
1125 
1126 	if (skip) {
1127 		meta->skip = true;
1128 		return 0;
1129 	}
1130 
1131 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
1132 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
1133 
1134 	return 0;
1135 }
1136 
1137 static int
1138 wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1139 	      enum alu_op alu_op)
1140 {
1141 	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
1142 
1143 	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
1144 	emit_alu(nfp_prog, reg_both(dst + 1),
1145 		 reg_a(dst + 1), alu_op, reg_b(src + 1));
1146 
1147 	return 0;
1148 }
1149 
1150 static int
1151 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1152 	      enum alu_op alu_op, bool skip)
1153 {
1154 	const struct bpf_insn *insn = &meta->insn;
1155 
1156 	if (skip) {
1157 		meta->skip = true;
1158 		return 0;
1159 	}
1160 
1161 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
1162 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1163 
1164 	return 0;
1165 }
1166 
1167 static int
1168 wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1169 	      enum alu_op alu_op)
1170 {
1171 	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
1172 
1173 	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
1174 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
1175 
1176 	return 0;
1177 }
1178 
1179 static void
1180 wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
1181 		 enum br_mask br_mask, u16 off)
1182 {
1183 	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
1184 	emit_br(nfp_prog, br_mask, off, 0);
1185 }
1186 
1187 static int
1188 wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1189 	     enum alu_op alu_op, enum br_mask br_mask)
1190 {
1191 	const struct bpf_insn *insn = &meta->insn;
1192 
1193 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
1194 			 insn->src_reg * 2, br_mask, insn->off);
1195 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
1196 			 insn->src_reg * 2 + 1, br_mask, insn->off);
1197 
1198 	return 0;
1199 }
1200 
1201 static int
1202 wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1203 	    enum br_mask br_mask, bool swap)
1204 {
1205 	const struct bpf_insn *insn = &meta->insn;
1206 	u64 imm = insn->imm; /* sign extend */
1207 	u8 reg = insn->dst_reg * 2;
1208 	swreg tmp_reg;
1209 
1210 	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
1211 	if (!swap)
1212 		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
1213 	else
1214 		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
1215 
1216 	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
1217 	if (!swap)
1218 		emit_alu(nfp_prog, reg_none(),
1219 			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
1220 	else
1221 		emit_alu(nfp_prog, reg_none(),
1222 			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
1223 
1224 	emit_br(nfp_prog, br_mask, insn->off, 0);
1225 
1226 	return 0;
1227 }
1228 
1229 static int
1230 wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1231 	    enum br_mask br_mask, bool swap)
1232 {
1233 	const struct bpf_insn *insn = &meta->insn;
1234 	u8 areg, breg;
1235 
1236 	areg = insn->dst_reg * 2;
1237 	breg = insn->src_reg * 2;
1238 
1239 	if (swap) {
1240 		areg ^= breg;
1241 		breg ^= areg;
1242 		areg ^= breg;
1243 	}
1244 
1245 	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
1246 	emit_alu(nfp_prog, reg_none(),
1247 		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
1248 	emit_br(nfp_prog, br_mask, insn->off, 0);
1249 
1250 	return 0;
1251 }
1252 
1253 static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
1254 {
1255 	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
1256 		      SHF_SC_R_ROT, 8);
1257 	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
1258 		      SHF_SC_R_ROT, 16);
1259 }
1260 
1261 static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1262 {
1263 	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
1264 	struct nfp_bpf_cap_adjust_head *adjust_head;
1265 	u32 ret_einval, end;
1266 
1267 	adjust_head = &nfp_prog->bpf->adjust_head;
1268 
1269 	/* Optimized version - 5 vs 14 cycles */
1270 	if (nfp_prog->adjust_head_location != UINT_MAX) {
1271 		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
1272 			return -EINVAL;
1273 
1274 		emit_alu(nfp_prog, pptr_reg(nfp_prog),
1275 			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
1276 		emit_alu(nfp_prog, plen_reg(nfp_prog),
1277 			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1278 		emit_alu(nfp_prog, pv_len(nfp_prog),
1279 			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1280 
1281 		wrp_immed(nfp_prog, reg_both(0), 0);
1282 		wrp_immed(nfp_prog, reg_both(1), 0);
1283 
1284 		/* TODO: when adjust head is guaranteed to succeed we can
1285 		 * also eliminate the following if (r0 == 0) branch.
1286 		 */
1287 
1288 		return 0;
1289 	}
1290 
1291 	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
1292 	end = ret_einval + 2;
1293 
1294 	/* We need to use a temp because offset is just a part of the pkt ptr */
1295 	emit_alu(nfp_prog, tmp,
1296 		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
1297 
1298 	/* Validate result will fit within FW datapath constraints */
1299 	emit_alu(nfp_prog, reg_none(),
1300 		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
1301 	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
1302 	emit_alu(nfp_prog, reg_none(),
1303 		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
1304 	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
1305 
1306 	/* Validate the length is at least ETH_HLEN */
1307 	emit_alu(nfp_prog, tmp_len,
1308 		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1309 	emit_alu(nfp_prog, reg_none(),
1310 		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
1311 	emit_br(nfp_prog, BR_BMI, ret_einval, 0);
1312 
1313 	/* Load the ret code */
1314 	wrp_immed(nfp_prog, reg_both(0), 0);
1315 	wrp_immed(nfp_prog, reg_both(1), 0);
1316 
1317 	/* Modify the packet metadata */
1318 	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
1319 
1320 	/* Skip over the -EINVAL ret code (defer 2) */
1321 	emit_br(nfp_prog, BR_UNC, end, 2);
1322 
1323 	emit_alu(nfp_prog, plen_reg(nfp_prog),
1324 		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1325 	emit_alu(nfp_prog, pv_len(nfp_prog),
1326 		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1327 
1328 	/* return -EINVAL target */
1329 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
1330 		return -EINVAL;
1331 
1332 	wrp_immed(nfp_prog, reg_both(0), -22);
1333 	wrp_immed(nfp_prog, reg_both(1), ~0);
1334 
1335 	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
1336 		return -EINVAL;
1337 
1338 	return 0;
1339 }
1340 
1341 static int
1342 map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1343 {
1344 	struct bpf_offloaded_map *offmap;
1345 	struct nfp_bpf_map *nfp_map;
1346 	bool load_lm_ptr;
1347 	u32 ret_tgt;
1348 	s64 lm_off;
1349 	swreg tid;
1350 
1351 	offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
1352 	nfp_map = offmap->dev_priv;
1353 
1354 	/* We only have to reload LM0 if the key is not at start of stack */
1355 	lm_off = nfp_prog->stack_depth;
1356 	lm_off += meta->arg2.var_off.value + meta->arg2.off;
1357 	load_lm_ptr = meta->arg2_var_off || lm_off;
1358 
1359 	/* Set LM0 to start of key */
1360 	if (load_lm_ptr)
1361 		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
1362 
1363 	/* Load map ID into a register, it should actually fit as an immediate
1364 	 * but in case it doesn't deal with it here, not in the delay slots.
1365 	 */
1366 	tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
1367 
1368 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
1369 		     2, RELO_BR_HELPER);
1370 	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
1371 
1372 	/* Load map ID into A0 */
1373 	wrp_mov(nfp_prog, reg_a(0), tid);
1374 
1375 	/* Load the return address into B0 */
1376 	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
1377 
1378 	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
1379 		return -EINVAL;
1380 
1381 	/* Reset the LM0 pointer */
1382 	if (!load_lm_ptr)
1383 		return 0;
1384 
1385 	emit_csr_wr(nfp_prog, stack_reg(nfp_prog),  NFP_CSR_ACT_LM_ADDR0);
1386 	wrp_nops(nfp_prog, 3);
1387 
1388 	return 0;
1389 }
1390 
1391 /* --- Callbacks --- */
1392 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1393 {
1394 	const struct bpf_insn *insn = &meta->insn;
1395 	u8 dst = insn->dst_reg * 2;
1396 	u8 src = insn->src_reg * 2;
1397 
1398 	if (insn->src_reg == BPF_REG_10) {
1399 		swreg stack_depth_reg;
1400 
1401 		stack_depth_reg = ur_load_imm_any(nfp_prog,
1402 						  nfp_prog->stack_depth,
1403 						  stack_imm(nfp_prog));
1404 		emit_alu(nfp_prog, reg_both(dst),
1405 			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
1406 		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
1407 	} else {
1408 		wrp_reg_mov(nfp_prog, dst, src);
1409 		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1416 {
1417 	u64 imm = meta->insn.imm; /* sign extend */
1418 
1419 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
1420 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
1421 
1422 	return 0;
1423 }
1424 
1425 static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1426 {
1427 	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
1428 }
1429 
1430 static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1431 {
1432 	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
1433 }
1434 
1435 static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1436 {
1437 	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
1438 }
1439 
1440 static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1441 {
1442 	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
1443 }
1444 
1445 static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1446 {
1447 	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
1448 }
1449 
1450 static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1451 {
1452 	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
1453 }
1454 
1455 static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1456 {
1457 	const struct bpf_insn *insn = &meta->insn;
1458 
1459 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1460 		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
1461 		 reg_b(insn->src_reg * 2));
1462 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1463 		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
1464 		 reg_b(insn->src_reg * 2 + 1));
1465 
1466 	return 0;
1467 }
1468 
1469 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1470 {
1471 	const struct bpf_insn *insn = &meta->insn;
1472 	u64 imm = insn->imm; /* sign extend */
1473 
1474 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
1475 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
1476 
1477 	return 0;
1478 }
1479 
1480 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1481 {
1482 	const struct bpf_insn *insn = &meta->insn;
1483 
1484 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1485 		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
1486 		 reg_b(insn->src_reg * 2));
1487 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1488 		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
1489 		 reg_b(insn->src_reg * 2 + 1));
1490 
1491 	return 0;
1492 }
1493 
1494 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1495 {
1496 	const struct bpf_insn *insn = &meta->insn;
1497 	u64 imm = insn->imm; /* sign extend */
1498 
1499 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
1500 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
1501 
1502 	return 0;
1503 }
1504 
1505 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1506 {
1507 	const struct bpf_insn *insn = &meta->insn;
1508 
1509 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
1510 		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
1511 	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
1512 		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));
1513 
1514 	return 0;
1515 }
1516 
1517 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1518 {
1519 	const struct bpf_insn *insn = &meta->insn;
1520 	u8 dst = insn->dst_reg * 2;
1521 
1522 	if (insn->imm < 32) {
1523 		emit_shf(nfp_prog, reg_both(dst + 1),
1524 			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
1525 			 SHF_SC_R_DSHF, 32 - insn->imm);
1526 		emit_shf(nfp_prog, reg_both(dst),
1527 			 reg_none(), SHF_OP_NONE, reg_b(dst),
1528 			 SHF_SC_L_SHF, insn->imm);
1529 	} else if (insn->imm == 32) {
1530 		wrp_reg_mov(nfp_prog, dst + 1, dst);
1531 		wrp_immed(nfp_prog, reg_both(dst), 0);
1532 	} else if (insn->imm > 32) {
1533 		emit_shf(nfp_prog, reg_both(dst + 1),
1534 			 reg_none(), SHF_OP_NONE, reg_b(dst),
1535 			 SHF_SC_L_SHF, insn->imm - 32);
1536 		wrp_immed(nfp_prog, reg_both(dst), 0);
1537 	}
1538 
1539 	return 0;
1540 }
1541 
1542 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1543 {
1544 	const struct bpf_insn *insn = &meta->insn;
1545 	u8 dst = insn->dst_reg * 2;
1546 
1547 	if (insn->imm < 32) {
1548 		emit_shf(nfp_prog, reg_both(dst),
1549 			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
1550 			 SHF_SC_R_DSHF, insn->imm);
1551 		emit_shf(nfp_prog, reg_both(dst + 1),
1552 			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
1553 			 SHF_SC_R_SHF, insn->imm);
1554 	} else if (insn->imm == 32) {
1555 		wrp_reg_mov(nfp_prog, dst, dst + 1);
1556 		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
1557 	} else if (insn->imm > 32) {
1558 		emit_shf(nfp_prog, reg_both(dst),
1559 			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
1560 			 SHF_SC_R_SHF, insn->imm - 32);
1561 		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
1562 	}
1563 
1564 	return 0;
1565 }
1566 
1567 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1568 {
1569 	const struct bpf_insn *insn = &meta->insn;
1570 
1571 	wrp_reg_mov(nfp_prog, insn->dst_reg * 2,  insn->src_reg * 2);
1572 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1573 
1574 	return 0;
1575 }
1576 
1577 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1578 {
1579 	const struct bpf_insn *insn = &meta->insn;
1580 
1581 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
1582 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1583 
1584 	return 0;
1585 }
1586 
1587 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1588 {
1589 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
1590 }
1591 
1592 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1593 {
1594 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
1595 }
1596 
1597 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1598 {
1599 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
1600 }
1601 
1602 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1603 {
1604 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
1605 }
1606 
1607 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1608 {
1609 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
1610 }
1611 
1612 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1613 {
1614 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
1615 }
1616 
1617 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1618 {
1619 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
1620 }
1621 
1622 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1623 {
1624 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
1625 }
1626 
1627 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1628 {
1629 	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
1630 }
1631 
1632 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1633 {
1634 	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
1635 }
1636 
1637 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1638 {
1639 	u8 dst = meta->insn.dst_reg * 2;
1640 
1641 	emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
1642 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
1643 
1644 	return 0;
1645 }
1646 
1647 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1648 {
1649 	const struct bpf_insn *insn = &meta->insn;
1650 
1651 	if (!insn->imm)
1652 		return 1; /* TODO: zero shift means indirect */
1653 
1654 	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
1655 		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
1656 		 SHF_SC_L_SHF, insn->imm);
1657 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1658 
1659 	return 0;
1660 }
1661 
1662 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1663 {
1664 	const struct bpf_insn *insn = &meta->insn;
1665 	u8 gpr = insn->dst_reg * 2;
1666 
1667 	switch (insn->imm) {
1668 	case 16:
1669 		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
1670 			      SHF_SC_R_ROT, 8);
1671 		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
1672 			      SHF_SC_R_SHF, 16);
1673 
1674 		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
1675 		break;
1676 	case 32:
1677 		wrp_end32(nfp_prog, reg_a(gpr), gpr);
1678 		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
1679 		break;
1680 	case 64:
1681 		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
1682 
1683 		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
1684 		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
1685 		break;
1686 	}
1687 
1688 	return 0;
1689 }
1690 
1691 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1692 {
1693 	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
1694 	u32 imm_lo, imm_hi;
1695 	u8 dst;
1696 
1697 	dst = prev->insn.dst_reg * 2;
1698 	imm_lo = prev->insn.imm;
1699 	imm_hi = meta->insn.imm;
1700 
1701 	wrp_immed(nfp_prog, reg_both(dst), imm_lo);
1702 
1703 	/* mov is always 1 insn, load imm may be two, so try to use mov */
1704 	if (imm_hi == imm_lo)
1705 		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
1706 	else
1707 		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
1708 
1709 	return 0;
1710 }
1711 
1712 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1713 {
1714 	meta->double_cb = imm_ld8_part2;
1715 	return 0;
1716 }
1717 
1718 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1719 {
1720 	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
1721 }
1722 
1723 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1724 {
1725 	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
1726 }
1727 
1728 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1729 {
1730 	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
1731 }
1732 
1733 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1734 {
1735 	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
1736 				     meta->insn.src_reg * 2, 1);
1737 }
1738 
1739 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1740 {
1741 	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
1742 				     meta->insn.src_reg * 2, 2);
1743 }
1744 
1745 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1746 {
1747 	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
1748 				     meta->insn.src_reg * 2, 4);
1749 }
1750 
1751 static int
1752 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1753 	      unsigned int size, unsigned int ptr_off)
1754 {
1755 	return mem_op_stack(nfp_prog, meta, size, ptr_off,
1756 			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
1757 			    true, wrp_lmem_load);
1758 }
1759 
1760 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1761 		       u8 size)
1762 {
1763 	swreg dst = reg_both(meta->insn.dst_reg * 2);
1764 
1765 	switch (meta->insn.off) {
1766 	case offsetof(struct __sk_buff, len):
1767 		if (size != FIELD_SIZEOF(struct __sk_buff, len))
1768 			return -EOPNOTSUPP;
1769 		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
1770 		break;
1771 	case offsetof(struct __sk_buff, data):
1772 		if (size != FIELD_SIZEOF(struct __sk_buff, data))
1773 			return -EOPNOTSUPP;
1774 		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
1775 		break;
1776 	case offsetof(struct __sk_buff, data_end):
1777 		if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
1778 			return -EOPNOTSUPP;
1779 		emit_alu(nfp_prog, dst,
1780 			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
1781 		break;
1782 	default:
1783 		return -EOPNOTSUPP;
1784 	}
1785 
1786 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
1787 
1788 	return 0;
1789 }
1790 
1791 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1792 		       u8 size)
1793 {
1794 	swreg dst = reg_both(meta->insn.dst_reg * 2);
1795 
1796 	switch (meta->insn.off) {
1797 	case offsetof(struct xdp_md, data):
1798 		if (size != FIELD_SIZEOF(struct xdp_md, data))
1799 			return -EOPNOTSUPP;
1800 		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
1801 		break;
1802 	case offsetof(struct xdp_md, data_end):
1803 		if (size != FIELD_SIZEOF(struct xdp_md, data_end))
1804 			return -EOPNOTSUPP;
1805 		emit_alu(nfp_prog, dst,
1806 			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
1807 		break;
1808 	default:
1809 		return -EOPNOTSUPP;
1810 	}
1811 
1812 	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
1813 
1814 	return 0;
1815 }
1816 
1817 static int
1818 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1819 	     unsigned int size)
1820 {
1821 	swreg tmp_reg;
1822 
1823 	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
1824 
1825 	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
1826 					 tmp_reg, meta->insn.dst_reg * 2, size);
1827 }
1828 
1829 static int
1830 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1831 	     unsigned int size)
1832 {
1833 	swreg tmp_reg;
1834 
1835 	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
1836 
1837 	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
1838 					 tmp_reg, meta->insn.dst_reg * 2, size);
1839 }
1840 
1841 static void
1842 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
1843 			   struct nfp_insn_meta *meta)
1844 {
1845 	s16 range_start = meta->pkt_cache.range_start;
1846 	s16 range_end = meta->pkt_cache.range_end;
1847 	swreg src_base, off;
1848 	u8 xfer_num, len;
1849 	bool indir;
1850 
1851 	off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
1852 	src_base = reg_a(meta->insn.src_reg * 2);
1853 	len = range_end - range_start;
1854 	xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;
1855 
1856 	indir = len > 8 * REG_WIDTH;
1857 	/* Setup PREV_ALU for indirect mode. */
1858 	if (indir)
1859 		wrp_immed(nfp_prog, reg_none(),
1860 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
1861 
1862 	/* Cache memory into transfer-in registers. */
1863 	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
1864 		     off, xfer_num - 1, true, indir);
1865 }
1866 
1867 static int
1868 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
1869 				   struct nfp_insn_meta *meta,
1870 				   unsigned int size)
1871 {
1872 	swreg dst_lo, dst_hi, src_lo;
1873 	u8 dst_gpr, idx;
1874 
1875 	idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
1876 	dst_gpr = meta->insn.dst_reg * 2;
1877 	dst_hi = reg_both(dst_gpr + 1);
1878 	dst_lo = reg_both(dst_gpr);
1879 	src_lo = reg_xfer(idx);
1880 
1881 	if (size < REG_WIDTH) {
1882 		wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
1883 		wrp_immed(nfp_prog, dst_hi, 0);
1884 	} else if (size == REG_WIDTH) {
1885 		wrp_mov(nfp_prog, dst_lo, src_lo);
1886 		wrp_immed(nfp_prog, dst_hi, 0);
1887 	} else {
1888 		swreg src_hi = reg_xfer(idx + 1);
1889 
1890 		wrp_mov(nfp_prog, dst_lo, src_lo);
1891 		wrp_mov(nfp_prog, dst_hi, src_hi);
1892 	}
1893 
1894 	return 0;
1895 }
1896 
1897 static int
1898 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
1899 			   struct nfp_insn_meta *meta, unsigned int size)
1900 {
1901 	u8 off = meta->insn.off - meta->pkt_cache.range_start;
1902 
1903 	if (WARN_ON_ONCE(!IS_ALIGNED(off, REG_WIDTH)))
1904 		return -EOPNOTSUPP;
1905 
1906 	return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);
1907 }
1908 
1909 static int
1910 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1911 	unsigned int size)
1912 {
1913 	if (meta->ldst_gather_len)
1914 		return nfp_cpp_memcpy(nfp_prog, meta);
1915 
1916 	if (meta->ptr.type == PTR_TO_CTX) {
1917 		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
1918 			return mem_ldx_xdp(nfp_prog, meta, size);
1919 		else
1920 			return mem_ldx_skb(nfp_prog, meta, size);
1921 	}
1922 
1923 	if (meta->ptr.type == PTR_TO_PACKET) {
1924 		if (meta->pkt_cache.range_end) {
1925 			if (meta->pkt_cache.do_init)
1926 				mem_ldx_data_init_pktcache(nfp_prog, meta);
1927 
1928 			return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
1929 		} else {
1930 			return mem_ldx_data(nfp_prog, meta, size);
1931 		}
1932 	}
1933 
1934 	if (meta->ptr.type == PTR_TO_STACK)
1935 		return mem_ldx_stack(nfp_prog, meta, size,
1936 				     meta->ptr.off + meta->ptr.var_off.value);
1937 
1938 	if (meta->ptr.type == PTR_TO_MAP_VALUE)
1939 		return mem_ldx_emem(nfp_prog, meta, size);
1940 
1941 	return -EOPNOTSUPP;
1942 }
1943 
1944 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1945 {
1946 	return mem_ldx(nfp_prog, meta, 1);
1947 }
1948 
1949 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1950 {
1951 	return mem_ldx(nfp_prog, meta, 2);
1952 }
1953 
1954 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1955 {
1956 	return mem_ldx(nfp_prog, meta, 4);
1957 }
1958 
1959 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1960 {
1961 	return mem_ldx(nfp_prog, meta, 8);
1962 }
1963 
1964 static int
1965 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1966 	    unsigned int size)
1967 {
1968 	u64 imm = meta->insn.imm; /* sign extend */
1969 	swreg off_reg;
1970 
1971 	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
1972 
1973 	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
1974 				  imm, size);
1975 }
1976 
1977 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1978 		  unsigned int size)
1979 {
1980 	if (meta->ptr.type == PTR_TO_PACKET)
1981 		return mem_st_data(nfp_prog, meta, size);
1982 
1983 	return -EOPNOTSUPP;
1984 }
1985 
1986 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1987 {
1988 	return mem_st(nfp_prog, meta, 1);
1989 }
1990 
1991 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1992 {
1993 	return mem_st(nfp_prog, meta, 2);
1994 }
1995 
1996 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1997 {
1998 	return mem_st(nfp_prog, meta, 4);
1999 }
2000 
2001 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2002 {
2003 	return mem_st(nfp_prog, meta, 8);
2004 }
2005 
2006 static int
2007 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2008 	     unsigned int size)
2009 {
2010 	swreg off_reg;
2011 
2012 	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2013 
2014 	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2015 				   meta->insn.src_reg * 2, size);
2016 }
2017 
2018 static int
2019 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2020 	      unsigned int size, unsigned int ptr_off)
2021 {
2022 	return mem_op_stack(nfp_prog, meta, size, ptr_off,
2023 			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
2024 			    false, wrp_lmem_store);
2025 }
2026 
2027 static int
2028 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2029 	unsigned int size)
2030 {
2031 	if (meta->ptr.type == PTR_TO_PACKET)
2032 		return mem_stx_data(nfp_prog, meta, size);
2033 
2034 	if (meta->ptr.type == PTR_TO_STACK)
2035 		return mem_stx_stack(nfp_prog, meta, size,
2036 				     meta->ptr.off + meta->ptr.var_off.value);
2037 
2038 	return -EOPNOTSUPP;
2039 }
2040 
2041 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2042 {
2043 	return mem_stx(nfp_prog, meta, 1);
2044 }
2045 
2046 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2047 {
2048 	return mem_stx(nfp_prog, meta, 2);
2049 }
2050 
2051 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2052 {
2053 	return mem_stx(nfp_prog, meta, 4);
2054 }
2055 
2056 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2057 {
2058 	return mem_stx(nfp_prog, meta, 8);
2059 }
2060 
2061 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2062 {
2063 	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
2064 
2065 	return 0;
2066 }
2067 
2068 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2069 {
2070 	const struct bpf_insn *insn = &meta->insn;
2071 	u64 imm = insn->imm; /* sign extend */
2072 	swreg or1, or2, tmp_reg;
2073 
2074 	or1 = reg_a(insn->dst_reg * 2);
2075 	or2 = reg_b(insn->dst_reg * 2 + 1);
2076 
2077 	if (imm & ~0U) {
2078 		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
2079 		emit_alu(nfp_prog, imm_a(nfp_prog),
2080 			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
2081 		or1 = imm_a(nfp_prog);
2082 	}
2083 
2084 	if (imm >> 32) {
2085 		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
2086 		emit_alu(nfp_prog, imm_b(nfp_prog),
2087 			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
2088 		or2 = imm_b(nfp_prog);
2089 	}
2090 
2091 	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
2092 	emit_br(nfp_prog, BR_BEQ, insn->off, 0);
2093 
2094 	return 0;
2095 }
2096 
2097 static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2098 {
2099 	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
2100 }
2101 
2102 static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2103 {
2104 	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
2105 }
2106 
2107 static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2108 {
2109 	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
2110 }
2111 
2112 static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2113 {
2114 	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
2115 }
2116 
2117 static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2118 {
2119 	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
2120 }
2121 
2122 static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2123 {
2124 	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
2125 }
2126 
2127 static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2128 {
2129 	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
2130 }
2131 
2132 static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2133 {
2134 	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
2135 }
2136 
2137 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2138 {
2139 	const struct bpf_insn *insn = &meta->insn;
2140 	u64 imm = insn->imm; /* sign extend */
2141 	swreg tmp_reg;
2142 
2143 	if (!imm) {
2144 		meta->skip = true;
2145 		return 0;
2146 	}
2147 
2148 	if (imm & ~0U) {
2149 		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
2150 		emit_alu(nfp_prog, reg_none(),
2151 			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
2152 		emit_br(nfp_prog, BR_BNE, insn->off, 0);
2153 	}
2154 
2155 	if (imm >> 32) {
2156 		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
2157 		emit_alu(nfp_prog, reg_none(),
2158 			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
2159 		emit_br(nfp_prog, BR_BNE, insn->off, 0);
2160 	}
2161 
2162 	return 0;
2163 }
2164 
2165 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2166 {
2167 	const struct bpf_insn *insn = &meta->insn;
2168 	u64 imm = insn->imm; /* sign extend */
2169 	swreg tmp_reg;
2170 
2171 	if (!imm) {
2172 		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
2173 			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
2174 		emit_br(nfp_prog, BR_BNE, insn->off, 0);
2175 		return 0;
2176 	}
2177 
2178 	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
2179 	emit_alu(nfp_prog, reg_none(),
2180 		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
2181 	emit_br(nfp_prog, BR_BNE, insn->off, 0);
2182 
2183 	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
2184 	emit_alu(nfp_prog, reg_none(),
2185 		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
2186 	emit_br(nfp_prog, BR_BNE, insn->off, 0);
2187 
2188 	return 0;
2189 }
2190 
2191 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2192 {
2193 	const struct bpf_insn *insn = &meta->insn;
2194 
2195 	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
2196 		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
2197 	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
2198 		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
2199 	emit_alu(nfp_prog, reg_none(),
2200 		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
2201 	emit_br(nfp_prog, BR_BEQ, insn->off, 0);
2202 
2203 	return 0;
2204 }
2205 
2206 static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2207 {
2208 	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
2209 }
2210 
2211 static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2212 {
2213 	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
2214 }
2215 
2216 static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2217 {
2218 	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
2219 }
2220 
2221 static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2222 {
2223 	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
2224 }
2225 
2226 static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2227 {
2228 	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
2229 }
2230 
2231 static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2232 {
2233 	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
2234 }
2235 
2236 static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2237 {
2238 	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
2239 }
2240 
2241 static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2242 {
2243 	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
2244 }
2245 
2246 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2247 {
2248 	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
2249 }
2250 
2251 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2252 {
2253 	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
2254 }
2255 
2256 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2257 {
2258 	switch (meta->insn.imm) {
2259 	case BPF_FUNC_xdp_adjust_head:
2260 		return adjust_head(nfp_prog, meta);
2261 	case BPF_FUNC_map_lookup_elem:
2262 		return map_lookup_stack(nfp_prog, meta);
2263 	default:
2264 		WARN_ONCE(1, "verifier allowed unsupported function\n");
2265 		return -EOPNOTSUPP;
2266 	}
2267 }
2268 
2269 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2270 {
2271 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
2272 
2273 	return 0;
2274 }
2275 
2276 static const instr_cb_t instr_cb[256] = {
2277 	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
2278 	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
2279 	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
2280 	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
2281 	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
2282 	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
2283 	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
2284 	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
2285 	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
2286 	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
2287 	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
2288 	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
2289 	[BPF_ALU64 | BPF_NEG] =		neg_reg64,
2290 	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
2291 	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
2292 	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
2293 	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
2294 	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
2295 	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
2296 	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
2297 	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
2298 	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
2299 	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
2300 	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
2301 	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
2302 	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
2303 	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
2304 	[BPF_ALU | BPF_NEG] =		neg_reg,
2305 	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
2306 	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
2307 	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
2308 	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
2309 	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
2310 	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
2311 	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
2312 	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
2313 	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
2314 	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
2315 	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
2316 	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
2317 	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
2318 	[BPF_STX | BPF_MEM | BPF_B] =	mem_stx1,
2319 	[BPF_STX | BPF_MEM | BPF_H] =	mem_stx2,
2320 	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
2321 	[BPF_STX | BPF_MEM | BPF_DW] =	mem_stx8,
2322 	[BPF_ST | BPF_MEM | BPF_B] =	mem_st1,
2323 	[BPF_ST | BPF_MEM | BPF_H] =	mem_st2,
2324 	[BPF_ST | BPF_MEM | BPF_W] =	mem_st4,
2325 	[BPF_ST | BPF_MEM | BPF_DW] =	mem_st8,
2326 	[BPF_JMP | BPF_JA | BPF_K] =	jump,
2327 	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
2328 	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
2329 	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
2330 	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
2331 	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
2332 	[BPF_JMP | BPF_JSGT | BPF_K] =  jsgt_imm,
2333 	[BPF_JMP | BPF_JSGE | BPF_K] =  jsge_imm,
2334 	[BPF_JMP | BPF_JSLT | BPF_K] =  jslt_imm,
2335 	[BPF_JMP | BPF_JSLE | BPF_K] =  jsle_imm,
2336 	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
2337 	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
2338 	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
2339 	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
2340 	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
2341 	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
2342 	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
2343 	[BPF_JMP | BPF_JSGT | BPF_X] =  jsgt_reg,
2344 	[BPF_JMP | BPF_JSGE | BPF_X] =  jsge_reg,
2345 	[BPF_JMP | BPF_JSLT | BPF_X] =  jslt_reg,
2346 	[BPF_JMP | BPF_JSLE | BPF_X] =  jsle_reg,
2347 	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
2348 	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
2349 	[BPF_JMP | BPF_CALL] =		call,
2350 	[BPF_JMP | BPF_EXIT] =		goto_out,
2351 };
2352 
2353 /* --- Assembler logic --- */
2354 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
2355 {
2356 	struct nfp_insn_meta *meta, *jmp_dst;
2357 	u32 idx, br_idx;
2358 
2359 	list_for_each_entry(meta, &nfp_prog->insns, l) {
2360 		if (meta->skip)
2361 			continue;
2362 		if (meta->insn.code == (BPF_JMP | BPF_CALL))
2363 			continue;
2364 		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
2365 			continue;
2366 
2367 		if (list_is_last(&meta->l, &nfp_prog->insns))
2368 			br_idx = nfp_prog->last_bpf_off;
2369 		else
2370 			br_idx = list_next_entry(meta, l)->off - 1;
2371 
2372 		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
2373 			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
2374 			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
2375 			return -ELOOP;
2376 		}
2377 		/* Leave special branches for later */
2378 		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
2379 		    RELO_BR_REL)
2380 			continue;
2381 
2382 		if (!meta->jmp_dst) {
2383 			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
2384 			return -ELOOP;
2385 		}
2386 
2387 		jmp_dst = meta->jmp_dst;
2388 
2389 		if (jmp_dst->skip) {
2390 			pr_err("Branch landing on removed instruction!!\n");
2391 			return -ELOOP;
2392 		}
2393 
2394 		for (idx = meta->off; idx <= br_idx; idx++) {
2395 			if (!nfp_is_br(nfp_prog->prog[idx]))
2396 				continue;
2397 			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
2398 		}
2399 	}
2400 
2401 	return 0;
2402 }
2403 
2404 static void nfp_intro(struct nfp_prog *nfp_prog)
2405 {
2406 	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
2407 	emit_alu(nfp_prog, plen_reg(nfp_prog),
2408 		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
2409 }
2410 
2411 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
2412 {
2413 	/* TC direct-action mode:
2414 	 *   0,1   ok        NOT SUPPORTED[1]
2415 	 *   2   drop  0x22 -> drop,  count as stat1
2416 	 *   4,5 nuke  0x02 -> drop
2417 	 *   7  redir  0x44 -> redir, count as stat2
2418 	 *   * unspec  0x11 -> pass,  count as stat0
2419 	 *
2420 	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
2421 	 *     the exact decision made.  We are forced to support UNSPEC
2422 	 *     to handle aborts so that's the only one we handle for passing
2423 	 *     packets up the stack.
2424 	 */
2425 	/* Target for aborts */
2426 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
2427 
2428 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2429 
2430 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2431 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
2432 
2433 	/* Target for normal exits */
2434 	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
2435 
2436 	/* if R0 > 7 jump to abort */
2437 	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
2438 	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
2439 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2440 
2441 	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
2442 	wrp_immed(nfp_prog, reg_b(3), 0x41001211);
2443 
2444 	emit_shf(nfp_prog, reg_a(1),
2445 		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
2446 
2447 	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
2448 	emit_shf(nfp_prog, reg_a(2),
2449 		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
2450 
2451 	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
2452 	emit_shf(nfp_prog, reg_b(2),
2453 		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
2454 
2455 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2456 
2457 	emit_shf(nfp_prog, reg_b(2),
2458 		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
2459 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
2460 }
2461 
2462 static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
2463 {
2464 	/* XDP return codes:
2465 	 *   0 aborted  0x82 -> drop,  count as stat3
2466 	 *   1    drop  0x22 -> drop,  count as stat1
2467 	 *   2    pass  0x11 -> pass,  count as stat0
2468 	 *   3      tx  0x44 -> redir, count as stat2
2469 	 *   * unknown  0x82 -> drop,  count as stat3
2470 	 */
2471 	/* Target for aborts */
2472 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
2473 
2474 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2475 
2476 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2477 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
2478 
2479 	/* Target for normal exits */
2480 	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
2481 
2482 	/* if R0 > 3 jump to abort */
2483 	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
2484 	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
2485 
2486 	wrp_immed(nfp_prog, reg_b(2), 0x44112282);
2487 
2488 	emit_shf(nfp_prog, reg_a(1),
2489 		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);
2490 
2491 	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
2492 	emit_shf(nfp_prog, reg_b(2),
2493 		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
2494 
2495 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2496 
2497 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2498 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
2499 }
2500 
2501 static void nfp_outro(struct nfp_prog *nfp_prog)
2502 {
2503 	switch (nfp_prog->type) {
2504 	case BPF_PROG_TYPE_SCHED_CLS:
2505 		nfp_outro_tc_da(nfp_prog);
2506 		break;
2507 	case BPF_PROG_TYPE_XDP:
2508 		nfp_outro_xdp(nfp_prog);
2509 		break;
2510 	default:
2511 		WARN_ON(1);
2512 	}
2513 }
2514 
2515 static int nfp_translate(struct nfp_prog *nfp_prog)
2516 {
2517 	struct nfp_insn_meta *meta;
2518 	int err;
2519 
2520 	nfp_intro(nfp_prog);
2521 	if (nfp_prog->error)
2522 		return nfp_prog->error;
2523 
2524 	list_for_each_entry(meta, &nfp_prog->insns, l) {
2525 		instr_cb_t cb = instr_cb[meta->insn.code];
2526 
2527 		meta->off = nfp_prog_current_offset(nfp_prog);
2528 
2529 		if (meta->skip) {
2530 			nfp_prog->n_translated++;
2531 			continue;
2532 		}
2533 
2534 		if (nfp_meta_has_prev(nfp_prog, meta) &&
2535 		    nfp_meta_prev(meta)->double_cb)
2536 			cb = nfp_meta_prev(meta)->double_cb;
2537 		if (!cb)
2538 			return -ENOENT;
2539 		err = cb(nfp_prog, meta);
2540 		if (err)
2541 			return err;
2542 
2543 		nfp_prog->n_translated++;
2544 	}
2545 
2546 	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
2547 
2548 	nfp_outro(nfp_prog);
2549 	if (nfp_prog->error)
2550 		return nfp_prog->error;
2551 
2552 	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
2553 	if (nfp_prog->error)
2554 		return nfp_prog->error;
2555 
2556 	return nfp_fixup_branches(nfp_prog);
2557 }
2558 
2559 /* --- Optimizations --- */
2560 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
2561 {
2562 	struct nfp_insn_meta *meta;
2563 
2564 	list_for_each_entry(meta, &nfp_prog->insns, l) {
2565 		struct bpf_insn insn = meta->insn;
2566 
2567 		/* Programs converted from cBPF start with register xoring */
2568 		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
2569 		    insn.src_reg == insn.dst_reg)
2570 			continue;
2571 
2572 		/* Programs start with R6 = R1 but we ignore the skb pointer */
2573 		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
2574 		    insn.src_reg == 1 && insn.dst_reg == 6)
2575 			meta->skip = true;
2576 
2577 		/* Return as soon as something doesn't match */
2578 		if (!meta->skip)
2579 			return;
2580 	}
2581 }
2582 
2583 /* Remove masking after load since our load guarantees this is not needed */
2584 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
2585 {
2586 	struct nfp_insn_meta *meta1, *meta2;
2587 	const s32 exp_mask[] = {
2588 		[BPF_B] = 0x000000ffU,
2589 		[BPF_H] = 0x0000ffffU,
2590 		[BPF_W] = 0xffffffffU,
2591 	};
2592 
2593 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
2594 		struct bpf_insn insn, next;
2595 
2596 		insn = meta1->insn;
2597 		next = meta2->insn;
2598 
2599 		if (BPF_CLASS(insn.code) != BPF_LD)
2600 			continue;
2601 		if (BPF_MODE(insn.code) != BPF_ABS &&
2602 		    BPF_MODE(insn.code) != BPF_IND)
2603 			continue;
2604 
2605 		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
2606 			continue;
2607 
2608 		if (!exp_mask[BPF_SIZE(insn.code)])
2609 			continue;
2610 		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
2611 			continue;
2612 
2613 		if (next.src_reg || next.dst_reg)
2614 			continue;
2615 
2616 		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
2617 			continue;
2618 
2619 		meta2->skip = true;
2620 	}
2621 }
2622 
2623 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
2624 {
2625 	struct nfp_insn_meta *meta1, *meta2, *meta3;
2626 
2627 	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
2628 		struct bpf_insn insn, next1, next2;
2629 
2630 		insn = meta1->insn;
2631 		next1 = meta2->insn;
2632 		next2 = meta3->insn;
2633 
2634 		if (BPF_CLASS(insn.code) != BPF_LD)
2635 			continue;
2636 		if (BPF_MODE(insn.code) != BPF_ABS &&
2637 		    BPF_MODE(insn.code) != BPF_IND)
2638 			continue;
2639 		if (BPF_SIZE(insn.code) != BPF_W)
2640 			continue;
2641 
2642 		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
2643 		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
2644 		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
2645 		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
2646 			continue;
2647 
2648 		if (next1.src_reg || next1.dst_reg ||
2649 		    next2.src_reg || next2.dst_reg)
2650 			continue;
2651 
2652 		if (next1.imm != 0x20 || next2.imm != 0x20)
2653 			continue;
2654 
2655 		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
2656 		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
2657 			continue;
2658 
2659 		meta2->skip = true;
2660 		meta3->skip = true;
2661 	}
2662 }
2663 
2664 /* load/store pair that forms memory copy sould look like the following:
2665  *
2666  *   ld_width R, [addr_src + offset_src]
2667  *   st_width [addr_dest + offset_dest], R
2668  *
2669  * The destination register of load and source register of store should
2670  * be the same, load and store should also perform at the same width.
2671  * If either of addr_src or addr_dest is stack pointer, we don't do the
2672  * CPP optimization as stack is modelled by registers on NFP.
2673  */
2674 static bool
2675 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
2676 		    struct nfp_insn_meta *st_meta)
2677 {
2678 	struct bpf_insn *ld = &ld_meta->insn;
2679 	struct bpf_insn *st = &st_meta->insn;
2680 
2681 	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
2682 		return false;
2683 
2684 	if (ld_meta->ptr.type != PTR_TO_PACKET)
2685 		return false;
2686 
2687 	if (st_meta->ptr.type != PTR_TO_PACKET)
2688 		return false;
2689 
2690 	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
2691 		return false;
2692 
2693 	if (ld->dst_reg != st->src_reg)
2694 		return false;
2695 
2696 	/* There is jump to the store insn in this pair. */
2697 	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
2698 		return false;
2699 
2700 	return true;
2701 }
2702 
2703 /* Currently, we only support chaining load/store pairs if:
2704  *
2705  *  - Their address base registers are the same.
2706  *  - Their address offsets are in the same order.
2707  *  - They operate at the same memory width.
2708  *  - There is no jump into the middle of them.
2709  */
2710 static bool
2711 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
2712 			      struct nfp_insn_meta *st_meta,
2713 			      struct bpf_insn *prev_ld,
2714 			      struct bpf_insn *prev_st)
2715 {
2716 	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
2717 	struct bpf_insn *ld = &ld_meta->insn;
2718 	struct bpf_insn *st = &st_meta->insn;
2719 	s16 prev_ld_off, prev_st_off;
2720 
2721 	/* This pair is the start pair. */
2722 	if (!prev_ld)
2723 		return true;
2724 
2725 	prev_size = BPF_LDST_BYTES(prev_ld);
2726 	curr_size = BPF_LDST_BYTES(ld);
2727 	prev_ld_base = prev_ld->src_reg;
2728 	prev_st_base = prev_st->dst_reg;
2729 	prev_ld_dst = prev_ld->dst_reg;
2730 	prev_ld_off = prev_ld->off;
2731 	prev_st_off = prev_st->off;
2732 
2733 	if (ld->dst_reg != prev_ld_dst)
2734 		return false;
2735 
2736 	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
2737 		return false;
2738 
2739 	if (curr_size != prev_size)
2740 		return false;
2741 
2742 	/* There is jump to the head of this pair. */
2743 	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
2744 		return false;
2745 
2746 	/* Both in ascending order. */
2747 	if (prev_ld_off + prev_size == ld->off &&
2748 	    prev_st_off + prev_size == st->off)
2749 		return true;
2750 
2751 	/* Both in descending order. */
2752 	if (ld->off + curr_size == prev_ld_off &&
2753 	    st->off + curr_size == prev_st_off)
2754 		return true;
2755 
2756 	return false;
2757 }
2758 
2759 /* Return TRUE if cross memory access happens. Cross memory access means
2760  * store area is overlapping with load area that a later load might load
2761  * the value from previous store, for this case we can't treat the sequence
2762  * as an memory copy.
2763  */
2764 static bool
2765 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
2766 		 struct nfp_insn_meta *head_st_meta)
2767 {
2768 	s16 head_ld_off, head_st_off, ld_off;
2769 
2770 	/* Different pointer types does not overlap. */
2771 	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
2772 		return false;
2773 
2774 	/* load and store are both PTR_TO_PACKET, check ID info.  */
2775 	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
2776 		return true;
2777 
2778 	/* Canonicalize the offsets. Turn all of them against the original
2779 	 * base register.
2780 	 */
2781 	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
2782 	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
2783 	ld_off = ld->off + head_ld_meta->ptr.off;
2784 
2785 	/* Ascending order cross. */
2786 	if (ld_off > head_ld_off &&
2787 	    head_ld_off < head_st_off && ld_off >= head_st_off)
2788 		return true;
2789 
2790 	/* Descending order cross. */
2791 	if (ld_off < head_ld_off &&
2792 	    head_ld_off > head_st_off && ld_off <= head_st_off)
2793 		return true;
2794 
2795 	return false;
2796 }
2797 
2798 /* This pass try to identify the following instructoin sequences.
2799  *
2800  *   load R, [regA + offA]
2801  *   store [regB + offB], R
2802  *   load R, [regA + offA + const_imm_A]
2803  *   store [regB + offB + const_imm_A], R
2804  *   load R, [regA + offA + 2 * const_imm_A]
2805  *   store [regB + offB + 2 * const_imm_A], R
2806  *   ...
2807  *
2808  * Above sequence is typically generated by compiler when lowering
2809  * memcpy. NFP prefer using CPP instructions to accelerate it.
2810  */
2811 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
2812 {
2813 	struct nfp_insn_meta *head_ld_meta = NULL;
2814 	struct nfp_insn_meta *head_st_meta = NULL;
2815 	struct nfp_insn_meta *meta1, *meta2;
2816 	struct bpf_insn *prev_ld = NULL;
2817 	struct bpf_insn *prev_st = NULL;
2818 	u8 count = 0;
2819 
2820 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
2821 		struct bpf_insn *ld = &meta1->insn;
2822 		struct bpf_insn *st = &meta2->insn;
2823 
2824 		/* Reset record status if any of the following if true:
2825 		 *   - The current insn pair is not load/store.
2826 		 *   - The load/store pair doesn't chain with previous one.
2827 		 *   - The chained load/store pair crossed with previous pair.
2828 		 *   - The chained load/store pair has a total size of memory
2829 		 *     copy beyond 128 bytes which is the maximum length a
2830 		 *     single NFP CPP command can transfer.
2831 		 */
2832 		if (!curr_pair_is_memcpy(meta1, meta2) ||
2833 		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
2834 						   prev_st) ||
2835 		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
2836 						       head_st_meta) ||
2837 				      head_ld_meta->ldst_gather_len >= 128))) {
2838 			if (!count)
2839 				continue;
2840 
2841 			if (count > 1) {
2842 				s16 prev_ld_off = prev_ld->off;
2843 				s16 prev_st_off = prev_st->off;
2844 				s16 head_ld_off = head_ld_meta->insn.off;
2845 
2846 				if (prev_ld_off < head_ld_off) {
2847 					head_ld_meta->insn.off = prev_ld_off;
2848 					head_st_meta->insn.off = prev_st_off;
2849 					head_ld_meta->ldst_gather_len =
2850 						-head_ld_meta->ldst_gather_len;
2851 				}
2852 
2853 				head_ld_meta->paired_st = &head_st_meta->insn;
2854 				head_st_meta->skip = true;
2855 			} else {
2856 				head_ld_meta->ldst_gather_len = 0;
2857 			}
2858 
2859 			/* If the chain is ended by an load/store pair then this
2860 			 * could serve as the new head of the the next chain.
2861 			 */
2862 			if (curr_pair_is_memcpy(meta1, meta2)) {
2863 				head_ld_meta = meta1;
2864 				head_st_meta = meta2;
2865 				head_ld_meta->ldst_gather_len =
2866 					BPF_LDST_BYTES(ld);
2867 				meta1 = nfp_meta_next(meta1);
2868 				meta2 = nfp_meta_next(meta2);
2869 				prev_ld = ld;
2870 				prev_st = st;
2871 				count = 1;
2872 			} else {
2873 				head_ld_meta = NULL;
2874 				head_st_meta = NULL;
2875 				prev_ld = NULL;
2876 				prev_st = NULL;
2877 				count = 0;
2878 			}
2879 
2880 			continue;
2881 		}
2882 
2883 		if (!head_ld_meta) {
2884 			head_ld_meta = meta1;
2885 			head_st_meta = meta2;
2886 		} else {
2887 			meta1->skip = true;
2888 			meta2->skip = true;
2889 		}
2890 
2891 		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
2892 		meta1 = nfp_meta_next(meta1);
2893 		meta2 = nfp_meta_next(meta2);
2894 		prev_ld = ld;
2895 		prev_st = st;
2896 		count++;
2897 	}
2898 }
2899 
2900 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
2901 {
2902 	nfp_bpf_opt_reg_init(nfp_prog);
2903 
2904 	nfp_bpf_opt_ld_mask(nfp_prog);
2905 	nfp_bpf_opt_ld_shift(nfp_prog);
2906 	nfp_bpf_opt_ldst_gather(nfp_prog);
2907 
2908 	return 0;
2909 }
2910 
2911 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
2912 {
2913 	__le64 *ustore = (__force __le64 *)prog;
2914 	int i;
2915 
2916 	for (i = 0; i < len; i++) {
2917 		int err;
2918 
2919 		err = nfp_ustore_check_valid_no_ecc(prog[i]);
2920 		if (err)
2921 			return err;
2922 
2923 		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
2924 	}
2925 
2926 	return 0;
2927 }
2928 
2929 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
2930 {
2931 	void *prog;
2932 
2933 	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
2934 	if (!prog)
2935 		return;
2936 
2937 	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
2938 	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
2939 	kvfree(nfp_prog->prog);
2940 	nfp_prog->prog = prog;
2941 }
2942 
2943 int nfp_bpf_jit(struct nfp_prog *nfp_prog)
2944 {
2945 	int ret;
2946 
2947 	ret = nfp_bpf_optimize(nfp_prog);
2948 	if (ret)
2949 		return ret;
2950 
2951 	ret = nfp_translate(nfp_prog);
2952 	if (ret) {
2953 		pr_err("Translation failed with error %d (translated: %u)\n",
2954 		       ret, nfp_prog->n_translated);
2955 		return -EINVAL;
2956 	}
2957 
2958 	nfp_bpf_prog_trim(nfp_prog);
2959 
2960 	return ret;
2961 }
2962 
2963 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
2964 {
2965 	struct nfp_insn_meta *meta;
2966 
2967 	/* Another pass to record jump information. */
2968 	list_for_each_entry(meta, &nfp_prog->insns, l) {
2969 		u64 code = meta->insn.code;
2970 
2971 		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
2972 		    BPF_OP(code) != BPF_CALL) {
2973 			struct nfp_insn_meta *dst_meta;
2974 			unsigned short dst_indx;
2975 
2976 			dst_indx = meta->n + 1 + meta->insn.off;
2977 			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
2978 						     cnt);
2979 
2980 			meta->jmp_dst = dst_meta;
2981 			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
2982 		}
2983 	}
2984 }
2985 
2986 bool nfp_bpf_supported_opcode(u8 code)
2987 {
2988 	return !!instr_cb[code];
2989 }
2990 
2991 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
2992 {
2993 	unsigned int i;
2994 	u64 *prog;
2995 	int err;
2996 
2997 	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
2998 		       GFP_KERNEL);
2999 	if (!prog)
3000 		return ERR_PTR(-ENOMEM);
3001 
3002 	for (i = 0; i < nfp_prog->prog_len; i++) {
3003 		enum nfp_relo_type special;
3004 		u32 val;
3005 
3006 		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
3007 		switch (special) {
3008 		case RELO_NONE:
3009 			continue;
3010 		case RELO_BR_REL:
3011 			br_add_offset(&prog[i], bv->start_off);
3012 			break;
3013 		case RELO_BR_GO_OUT:
3014 			br_set_offset(&prog[i],
3015 				      nfp_prog->tgt_out + bv->start_off);
3016 			break;
3017 		case RELO_BR_GO_ABORT:
3018 			br_set_offset(&prog[i],
3019 				      nfp_prog->tgt_abort + bv->start_off);
3020 			break;
3021 		case RELO_BR_NEXT_PKT:
3022 			br_set_offset(&prog[i], bv->tgt_done);
3023 			break;
3024 		case RELO_BR_HELPER:
3025 			val = br_get_offset(prog[i]);
3026 			val -= BR_OFF_RELO;
3027 			switch (val) {
3028 			case BPF_FUNC_map_lookup_elem:
3029 				val = nfp_prog->bpf->helpers.map_lookup;
3030 				break;
3031 			default:
3032 				pr_err("relocation of unknown helper %d\n",
3033 				       val);
3034 				err = -EINVAL;
3035 				goto err_free_prog;
3036 			}
3037 			br_set_offset(&prog[i], val);
3038 			break;
3039 		case RELO_IMMED_REL:
3040 			immed_add_value(&prog[i], bv->start_off);
3041 			break;
3042 		}
3043 
3044 		prog[i] &= ~OP_RELO_TYPE;
3045 	}
3046 
3047 	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
3048 	if (err)
3049 		goto err_free_prog;
3050 
3051 	return prog;
3052 
3053 err_free_prog:
3054 	kfree(prog);
3055 	return ERR_PTR(err);
3056 }
3057