/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"

/* --- NFP prog --- */
/* The for-each macros for "multiple" entries provide pos and next<n>
 * pointers.  It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);			\
	     &(nfp_prog)->insns != &pos->l &&			\
	     &(nfp_prog)->insns != &next->l;			\
	     pos = nfp_meta_next(pos),				\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),			\
	     next2 = list_next_entry(next, l);			\
	     &(nfp_prog)->insns != &pos->l &&			\
	     &(nfp_prog)->insns != &next->l &&			\
	     &(nfp_prog)->insns != &next2->l;			\
	     pos = nfp_meta_next(pos),				\
	     next = nfp_meta_next(pos),				\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.next != &nfp_prog->insns;
}

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->start_off + nfp_prog->prog_len;
}

static unsigned int
nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
{
	return offset - nfp_prog->start_off;
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	 u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
{
	if (defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}
	__emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
}

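/* Note on defer (illustrative): an NFP branch can carry up to two
 * "defer" slots, i.e. instructions placed after the branch that still
 * execute before the jump takes effect, which is why emit_br_def()
 * rejects defer > 2 above.
 */
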
static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);
}

static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
	       u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BBYTE_BASE |
		FIELD_PREP(OP_BB_A_SRC, areg) |
		FIELD_PREP(OP_BB_BYTE, byte) |
		FIELD_PREP(OP_BB_B_SRC, breg) |
		FIELD_PREP(OP_BB_I8, imm8) |
		FIELD_PREP(OP_BB_EQ, equal) |
		FIELD_PREP(OP_BB_DEFBR, defer) |
		FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BB_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_byte_neq(struct nfp_prog *nfp_prog,
		 swreg src, u8 imm, u8 byte, u16 addr, u8 defer)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), src, reg_imm(imm), &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
		       defer, reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
		     invert, shift, reg.wr_both,
		     reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

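/* Shift encoding note: the shifter is expressed in terms of right
 * shifts, so __emit_shf() encodes a left shift by N as a right shift
 * by 32 - N (the SHF_SC_L_SHF adjustment above).
 */
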
static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
		    false, reg.src_lmextn);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

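/* Worked example (illustrative): imm = 0x00abcd00 has bits above
 * 0xffff but none in 0xff0000ff, so it packs as val = 0xabcd with
 * IMMED_SHIFT_1B, i.e. the hardware places the 16-bit value one byte
 * up to reconstruct val << 8.
 */
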
static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}

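/* Fallback path (illustrative): if neither @imm nor ~@imm packs,
 * wrp_immed() above emits two instructions, e.g. for 0x12345678 it
 * zero-extends 0x5678 into the register and then writes 0x1234 into
 * the upper half-word (IMMED_WIDTH_WORD, shifted by two bytes).
 */
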
/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
	       enum br_special special)
{
	emit_br(nfp_prog, mask, 0, 0);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_BR_SPECIAL, special);
}

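/* Note: wrp_br_special() emits a branch with address 0 and stamps the
 * OP_BR_SPECIAL field into the just-pushed instruction word.  Such
 * branches are deliberately skipped by the branch fixup pass and are
 * expected to be pointed at the proper exit code by a later stage.
 */
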
static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, true);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

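/* Illustrative: data_ld() with size == 2 still reads 4 bytes; since
 * the data is big endian the two wanted bytes land in the top half of
 * the xfer register, and the right shift by (4 - 2) * 8 = 16 moves
 * them into the low bits of the destination.
 */
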
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
		   u8 dst_gpr, int size)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in @offset and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
		 reg_a(src_gpr), offset, sz / 4 - 1, true);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

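/* Bounds check note: both helpers above subtract the access end
 * offset from the packet length and take the abort branch on borrow
 * (BR_BLO), i.e. whenever offset + size would read past the end of
 * the packet.
 */
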
static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, true);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new GPR
		 * that means we are loading a second part of the LMEM word into
		 * a new GPR.  IOW we've already read that LMEM word and
		 * therefore it has already been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations need RMW,
		 * the middle locations will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK; they all have
		 * the same alignment.  We depend on the low bits of the value
		 * being discarded when written to the LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because the offset is
		 * ORed in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 8 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

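/* Summary (roughly) of the addressing strategies above:
 *  - accesses within the bottom 64B use the preconfigured LMaddr0;
 *  - a constant access inside one aligned 32B window points LMaddr3
 *    at the window once and indexes with *l$index3[off];
 *  - everything else (incl. non-constant pointers) points LMaddr3 at
 *    the first word and post-increments through the access.
 * The nops cover the delay before a freshly written LMaddr is usable.
 */
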
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

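/* The early returns above exploit ALU identities, e.g. (illustrative):
 *   dst & 0          -> load 0      dst & 0xffffffff -> no-op
 *   dst | 0xffffffff -> load ~0U    dst | 0          -> no-op
 *   dst ^ 0xffffffff -> NOT dst     dst ^ 0          -> no-op
 * so the immediate never needs to be materialized in those cases.
 */
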
static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

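/* The 64-bit compare above is two 32-bit ALU ops: the low words are
 * subtracted first and ALU_OP_SUB_C consumes the borrow on the high
 * words, so the condition codes tested by the final branch reflect
 * the full 64-bit values.
 */
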
static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg, breg;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}

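/* Byte-swap trace (illustrative), for reg_in = 0xaabbccdd:
 *   ror 8, bmask 0xf (all bytes written):     gpr = 0xddaabbcc
 *   ror 16, bmask 0x5 (bytes 0 and 2 fixed):  gpr = 0xddccbbaa
 */
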
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, 32 - insn->imm);
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}

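/* For shifts under 32 the high word above is built with a double
 * shift: SHF_SC_R_DSHF shifts the 64-bit pair [dst + 1:dst] right by
 * 32 - imm, which equals (hi << imm) | (lo >> (32 - imm)); the low
 * word is then shifted left on its own.
 */
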
static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, insn->imm);
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}

static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}

static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	struct nfp_insn_meta *prev = nfp_meta_prev(meta);
	u32 imm_lo, imm_hi;
	u8 dst;

	dst = prev->insn.dst_reg * 2;
	imm_lo = prev->insn.imm;
	imm_hi = meta->insn.imm;

	wrp_immed(nfp_prog, reg_both(dst), imm_lo);

	/* mov is always 1 insn, load imm may be two, so try to use mov */
	if (imm_hi == imm_lo)
		wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
	else
		wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);

	return 0;
}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	meta->double_cb = imm_ld8_part2;
	return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}

static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
			    true, wrp_lmem_load);
}

static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	switch (meta->insn.off) {
	case offsetof(struct sk_buff, len):
		if (size != FIELD_SIZEOF(struct sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
		break;
	case offsetof(struct sk_buff, data):
		if (size != sizeof(void *))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct sk_buff, cb) +
	     offsetof(struct bpf_skb_data_end, data_end):
		if (size != sizeof(void *))
			return -EOPNOTSUPP;
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	if (size != sizeof(void *))
		return -EINVAL;

	switch (meta->insn.off) {
	case offsetof(struct xdp_buff, data):
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_buff, data_end):
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
				  meta->insn.dst_reg * 2, size);
}

static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_CTX) {
		if (nfp_prog->act == NN_ACT_XDP)
			return mem_ldx_xdp(nfp_prog, meta, size);
		else
			return mem_ldx_skb(nfp_prog, meta, size);
	}

	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_ldx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_ldx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}

static int
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    unsigned int size)
{
	u64 imm = meta->insn.imm; /* sign extend */
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				  imm, size);
}

static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_st_data(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 1);
}

static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 2);
}

static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 4);
}

static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_st(nfp_prog, meta, 8);
}

static int
mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg off_reg;

	off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
				   meta->insn.src_reg * 2, size);
}

static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{
	return mem_op_stack(nfp_prog, meta, size, ptr_off,
			    meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
			    false, wrp_lmem_store);
}

static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_stx_data(nfp_prog, meta, size);

	if (meta->ptr.type == PTR_TO_STACK)
		return mem_stx_stack(nfp_prog, meta, size,
				     meta->ptr.off + meta->ptr.var_off.value);

	return -EOPNOTSUPP;
}

static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 1);
}

static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 2);
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 4);
}

static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_stx(nfp_prog, meta, 8);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta->insn.off < 0) /* TODO */
		return -EOPNOTSUPP;
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}

static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

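/* Equality trick above: each 32-bit half is XORed with its part of
 * the immediate and the results are ORed; the OR is zero if and only
 * if all 64 bits match, so one BR_BEQ suffices.  A half that is zero
 * in the immediate skips its XOR and feeds the register into the OR
 * directly.
 */
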
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}

static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}

static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);

	return 0;
}

static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
	[BPF_STX | BPF_MEM | BPF_B] =	mem_stx1,
	[BPF_STX | BPF_MEM | BPF_H] =	mem_stx2,
	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
	[BPF_STX | BPF_MEM | BPF_DW] =	mem_stx8,
	[BPF_ST | BPF_MEM | BPF_B] =	mem_st1,
	[BPF_ST | BPF_MEM | BPF_H] =	mem_st2,
	[BPF_ST | BPF_MEM | BPF_W] =	mem_st4,
	[BPF_ST | BPF_MEM | BPF_DW] =	mem_st8,
	[BPF_JMP | BPF_JA | BPF_K] =	jump,
	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
	[BPF_JMP | BPF_EXIT] =		goto_out,
};

1903 /* --- Misc code --- */
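/* Split a branch target into the OP_BR_ADDR_LO field plus OP_BR_ADDR_HI,
 * which is set to 1 whenever the offset does not fit in the low field alone.
 */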
static void br_set_offset(u64 *instr, u16 offset)
{
	u16 addr_lo, addr_hi;

	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = offset != addr_lo;
	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
}

/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *next;
	u32 off, br_idx;
	u32 idx;

	nfp_for_each_insn_walk2(nfp_prog, meta, next) {
		if (meta->skip)
			continue;
		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
			continue;

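		/* The branch is the last uword emitted for this BPF
		 * instruction, i.e. the one just before the next
		 * instruction's first uword.
		 */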
		br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
			return -ELOOP;
		}
		/* Leave special branches for later */
		if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
			continue;

		/* Find the target offset in assembler realm */
		off = meta->insn.off;
		if (!off) {
			pr_err("Fixup found zero offset!!\n");
			return -ELOOP;
		}

		while (off && nfp_meta_has_next(nfp_prog, next)) {
			next = nfp_meta_next(next);
			off--;
		}
		if (off) {
			pr_err("Fixup found too large jump!! %d\n", off);
			return -ELOOP;
		}

		if (next->skip) {
			pr_err("Branch landing on removed instruction!!\n");
			return -ELOOP;
		}

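		/* A single BPF jump may have produced several NFP branches
		 * (e.g. the 64-bit immediate compares); patch every branch
		 * emitted for this instruction.
		 */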
		for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
		     idx <= br_idx; idx++) {
			if (!nfp_is_br(nfp_prog->prog[idx]))
				continue;
			br_set_offset(&nfp_prog->prog[idx], next->off);
		}
	}

	/* Fix up 'goto out's separately; they can be scattered around */
	for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
		enum br_special special;

		if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
			continue;

		special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
		switch (special) {
		case OP_BR_NORMAL:
			break;
		case OP_BR_GO_OUT:
			br_set_offset(&nfp_prog->prog[br_idx],
				      nfp_prog->tgt_out);
			break;
		case OP_BR_GO_ABORT:
			br_set_offset(&nfp_prog->prog[br_idx],
				      nfp_prog->tgt_abort);
			break;
		}

		nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
	}

	return 0;
}

static void nfp_intro(struct nfp_prog *nfp_prog)
{
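	/* Extract the 14-bit packet length from the packet vector:
	 * plen = pv_len & GENMASK(13, 0).
	 */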
	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}

static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
{
	const u8 act2code[] = {
		[NN_ACT_TC_DROP]  = 0x22,
		[NN_ACT_TC_REDIR] = 0x24
	};
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
	wrp_immed(nfp_prog, reg_both(0), 0);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
	/* Legacy TC mode:
	 *   0        0x11 -> pass,  count as stat0
	 *  -1  drop  0x22 -> drop,  count as stat1
	 *     redir  0x24 -> redir, count as stat1
	 *  ife mark  0x21 -> pass,  count as stat1
	 *  ife + tx  0x24 -> redir, count as stat1
	 */
	emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
		      SHF_SC_L_SHF, 16);
}

static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1   ok        NOT SUPPORTED[1]
	 *   2   drop  0x22 -> drop,  count as stat1
	 *   4,5 nuke  0x02 -> drop
	 *   7  redir  0x44 -> redir, count as stat2
	 *   * unspec  0x11 -> pass,  count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts, so that is the only verdict we use for
	 *     passing packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

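	/* reg_b(2) and reg_b(3) are nibble-wide lookup tables indexed by R0:
	 * entry i of 0x41221211 is the low nibble and entry i of 0x41001211
	 * the high nibble of the result code, e.g. R0 == 2 (drop) yields
	 * 0x22 and R0 == 7 (redirect) yields 0x44.
	 */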
	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

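	/* The ALU ops writing to reg_none() below look redundant but load
	 * a(1) (= R0 * 4) as the indirect shift amount for the following
	 * SHF instructions, which extract one nibble from each table.
	 */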
	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0 aborted  0x82 -> drop,  count as stat3
	 *   1    drop  0x22 -> drop,  count as stat1
	 *   2    pass  0x11 -> pass,  count as stat0
	 *   3      tx  0x44 -> redir, count as stat2
	 *   * unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

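	/* reg_b(2) is a byte-wide lookup table indexed by R0: byte i of
	 * 0x44112282 is the result code, e.g. XDP_PASS (2) yields 0x11.
	 * R0 * 8 (the shift by 3) selects the byte via an indirect shift.
	 */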
	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->act) {
	case NN_ACT_DIRECT:
		nfp_outro_tc_da(nfp_prog);
		break;
	case NN_ACT_TC_DROP:
	case NN_ACT_TC_REDIR:
		nfp_outro_tc_legacy(nfp_prog);
		break;
	case NN_ACT_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	}
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

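		/* If the previous instruction registered a callback for its
		 * second half (e.g. a 64-bit immediate load spans two BPF
		 * instructions), use that instead of the table lookup.
		 */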
		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

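	/* Pad the tail with nops so the microengine's instruction prefetch
	 * window never reaches past valid code.
	 */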
	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

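/* Copy each BPF instruction into its own metadata node so later passes can
 * annotate it (offsets, skip flags) without touching the original program.
 */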
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		struct nfp_insn_meta *meta;

		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	return 0;
}

/* --- Optimizations --- */
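/* Skip the "R6 = R1" prologue move (the context pointer is implicit on the
 * NFP), tolerating the register-zeroing XORs that programs converted from
 * cBPF begin with; stop at the first instruction matching neither pattern.
 */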
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Remove masking after load since our load guarantees this is not needed */
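/* Matches e.g. "r0 = *(u16 *)(pkt + off); r0 &= 0xffff" - the AND on R0 is
 * redundant because the data_ld helpers already zero-extend their result.
 */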
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		meta2->skip = true;
	}
}

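/* Drop the pair of 32-bit shifts (imm == 0x20) used to zero-extend R0 after
 * a 32-bit packet load; data_ld4() already zero-extends the upper word, so
 * the pair is a no-op.
 */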
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);

	return 0;
}

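/* Validate every instruction, fold in the ECC bits, and byte-swap the
 * result into the little-endian format the ustore expects.
 */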
static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
{
	int i;

	for (i = 0; i < nfp_prog->prog_len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
		if (err)
			return err;

		nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);

		ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
	}

	return 0;
}

/**
 * nfp_bpf_jit() - translate BPF code into NFP assembly
 * @filter:	kernel BPF filter struct
 * @prog_mem:	memory to store assembler instructions
 * @act:	action attached to this eBPF program
 * @prog_start:	offset of the first instruction when loaded
 * @prog_done:	where to jump on exit
 * @prog_sz:	size of @prog_mem in instructions
 * @res:	buffer for the parameters the translation achieved
 *
 * Return: 0 on success, or a negative errno code on failure.
 */
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
	    enum nfp_bpf_action_type act,
	    unsigned int prog_start, unsigned int prog_done,
	    unsigned int prog_sz, struct nfp_bpf_result *res)
{
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->act = act;
	nfp_prog->start_off = prog_start;
	nfp_prog->tgt_done = prog_done;

	ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
	if (ret)
		goto out;

	ret = nfp_prog_verify(nfp_prog, filter);
	if (ret)
		goto out;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		goto out;

	nfp_prog->num_regs = MAX_BPF_REG;
	nfp_prog->regs_per_thread = 32;

	nfp_prog->prog = prog_mem;
	nfp_prog->__prog_alloc_len = prog_sz;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		ret = -EINVAL;
		goto out;
	}

	ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem);

	res->n_instr = nfp_prog->prog_len;
	res->dense_mode = false;
out:
	nfp_prog_free(nfp_prog);

	return ret;
}