/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"
#include "fw.h"
#include "main.h"

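/* Print a message to the eBPF verifier log, prefixed with "[nfp] " so the
 * user can tell driver messages from core verifier ones.
 */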
#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

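/* Seek from @meta to the insn at @insn_idx.  The meta list is doubly
 * linked, so walk from whichever of the current meta, the first meta or
 * the last meta is closest.  Note @forward and @backward are unsigned, so
 * the distance in the "wrong" direction wraps around to a huge value and
 * loses the min() comparisons.
 *
 * E.g. for n_insns = 100, meta->n = 10, insn_idx = 90: forward is 80 and
 * backward wraps, but n_insns - insn_idx - 1 is 9, so we take 9 steps
 * back from the last meta instead.
 */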
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* The datapath can usually give us guarantees on how much adjust
	 * head can be done without the need for any checks.  Optimize the
	 * simple case where there is only one adjust head by a constant.
	 */
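	/* E.g. (illustrative) an XDP program reserving encap headroom with
	 * a single, constant call:
	 *
	 *	if (bpf_xdp_adjust_head(xdp, -14))
	 *		return XDP_DROP;
	 *
	 * makes reg2 a constant -14; if that is within the FW's
	 * guaranteed_sub budget the translator can drop the runtime checks.
	 */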
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, so we must guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.reg.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
		     const struct bpf_reg_state *reg,
		     struct nfp_bpf_reg_state *old_arg)
{
	s64 off, old_off;

	if (reg->type != PTR_TO_STACK) {
		pr_vlog(env, "%s: unsupported ptr type %d\n",
			fname, reg->type);
		return false;
	}
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "%s: variable pointer\n", fname);
		return false;
	}

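	/* Stack pointers have negative offsets from the frame pointer and
	 * the helper wants them word aligned, e.g. off == -8 passes the
	 * check below, off == -6 does not.
	 */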
	off = reg->var_off.value + reg->off;
	if (-off % 4) {
		pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
		return false;
	}

	/* The rest of the checks only apply when we re-parse the same insn */
	if (!old_arg)
		return true;

	old_off = old_arg->reg.var_off.value + old_arg->reg.off;
	old_arg->var_off |= off != old_off;

	return true;
}

static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
		    struct nfp_insn_meta *meta,
		    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
	if (!helper_tgt) {
		pr_vlog(env, "%s: not supported by FW\n", fname);
		return false;
	}

	return true;
}

static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_xdp_adjust_tail:
		if (!bpf->adjust_tail) {
			pr_vlog(env, "adjust_tail not supported by FW\n");
			return -EOPNOTSUPP;
		}
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
					 bpf->helpers.map_lookup, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_update_elem:
		if (!nfp_bpf_map_call_ok("map_update", env, meta,
					 bpf->helpers.map_update, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_delete_elem:
		if (!nfp_bpf_map_call_ok("map_delete", env, meta,
					 bpf->helpers.map_delete, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_get_prandom_u32:
		if (bpf->pseudo_random)
			break;
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;

	case BPF_FUNC_perf_event_output:
		BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
			     NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
			     NFP_BPF_STACK != PTR_TO_STACK ||
			     NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

		if (!bpf->helpers.perf_event_output) {
			pr_vlog(env, "event_output: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		/* Force BPF_F_CURRENT_CPU to make sure we can report the
		 * event on whichever CPU we receive the control message
		 * from the FW on.
		 */
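		/* E.g. the only accepted form is (illustrative, @map and
		 * @data are hypothetical):
		 *
		 *	bpf_perf_event_output(ctx, &map, BPF_F_CURRENT_CPU,
		 *			      &data, sizeof(data));
		 *
		 * any other index in the flags, constant or not, is
		 * rejected below.
		 */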
		if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
		    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
		    BPF_F_CURRENT_CPU) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
			pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
				tn_buf);
			return -EOPNOTSUPP;
		}

		/* To save space in meta we don't care about arguments other
		 * than the 4th, so shove it into arg1.
		 */
		reg1 = cur_regs(env) + BPF_REG_4;

		if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
		    reg1->type != PTR_TO_STACK &&
		    reg1->type != PTR_TO_MAP_VALUE &&
		    reg1->type != PTR_TO_PACKET) {
			pr_vlog(env, "event_output: unsupported ptr type: %d\n",
				reg1->type);
			return -EOPNOTSUPP;
		}

		if (reg1->type == PTR_TO_STACK &&
		    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
			return -EOPNOTSUPP;

		/* Warn the user that on offload NFP may return success even
		 * if the map is not going to accept the event, since the
		 * event output is fully async and the device won't know the
		 * state of the map.  There is also a FW limitation on the
		 * event length.
		 *
		 * Lost events will not show up on the perf ring; the driver
		 * won't see them at all.  Events may also get reordered.
		 */
		dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
			      "bpf: note: return codes and behavior of bpf_event_output() helper differ for offloaded programs!\n");
		pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

		if (!meta->func_id)
			break;

		if (reg1->type != meta->arg1.type) {
			pr_vlog(env, "event_output: ptr type changed: %d %d\n",
				meta->arg1.type, reg1->type);
			return -EINVAL;
		}
		break;

	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}

static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
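	/* Of the low TC action codes only TC_ACT_SHOT, TC_ACT_STOLEN and
	 * TC_ACT_QUEUED are supported.  Note TC_ACT_UNSPEC is -1, which
	 * compared as u64 is above TC_ACT_REDIRECT and passes this filter.
	 */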
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
	static const char * const names[] = {
		[NFP_MAP_UNUSED]	= "unused",
		[NFP_MAP_USE_READ]	= "read",
		[NFP_MAP_USE_WRITE]	= "write",
		[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
	};

	if (use >= ARRAY_SIZE(names) || !names[use])
		return "unknown";
	return names[use];
}

static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
			  struct nfp_bpf_map *nfp_map,
			  unsigned int off, enum nfp_bpf_map_use use)
{
	if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
	    nfp_map->use_map[off / 4] != use) {
		pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
			nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
			nfp_bpf_map_use_name(use), off);
		return -EOPNOTSUPP;
	}

	nfp_map->use_map[off / 4] = use;

	return 0;
}

static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

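	/* Mark every 4B word the access touches; the loop increment aligns
	 * i to the next word boundary.  E.g. off = 6, size = 8 visits
	 * offsets 6, 8 and 12, i.e. words 1, 2 and 3 of the value.
	 */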
	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}

static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_load(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_READ);
			if (err)
				return err;
		}
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
		if (is_mbpf_xadd(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_ATOMIC_CNT);
			if (err)
				return err;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

static int
nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		    struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;

	if (reg->type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
			/* XDP ctx accesses must be 4B in size */
			switch (meta->insn.off) {
			case offsetof(struct xdp_md, rx_queue_index):
				if (nfp_prog->bpf->queue_select)
					goto exit_check_ptr;
				pr_vlog(env, "queue selection not supported by FW\n");
				return -EOPNOTSUPP;
			}
		}
		pr_vlog(env, "unsupported store to context field\n");
		return -EOPNOTSUPP;
	}
exit_check_ptr:
	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

	if (dreg->type != PTR_TO_MAP_VALUE) {
		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
			dreg->type);
		return -EOPNOTSUPP;
	}
	if (sreg->type != SCALAR_VALUE) {
		pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
		return -EOPNOTSUPP;
	}

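	/* Accumulate across all verifier paths whether the addend may
	 * exceed and/or fit in 16 bits, based on the verifier's tnum:
	 * @value holds the known-one bits, @mask the unknown bits.  E.g. a
	 * fully unknown u32 (value 0x0, mask 0xffffffff) sets both flags.
	 */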
	meta->xadd_over_16bit |=
		sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
	meta->xadd_maybe_16bit |=
		(sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg =
		cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg =
		cur_regs(env) + meta->insn.dst_reg;

	meta->umin_src = min(meta->umin_src, sreg->umin_value);
	meta->umax_src = max(meta->umax_src, sreg->umax_value);
	meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
	meta->umax_dst = max(meta->umax_dst, dreg->umax_value);

	/* NFP supports u16 and u32 multiplication.
	 *
	 * For ALU64, if either operand is beyond u32's value range, we
	 * reject it.  Note that if the source operand is BPF_K, we need to
	 * check the "imm" field directly, and we reject it if it is
	 * negative, because for ALU64 "imm" (of s32 type) is sign extended
	 * to s64, which NFP mul doesn't support.
	 *
	 * For ALU32 it is fine for "imm" to be negative, because the result
	 * is 32-bit and there is no difference in the low half of the
	 * result between signed and unsigned mul, so we will get the
	 * correct result.
	 */
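	/* E.g. (illustrative BPF assembly):
	 *
	 *	r1 *= -2	// BPF_ALU64 | BPF_MUL | BPF_K, imm = -2
	 *
	 * is rejected, since -2 sign extends to 0xfffffffffffffffe and
	 * would need a full 64x64 multiply.
	 */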
	if (is_mbpf_mul(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "multiplier is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
			pr_vlog(env, "multiplicand is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_class(meta) == BPF_ALU64 &&
		    mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
			return -EINVAL;
		}
	}

	/* NFP doesn't have divide instructions; we support divide by
	 * constant through reciprocal multiplication.  Given NFP supports
	 * multiplication no wider than u32, we require the divisor and
	 * dividend to be no bigger than that as well.
	 *
	 * Also, eBPF doesn't support signed divide and has enforced this at
	 * the C language level by failing compilation.  However, the LLVM
	 * assembler hasn't enforced this, so it is possible for a negative
	 * constant to leak in as a BPF_K operand through assembly code; we
	 * reject such cases as well.
	 */
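	/* E.g. division by the constant 3 can be compiled to a multiply-
	 * high (illustrative, not the exact NFP sequence):
	 *
	 *	x / 3  ==  (u32)(((u64)x * 0xaaaaaaabULL) >> 33)
	 *
	 * which holds for all u32 x, hence the u32 bounds required below.
	 */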
	if (is_mbpf_div(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "dividend is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X) {
			if (meta->umin_src != meta->umax_src) {
				pr_vlog(env, "divisor is not constant\n");
				return -EINVAL;
			}
			if (meta->umax_src > U32_MAX) {
				pr_vlog(env, "divisor is not within u32 value range\n");
				return -EINVAL;
			}
		}
		if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "divide by negative constant is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

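/* Per-instruction hook invoked by the eBPF verifier as it walks the
 * program.  The last visited meta is cached in nfp_prog->verifier_meta,
 * so on a linear walk nfp_bpf_goto_meta() usually only takes a single
 * step.
 */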
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_CALL))
		return nfp_bpf_check_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_store(nfp_prog, meta, env);

	if (is_mbpf_xadd(meta))
		return nfp_bpf_check_xadd(nfp_prog, meta, env);

	if (is_mbpf_alu(meta))
		return nfp_bpf_check_alu(nfp_prog, meta, env);

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook = nfp_verify_insn,
};