/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "fw.h"
#include "main.h"

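/* Write to the verifier log, prefixing driver messages with "[nfp]" so
 * they can be told apart from the core verifier's own output.
 */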
#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

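/* Find the meta for the instruction at @insn_idx.  Metas sit on a
 * doubly linked list, so start the walk from whichever known node is
 * closest: the previously visited meta, the list head, or the list
 * tail.  E.g. with n_insns == 100, a cached meta at n == 90 and
 * insn_idx == 5, walking forward from the head costs only 5 steps,
 * so we restart from nfp_prog_first_meta().
 */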
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

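	/* Distances are unsigned: the direction we cannot walk in
	 * underflows to a huge value, so it never wins the min() or
	 * the forward < backward comparisons below.
	 */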
	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

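	/* Restart from the tail or the head of the list if that walk
	 * would be shorter than both current distances.
	 */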
	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* Datapath usually can give us guarantees on how much adjust head
	 * can be done without the need for any checks.  Optimize the simple
	 * case where there is only one adjust head by a constant.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, we need to guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.var_off.value != imm)
			goto exit_set_location;
	}

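	/* This call site qualifies - remember it so the translator can
	 * skip the runtime checks.  Any bail-out above leaves location
	 * at UINT_MAX, meaning no optimizable call site.
	 */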
	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

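/* Validate a helper call.  Only xdp_adjust_head and map_lookup_elem are
 * recognized, and each is checked against FW capabilities and the
 * argument state the verifier tracked for the call site.
 */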
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;
	s64 off, old_off;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!bpf->helpers.map_lookup) {
			pr_vlog(env, "map_lookup: not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (reg2->type != PTR_TO_STACK) {
			pr_vlog(env,
				"map_lookup: unsupported key ptr type %d\n",
				reg2->type);
			return -EOPNOTSUPP;
		}
		if (!tnum_is_const(reg2->var_off)) {
			pr_vlog(env, "map_lookup: variable key pointer\n");
			return -EOPNOTSUPP;
		}

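		/* The key is a constant stack pointer; only 4-byte
		 * aligned stack offsets can be handled.
		 */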
		off = reg2->var_off.value + reg2->off;
		if (-off % 4) {
			pr_vlog(env,
				"map_lookup: unaligned stack pointer %lld\n",
				-off);
			return -EOPNOTSUPP;
		}

		/* Rest of the checks apply only if we re-parse the same insn */
		if (!meta->func_id)
			break;

		old_off = meta->arg2.var_off.value + meta->arg2.off;
		meta->arg2_var_off |= off != old_off;

		if (meta->arg1.map_ptr != reg1->map_ptr) {
			pr_vlog(env, "map_lookup: called for different map\n");
			return -EOPNOTSUPP;
		}
		break;
	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

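	/* Save the argument state so a later pass over the same
	 * instruction can detect whether it changed.
	 */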
	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2 = *reg2;

	return 0;
}

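/* XDP exit codes need no checking here; for cls_bpf the return value
 * must be a verifier-known constant from the supported set of TC
 * actions.
 */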
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED && imm != TC_ACT_RECLASSIFY) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

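/* A load/store may see different stack offsets on different paths only
 * if the offsets are equal modulo 4, i.e. word alignment is preserved.
 */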
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

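	/* No pointer recorded yet (first visit of this instruction),
	 * nothing to compare against.
	 */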
	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

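/* Check the pointer operand of a load or store and record it, so later
 * passes can verify its type does not change.
 */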
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

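/* Per-instruction hook called by the core verifier.  Look up the
 * driver's meta for the instruction being verified and apply the
 * NFP-specific checks.
 */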
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_CALL))
		return nfp_bpf_check_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.dst_reg);

	return 0;
}

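/* The core verifier invokes insn_hook for every instruction it walks,
 * letting the driver veto programs the NFP cannot offload.
 */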
const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook = nfp_verify_insn,
};