// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Map record paths are entered via ndo, update side is protected. */
	ASSERT_RTNL();

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record.  The prog destroy ndo
	 * happens after free_used_maps().
	 */
	map = bpf_map_inc(map, false);
	if (IS_ERR(map))
		return PTR_ERR(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->map_id = map->id;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}

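/* Drop the references this program holds on offload-neutral maps.  Records
 * whose use count reaches zero are unhashed and, after an RCU grace period
 * (the event output path looks them up under RCU), released and freed.
 */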
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}

		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}

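/* Remember all offload-neutral maps (e.g. perf event arrays) the program
 * uses, so firmware events referring to them can be matched back to the
 * host-side struct bpf_map.
 */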
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	int i, cnt, err;

	/* Quickly count the maps we will have to remember */
	cnt = 0;
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
			cnt++;
	if (!cnt)
		return 0;

	nfp_prog->map_records = kmalloc_array(cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records)
		return -ENOMEM;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
						 prog->aux->used_maps[i]);
			if (err) {
				nfp_map_ptrs_forget(bpf, nfp_prog);
				return err;
			}
		}
	WARN_ON(cnt != nfp_prog->map_records_cnt);

	return 0;
}

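/* Allocate an instruction metadata node for every eBPF instruction and
 * link them onto nfp_prog->insns for the verifier and JIT passes to use.
 */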
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;
		if (is_mbpf_alu(meta)) {
			meta->umin_src = U64_MAX;
			meta->umin_dst = U64_MAX;
		}

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	nfp_bpf_jit_prepare(nfp_prog, cnt);

	return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	kfree(nfp_prog->subprog);

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

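/* BPF_OFFLOAD_VERIFIER_PREP handler - allocate per-program driver state and
 * point the verifier at the NFP-specific callbacks before verification runs.
 */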
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		      struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = app->priv;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

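/* JIT the verified program into NFP instructions.  The image buffer is
 * sized from the FW-advertised maximum program length, and all neutral
 * maps used by the program are recorded once translation succeeds.
 */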
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_instr;
	int err;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

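/* Release the translated image, map records and instruction metadata. */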
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

/* Mark value as unsafely initialized in case it becomes atomic later
 * and we didn't byte swap something non-byte swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
		    word[i] != (__force u32)cpu_to_be32(word[i]))
			nfp_map->use_map[i].non_zero_update = 1;
}

static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

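/* Validate the requested map against FW-advertised limits (type, map count,
 * element count, key/value sizes) and allocate a table for it on the device.
 */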
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}

	if (round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
			round_up(offmap->map.key_size, 8) +
			round_up(offmap->map.value_size, 8),
			bpf->maps.max_elem_sz);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz) {
		pr_info("map key size %u, FW max is %u\n",
			offmap->map.key_size, bpf->maps.max_key_sz);
		return -ENOMEM;
	}
	if (offmap->map.value_size > bpf->maps.max_val_sz) {
		pr_info("map value size %u, FW max is %u\n",
			offmap->map.value_size, bpf->maps.max_val_sz);
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

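/* Free the device table backing the map and update usage accounting. */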
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

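/* Dispatch BPF offload commands received via ndo_bpf - program prep,
 * translation and destruction, plus map allocation and freeing.
 */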
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		return nfp_bpf_verifier_prep(app, nn, bpf);
	case BPF_OFFLOAD_TRANSLATE:
		return nfp_bpf_translate(nn, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		return nfp_bpf_destroy(nn, bpf->offload.prog);
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

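/* Copy callback for bpf_event_output() - the event payload already sits in
 * host memory, so a plain memcpy is sufficient.
 */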
static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

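/* Handle a perf event control message from the FW: look up the map ID it
 * carries among the recorded neutral maps and emit the event on the
 * corresponding host perf event array.
 */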
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len)
{
	struct cmsg_bpf_event *cbe = (void *)data;
	struct nfp_bpf_neutral_map *record;
	u32 pkt_size, data_size, map_id;
	u64 map_id_full;

	if (len < sizeof(struct cmsg_bpf_event))
		return -EINVAL;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map_id_full = be64_to_cpu(cbe->map_ptr);
	map_id = map_id_full;

	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		return -EINVAL;
	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
		return -EINVAL;

	rcu_read_lock();
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
					nfp_bpf_maps_neutral_params);
	if (!record || map_id_full > U32_MAX) {
		rcu_read_unlock();
		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
			  map_id_full, map_id_full);
		return -EINVAL;
	}

	bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();

	return 0;
}

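/* Check the program against per-vNIC FW limits (inline MTU, stack size,
 * program length), relocate the image for this vNIC and DMA it to the
 * device for loading.
 */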
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu, max_stack, max_prog_len;
	dma_addr_t dma_addr;
	void *img;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
		return -EOPNOTSUPP;
	}

	max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (nfp_prog->stack_size > max_stack) {
		NL_SET_ERR_MSG_MOD(extack, "stack too large");
		return -EOPNOTSUPP;
	}

	max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	if (nfp_prog->prog_len > max_prog_len) {
		NL_SET_ERR_MSG_MOD(extack, "program too long");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

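/* Install, replace or remove the offloaded program on a vNIC.  Live reload
 * of an already running program requires the FW relocation capability.
 */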
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
		return -EINVAL;

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}