/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * bpf/offload.c
 * Netronome network device driver: eBPF offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

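/* Track a "neutral" (host-side) map used by an offloaded program.  Records
 * are shared between programs through the bpf->maps_neutral rhashtable and
 * refcounted via record->count; the first user takes a reference on the
 * underlying BPF map so it stays alive until the last record is dropped.
 */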
static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Map record paths are entered via ndo, update side is protected. */
	ASSERT_RTNL();

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record.  The prog destroy ndo
	 * happens after free_used_maps().
	 */
	map = bpf_map_inc(map, false);
	if (IS_ERR(map))
		return PTR_ERR(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}

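/* Drop this program's references to its neutral map records.  Records
 * whose count reaches zero are removed from the rhashtable; an RCU grace
 * period must pass before freeing them, because nfp_bpf_event_output()
 * looks them up under rcu_read_lock().
 */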
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}

		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}

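/* Record all neutral maps used by @prog.  The maps are counted first so
 * the record array can be sized exactly, then each one is registered via
 * nfp_map_ptr_record(); any failure unwinds the records taken so far.
 */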
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	int i, cnt, err;

	/* Quickly count the maps we will have to remember */
	cnt = 0;
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
			cnt++;
	if (!cnt)
		return 0;

	nfp_prog->map_records = kmalloc_array(cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records)
		return -ENOMEM;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
						 prog->aux->used_maps[i]);
			if (err) {
				nfp_map_ptrs_forget(bpf, nfp_prog);
				return err;
			}
		}
	WARN_ON(cnt != nfp_prog->map_records_cnt);

	return 0;
}

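/* Build the per-instruction metadata list consumed by the JIT.  For ALU
 * instructions umin_src/umin_dst start at U64_MAX, i.e. "no bound
 * recorded yet", so the verifier walk can only narrow them.
 */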
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;
		if (is_mbpf_alu(meta)) {
			meta->umin_src = U64_MAX;
			meta->umin_dst = U64_MAX;
		}

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	nfp_bpf_jit_prepare(nfp_prog, cnt);

	return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

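/* BPF_OFFLOAD_VERIFIER_PREP handler: allocate the driver-private program
 * state, pre-parse the instructions, and point the kernel verifier at the
 * driver's analyzer ops for its per-instruction callbacks.
 */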
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		      struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = app->priv;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

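/* BPF_OFFLOAD_TRANSLATE handler: check the program against FW limits
 * (stack size is advertised in units of 64 bytes, program length as a
 * maximum instruction count), JIT it to NFP code and record the host
 * maps it uses.
 */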
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;
	int err;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}
	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

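/* Map op wrappers.  Lookup and update pass values through the byte swap
 * above in both directions, so atomic counter words are kept big endian
 * on the device while the host always sees native endianness.
 */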
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

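/* BPF_OFFLOAD_MAP_ALLOC handler: validate the map against FW-advertised
 * capabilities (supported types, map and element budgets, key/value
 * sizes) and allocate a table on the device.  The trailing use_map[]
 * array tracks how the datapath uses each 4-byte word of the value.
 */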
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz ||
	    offmap->map.value_size > bpf->maps.max_val_sz ||
	    round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("elements don't fit in device constraints\n");
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

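/* BPF_OFFLOAD_MAP_FREE handler: free the device table and return its
 * element budget to the capability counters.
 */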
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

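/* Dispatch point for struct netdev_bpf offload commands from the stack. */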
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		return nfp_bpf_verifier_prep(app, nn, bpf);
	case BPF_OFFLOAD_TRANSLATE:
		return nfp_bpf_translate(nn, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		return nfp_bpf_destroy(nn, bpf->offload.prog);
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

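/* Copy callback for bpf_event_output(); the event payload already sits
 * in the control message buffer, so a plain memcpy suffices.
 */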
static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

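/* Deliver a perf event generated by the offloaded datapath.  The control
 * message carries the map pointer as an opaque 64-bit cookie, which is
 * only trusted after it is found in the maps_neutral table under RCU.
 */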
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
{
	struct cmsg_bpf_event *cbe = (void *)skb->data;
	u32 pkt_size, data_size;
	struct bpf_map *map;

	if (skb->len < sizeof(struct cmsg_bpf_event))
		goto err_drop;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);

	if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		goto err_drop;
	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
		goto err_drop;

	rcu_read_lock();
	if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
				    nfp_bpf_maps_neutral_params)) {
		rcu_read_unlock();
		pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
			map);
		goto err_drop;
	}

	bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();

	dev_consume_skb_any(skb);
	return 0;
err_drop:
	dev_kfree_skb_any(skb);
	return -EINVAL;
}

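/* Load the translated program into the FW: verify the MTU fits the
 * FW's inlined packet buffer, relocate the JITed image for this vNIC,
 * DMA-map it and issue the BPF reconfig command.  The DMA mapping is
 * only needed for the duration of that command.
 */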
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	void *img;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

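/* Starting and stopping the datapath only toggle NFP_NET_CFG_CTRL_BPF
 * and issue a general reconfig; the program image itself is loaded and
 * torn down separately.
 */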
static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

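/* Install, replace or remove the offloaded program on a vNIC.  Live
 * replacement requires the FW relocation capability; without it the old
 * program has to be removed first.
 */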
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog) {
		struct bpf_prog_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}
605