/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

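/* Copy each BPF instruction into its own nfp_insn_meta node and append it to
 * the program's instruction list, so later verification and translation
 * passes can annotate instructions individually.
 */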
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		struct nfp_insn_meta *meta;

		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	return 0;
}

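/* Free all instruction metadata on the list, then the program state itself. */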
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

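/* Allocate the offload-private program state, build the instruction metadata
 * list and hand our analyzer ops back through the netdev_bpf request so the
 * verifier consults the driver while checking the program.
 */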
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
			  struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

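/* Translate the verified BPF program into NFP code.  The firmware advertises
 * its stack size (read in units of 64 bytes here), the program start and
 * "done" target offsets, and the maximum instruction count; reject programs
 * whose stack does not fit before allocating the JIT output buffer.
 */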
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
		      struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %uB > FW stack %uB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}

	nfp_prog->stack_depth = prog->aux->stack_depth;
	nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	return nfp_bpf_jit(nfp_prog);
}

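/* Free the JITed image and the translator state once the program is gone. */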
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kfree(nfp_prog->prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

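/* DMA-map the translated program and ask the firmware to load it.  The
 * firmware reports the HW packet split boundary through the inline-MTU
 * register (units of 64 bytes, less a 32 byte reserve, per the calculation
 * below); refuse to offload if the netdev MTU exceeds it.
 */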
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr))
		return -ENOMEM;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);

	return err;
}

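/* Set the BPF control bit and tell the firmware to start diverting packets
 * through the loaded program.
 */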
static void nfp_net_bpf_start(struct nfp_net *nn)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
}

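/* Disable the datapath BPF program, if one is currently enabled. */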
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

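/* Top-level offload entry point: check that the program was prepared for this
 * netdev, require the firmware's live-reload capability when replacing an
 * already loaded program, and load/start or stop the datapath program as
 * requested.
 */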
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog)
{
	int err;

	if (prog) {
		struct bpf_dev_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			nn_err(nn, "FW does not support live reload\n");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn);

	return 0;
}