/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

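/* Tags work like a FIFO of u16s: they are handed out sequentially from
 * tag_alloc_next and tracked in the tag_allocator bitmap, while
 * tag_alloc_last trails behind as the oldest tag which may still be in
 * flight.  The unsigned subtraction below is wraparound-safe, so "busy"
 * simply means more than NFP_BPF_TAG_ALLOC_SPAN tags are outstanding.
 */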
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse a message ID too soon after a timeout, limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}

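/* Clear the tag's bit and lazily advance tag_alloc_last past any
 * already-freed tags, keeping the window measured by
 * nfp_bpf_all_tags_busy() tight.
 */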
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}

static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += sizeof(struct cmsg_key_value_pair) * n;

	return nfp_bpf_cmsg_alloc(bpf, size);
}

static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return hdr->type;
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}

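/* Find and unlink the queued reply with a matching tag, releasing the
 * tag on success.  Callers must hold the control message lock, hence
 * the double-underscore prefix.
 */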
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

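/* Like nfp_bpf_reply(), but if no reply has arrived yet the tag is
 * freed anyway; a reply which shows up later will then be dropped by
 * nfp_bpf_ctrl_msg_rx().
 */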
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

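/* Wait for the reply to @tag: busy-poll briefly for fast replies, then
 * sleep on the reply waitqueue for up to 5 seconds.  If we still have
 * nothing, do one final check which atomically drops the tag, so that
 * a reply cannot slip in between the check and the release.
 */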
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int i, err;

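	/* Busy-poll for up to ~200us before going to sleep on the
	 * waitqueue - replies to fast commands are often already in.
	 */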
	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_bpf_reply(bpf, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try one last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

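/* Send a request and wait for the FW's reply.  The request skb is
 * consumed in all cases; the return value is either the reply skb or
 * an ERR_PTR().  A @reply_size of 0 skips the length check, for
 * replies of variable size which the caller validates itself.
 */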
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

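	/* Allocate the tag and queue the request under the control
	 * message lock, so the RX path cannot match a tag before its
	 * request has been sent; __nfp_app_ctrl_tx() expects the lock
	 * to be held.
	 */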
	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			  type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

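/* Translate a FW return code into a kernel errno; any code outside the
 * table is reported and treated as -EIO.
 */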
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

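/* Ask the FW to allocate a map matching the attributes of @map.
 * Returns the FW-assigned table id (tid) on success or a negative
 * errno.  A minimal usage sketch (assuming an offload path which
 * stashes the id in a driver-private struct nfp_bpf_map):
 *
 *	long long int tid;
 *
 *	tid = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
 *	if (tid < 0)
 *		return tid;
 *	nfp_map->tid = tid;
 */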
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

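/* Ask the FW to free a map.  There is no way to report failure to the
 * caller here, so errors only trigger a warning - the device-side map
 * is effectively leaked.
 */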
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

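/* Common helper for single-element map operations.  Builds a request
 * carrying the table id, one key/value slot and @flags, and copies the
 * requested outputs back from the reply.  Callers pass NULL for the
 * inputs and outputs their particular operation does not use.
 */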
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(&req->elem[0].key, key, map->key_size);
	if (value)
		memcpy(&req->elem[0].value, value, map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       sizeof(*reply) + sizeof(*reply->elem));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, &reply->elem[0].key, map->key_size);
	if (out_value)
		memcpy(out_value, &reply->elem[0].value, map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

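/* Entry points for the offloaded map operations.  Each one maps 1:1
 * onto a FW cmsg op type, differing only in which of the key/value
 * inputs and outputs of nfp_bpf_ctrl_entry_op() it uses.
 */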
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

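/* RX handler for BPF control messages.  Asynchronous events are
 * dispatched directly; replies are matched against the tag allocator,
 * queued on cmsg_replies and the waiters woken up.
 */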
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		nfp_bpf_event_output(bpf, skb);
		return;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}