// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

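/* Tags identify outstanding request/reply pairs.  They are handed out from
 * a circular u16 window bounded by tag_alloc_last (oldest tag still in use)
 * and tag_alloc_next (next tag to hand out); the window is capped at
 * NFP_BPF_TAG_ALLOC_SPAN so a tag cannot be reused while a timed-out
 * request with the same tag may still receive a late reply.
 */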
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse the message ID too early after timeout - limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}

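/* Return a tag to the allocator and advance tag_alloc_last past any tags
 * that have already been freed, keeping the in-flight window small.
 */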
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}

static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

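/* Size of a map operation request carrying @n key/value pairs, based on the
 * per-entry key and value sizes stored in @bpf.
 */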
static unsigned int
nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}

static unsigned int
nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_reply_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return hdr->type;
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}

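/* Find, unlink and return the queued reply matching @tag, or NULL if none
 * has arrived yet.  Must be called with the control message lock held.
 */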
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

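/* Like nfp_bpf_reply(), but if no reply is queued the tag is freed anyway,
 * so a reply arriving after a timeout will simply be dropped.
 */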
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

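/* Wait for the reply to @tag.  Busy-poll briefly first in case the firmware
 * responds within microseconds, then sleep on the wait queue with a longer
 * timeout.
 */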
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int i, err;

	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_bpf_reply(bpf, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try one last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

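/* Send @skb to the firmware and wait for a reply of the matching type.
 * Consumes @skb and returns the reply skb, or an ERR_PTR() on failure.
 * A @reply_size of 0 skips the length check so the caller can validate
 * variable-length replies itself.
 */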
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			  type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

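/* Translate the firmware return code in a reply header into 0 or a
 * negative errno.
 */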
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

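/* Ask the firmware to allocate a table for @map.  Returns the firmware
 * table ID (tid) on success or a negative errno.
 */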
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

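/* Ask the firmware to free the table behind @nfp_map.  Failures are only
 * reported with a warning; the firmware table is then leaked.
 */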
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

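/* Map operation messages carry their key/value pairs packed back to back
 * after the fixed header.  The helpers below return the address of the
 * n-th key or value in a request or reply.
 */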
static void *
nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

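/* Common helper for single-entry map operations (lookup, update, delete,
 * getfirst/getnext).  Optional @key/@value are copied into the request,
 * optional @out_key/@out_value are copied back from the reply.
 */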
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
	if (value)
		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
		       map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       nfp_bpf_cmsg_map_reply_size(bpf, 1));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
		       map->key_size);
	if (out_value)
		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
		       map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

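/* Minimum control channel MTU required to fit the largest single-entry map
 * request or reply.
 */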
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
	return max3((unsigned int)NFP_NET_DEFAULT_MTU,
		    nfp_bpf_cmsg_map_req_size(bpf, 1),
		    nfp_bpf_cmsg_map_reply_size(bpf, 1));
}

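/* Handler for BPF control messages arriving from the firmware.  BPF event
 * notifications are handled immediately; replies are matched against the
 * tag allocator, queued on cmsg_replies and any waiters are woken up.
 */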
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
			dev_consume_skb_any(skb);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}

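/* Handler for control messages delivered as a raw buffer rather than an
 * skb; only BPF event messages are expected on this path.
 */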
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
	struct nfp_app_bpf *bpf = app->priv;
	const struct cmsg_hdr *hdr = data;

	if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
		return;
	}

	if (hdr->type == CMSG_TYPE_BPF_EVENT)
		nfp_bpf_event_output(bpf, data, len);
	else
		cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
			  hdr->type);
}