xref: /openbmc/linux/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c (revision 9977a8c3497a8f7f7f951994f298a8e4d961234f)
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

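/* Tags are handed out from a monotonically increasing u16 counter
 * (tag_alloc_next) and retired by advancing tag_alloc_last past freed
 * tags.  The u16 subtraction below is wrap-safe: e.g. with
 * tag_alloc_last == 0xfff0 and tag_alloc_next == 0x000a the window
 * holds 0x1a (26) in-flight tags.  Capping the window at a quarter of
 * the tag space keeps a timed-out tag from being recycled too soon.
 */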
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse a message ID too soon after a timeout, limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}

static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}

static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

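/* A map request is a fixed cmsg_req_map_op header followed by n
 * cmsg_key_value_pair entries sized from the map definition; the
 * helpers in this file all operate on single elements, so n is
 * currently always 1.
 */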
static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += sizeof(struct cmsg_key_value_pair) * n;

	return nfp_bpf_cmsg_alloc(bpf, size);
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}

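/* Reply lookup walks the cmsg_replies queue for a message whose tag
 * matches.  Callers of __nfp_bpf_reply() must hold the control channel
 * lock; nfp_bpf_reply() takes it, and nfp_bpf_reply_drop_tag()
 * additionally frees the tag when no reply has arrived, so a late
 * reply to a timed-out request is dropped rather than matched to a
 * future request.
 */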
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int i, err;

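	/* Busy poll for up to 200 usec in case the FW replies quickly,
	 * then fall back to sleeping on the wait queue with a 5 second
	 * timeout.
	 */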
	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_bpf_reply(bpf, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try one last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

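/* Send a request and wait for the matching reply.  Consumes the
 * request skb in all cases.  The tag is allocated and the request
 * transmitted under the control channel lock, so the two cannot be
 * reordered against other requests.  A reply of unexpected length or
 * type is dropped and reported as -EIO.
 */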
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	/* A reply_size of 0 means the caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - wrong size %u != %u!\n",
			  skb->len, reply_size);
		goto err_free;
	}
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}
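
/* Typical caller pattern (sketch only; the map helpers below are the
 * real implementations):
 *
 *	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
 *	if (!skb)
 *		return -ENOMEM;
 *	req = (void *)skb->data;
 *	... fill request fields, converting to big endian ...
 *	skb = nfp_bpf_cmsg_communicate(bpf, skb, type, sizeof(*reply));
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	reply = (void *)skb->data;
 *	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
 *	... dev_consume_skb_any() on success, dev_kfree_skb_any() on error ...
 */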

static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

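/* Returns the FW-assigned table id on success or a negative errno;
 * the 64-bit return type lets the full u32 tid range and negative
 * error codes share one value.
 */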
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

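/* Common helper for all single-element map operations.  @op selects
 * the FW message type; @key/@value are copied into the request when
 * non-NULL, and @out_key/@out_value are filled from the reply when
 * non-NULL, each sized according to the map definition.
 */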
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(&req->elem[0].key, key, map->key_size);
	if (value)
		memcpy(&req->elem[0].value, value, map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       sizeof(*reply) + sizeof(*reply->elem));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, &reply->elem[0].key, map->key_size);
	if (out_value)
		memcpy(out_value, &reply->elem[0].value, map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

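/* RX handler for BPF control messages.  Validates the length and tag,
 * queues the reply for the waiter identified by the tag and wakes all
 * sleepers on cmsg_wq; unexpected messages are dropped.
 */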
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %u!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}