// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"

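/* Entry in the mask id hash table: maps the jhash of a mask's bytes to
 * its allocated mask id, with a reference count so a mask shared by
 * several flows is allocated and released only once.
 */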
struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

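/* Lookup key for the flow table: a flow matches on its TC cookie, and
 * optionally on the ingress netdev and the host stats context id,
 * either of which may be wildcarded by the caller.
 */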
struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
	__be32 host_ctx;
};

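/* Return a stats context id to the free list ring buffer so that it
 * can be handed out again by nfp_get_stats_entry().
 */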
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

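/* Hand out a free stats context id, preferring ids that have never
 * been allocated before falling back to the free list ring buffer.
 */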
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id = priv->stats_ids.init_unalloc - 1;
		priv->stats_ids.init_unalloc--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev, __be32 host_ctx)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;
	flower_cmp_arg.host_ctx = host_ctx;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

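/* Process a flow stats control message from the firmware: each frame
 * carries a stats context id plus packet and byte counts, which are
 * accumulated into the matching entry of priv->stats.
 */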
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

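/* Return a mask id to the free list ring buffer and timestamp it so
 * that nfp_mask_alloc() does not reuse it too soon.
 */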
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Check if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

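/* Hand out a free mask id. A previously freed id is only reused once
 * NFP_FL_MASK_REUSE_TIME_NS has elapsed since it was released.
 */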
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Check for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

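/* Allocate a mask id for @mask_data and add it to the mask id hash
 * table with an initial reference count of one. Returns the new mask
 * id on success, or a negative errno if no id or memory was available.
 */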
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

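/* Look up @mask_data in the mask id hash table by the jhash of its
 * bytes. Returns the entry whose stored hash matches, or NULL if the
 * mask is unknown.
 */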
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* Casting u8 to int for later use. */
	return mask_entry->mask_id;
}

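/* Take a reference on the mask id for @mask_data, allocating a new one
 * if the mask is not yet in the table. NFP_FL_META_FLAG_MANAGE_MASK is
 * set in @meta_flags when a new mask was allocated. Returns false if
 * no mask id could be obtained.
 */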
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

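/* Drop a reference on the mask id for @mask_data, releasing the id and
 * setting NFP_FL_META_FLAG_MANAGE_MASK in @meta_flags when the last
 * reference goes away. Returns false if the mask is not in the table.
 */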
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	if (meta_flags)
		*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

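/* Fill in the metadata of a new flow: allocate a stats context, take a
 * reference on the mask id, bump the flow version, and check that no
 * flow with the same cookie already exists on this netdev. Acquired
 * resources are released again on failure.
 */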
int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;

	if (nfp_get_stats_entry(app, &stats_cxt))
		return -ENOENT;

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);

	new_mask_id = 0;
	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;
		return -ENOENT;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
						 NFP_FL_STATS_CTX_DONT_CARE);
	if (check_entry) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;

		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id))
			return -EINVAL;

		return -EEXIST;
	}

	return 0;
}

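/* Update the metadata of a flow that is being replaced: drop the
 * reference on its old mask id, bump the flow version, and release its
 * stats context back to the free list.
 */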
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	nfp_check_mask_remove(app, nfp_flow->mask_data,
			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
			      &new_mask_id);

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

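/* rhashtable compare function: returns 0 when @obj matches the lookup
 * key, treating a NULL netdev or a NFP_FL_STATS_CTX_DONT_CARE host
 * context in the key as wildcards.
 */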
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
	    (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
	     flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

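/* The object and key hash functions both hash only the 64-bit TC
 * flower cookie, so the two hashes agree for matching entries.
 */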
static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};

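/* Allocate the mask id and stats id free lists, the stats array and
 * the flow table. priv->stats_ring_size must already be set by the
 * caller.
 */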
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_flow_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask ids. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = host_ctx_count;

	priv->stats = kvmalloc_array(priv->stats_ring_size,
				     sizeof(struct nfp_fl_stats), GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

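/* Free everything allocated by nfp_flower_metadata_init(). Safe to
 * call when initialisation never ran, as priv is checked for NULL.
 */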
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}