1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2021 Corigine, Inc. */
3 
4 #include "conntrack.h"
5 
/* Parameters for the per-zone tc merge table: entries are
 * struct nfp_fl_ct_tc_merge, hashed on the pair of TC flow cookies
 * (pre_ct cookie, post_ct cookie) built in nfp_ct_do_tc_merge().
 */
const struct rhashtable_params nfp_tc_ct_merge_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_tc_merge,
					   hash_node),
	/* Key is two cookies, stored back to back in ::cookie */
	.key_len		= sizeof(unsigned long) * 2,
	.key_offset		= offsetof(struct nfp_fl_ct_tc_merge, cookie),
	.automatic_shrinking	= true,
};
13 
14 /**
15  * get_hashentry() - Wrapper around hashtable lookup.
16  * @ht:		hashtable where entry could be found
17  * @key:	key to lookup
18  * @params:	hashtable params
19  * @size:	size of entry to allocate if not in table
20  *
21  * Returns an entry from a hashtable. If entry does not exist
22  * yet allocate the memory for it and return the new entry.
23  */
24 static void *get_hashentry(struct rhashtable *ht, void *key,
25 			   const struct rhashtable_params params, size_t size)
26 {
27 	void *result;
28 
29 	result = rhashtable_lookup_fast(ht, key, params);
30 
31 	if (result)
32 		return result;
33 
34 	result = kzalloc(size, GFP_KERNEL);
35 	if (!result)
36 		return ERR_PTR(-ENOMEM);
37 
38 	return result;
39 }
40 
41 bool is_pre_ct_flow(struct flow_cls_offload *flow)
42 {
43 	struct flow_action_entry *act;
44 	int i;
45 
46 	flow_action_for_each(i, act, &flow->rule->action) {
47 		if (act->id == FLOW_ACTION_CT && !act->ct.action)
48 			return true;
49 	}
50 	return false;
51 }
52 
53 bool is_post_ct_flow(struct flow_cls_offload *flow)
54 {
55 	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
56 	struct flow_dissector *dissector = rule->match.dissector;
57 	struct flow_match_ct ct;
58 
59 	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
60 		flow_rule_match_ct(rule, &ct);
61 		if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
62 			return true;
63 	}
64 	return false;
65 }
66 
/* Placeholder for validating that two ct flow entries can be merged.
 * Currently always reports success; the real field-by-field
 * compatibility check is still to be implemented.
 */
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
			      struct nfp_fl_ct_flow_entry *entry2)
{
	return 0;
}
72 
/* Pair a pre_ct and a post_ct flow entry (passed in either order) into
 * a tc merge entry tracked in zt->tc_merge_tb, keyed on both TC flow
 * cookies. Returns 0 on success or if the pair was already merged,
 * negative errno otherwise.
 */
static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
			      struct nfp_fl_ct_flow_entry *ct_entry1,
			      struct nfp_fl_ct_flow_entry *ct_entry2)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_ct_tc_merge *m_entry;
	unsigned long new_cookie[2];
	int err;

	/* Sort the pair; the entry that is not pre_ct is taken as post_ct */
	if (ct_entry1->type == CT_TYPE_PRE_CT) {
		pre_ct_entry = ct_entry1;
		post_ct_entry = ct_entry2;
	} else {
		post_ct_entry = ct_entry1;
		pre_ct_entry = ct_entry2;
	}

	/* Both halves must have been offloaded on the same net device */
	if (post_ct_entry->netdev != pre_ct_entry->netdev)
		return -EINVAL;
	/* Checks that the chain_index of the filter matches the
	 * chain_index of the GOTO action.
	 */
	if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
		return -EINVAL;

	err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
	if (err)
		return err;

	/* The merge-table key is the (pre, post) cookie pair */
	new_cookie[0] = pre_ct_entry->cookie;
	new_cookie[1] = post_ct_entry->cookie;
	m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
				nfp_tc_ct_merge_params, sizeof(*m_entry));
	if (IS_ERR(m_entry))
		return PTR_ERR(m_entry);

	/* m_entry already present, not merging again.
	 * A freshly allocated entry is zeroed, so a matching cookie here
	 * means get_hashentry() found an existing table entry.
	 */
	if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
	m_entry->zt = zt;
	m_entry->post_ct_parent = post_ct_entry;
	m_entry->pre_ct_parent = pre_ct_entry;

	/* Add this entry to the pre_ct and post_ct lists */
	list_add(&m_entry->post_ct_list, &post_ct_entry->children);
	list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
	INIT_LIST_HEAD(&m_entry->children);

	err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		goto err_ct_tc_merge_insert;
	zt->tc_merge_count++;

	return 0;

err_ct_tc_merge_insert:
	/* Unlink from both parents before freeing the unfinished entry */
	list_del(&m_entry->post_ct_list);
	list_del(&m_entry->pre_ct_list);
	kfree(m_entry);
	return err;
}
137 
/* Look up or create the zone table entry for @zone. A wildcarded zone
 * has no fixed zone id and is tracked separately in priv->ct_zone_wc
 * rather than in the ct_zone_table hashtable. An existing entry is
 * recognized by its ->priv pointer being set; a new entry has its lists
 * and merge table initialized before being published. Returns the zone
 * entry or an ERR_PTR.
 */
static struct
nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
					 u16 zone, bool wildcarded)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	/* The single wildcard zone entry is cached once created */
	if (wildcarded && priv->ct_zone_wc)
		return priv->ct_zone_wc;

	if (!wildcarded) {
		zt = get_hashentry(&priv->ct_zone_table, &zone,
				   nfp_zone_table_params, sizeof(*zt));

		/* If priv is set this is an existing entry, just return it */
		if (IS_ERR(zt) || zt->priv)
			return zt;
	} else {
		zt = kzalloc(sizeof(*zt), GFP_KERNEL);
		if (!zt)
			return ERR_PTR(-ENOMEM);
	}

	zt->zone = zone;
	zt->priv = priv;
	zt->nft = NULL;

	/* init the various hash tables and lists */
	INIT_LIST_HEAD(&zt->pre_ct_list);
	INIT_LIST_HEAD(&zt->post_ct_list);

	err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
	if (err)
		goto err_tc_merge_tb_init;

	if (wildcarded) {
		priv->ct_zone_wc = zt;
	} else {
		err = rhashtable_insert_fast(&priv->ct_zone_table,
					     &zt->hash_node,
					     nfp_zone_table_params);
		if (err)
			goto err_zone_insert;
	}

	return zt;

err_zone_insert:
	rhashtable_destroy(&zt->tc_merge_tb);
err_tc_merge_tb_init:
	kfree(zt);
	return ERR_PTR(err);
}
191 
192 static struct
193 nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
194 					 struct net_device *netdev,
195 					 struct flow_cls_offload *flow,
196 					 struct netlink_ext_ack *extack)
197 {
198 	struct nfp_fl_ct_flow_entry *entry;
199 	struct nfp_fl_ct_map_entry *map;
200 	struct flow_action_entry *act;
201 	int err, i;
202 
203 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
204 	if (!entry)
205 		return ERR_PTR(-ENOMEM);
206 
207 	entry->zt = zt;
208 	entry->netdev = netdev;
209 	entry->cookie = flow->cookie;
210 	entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
211 	if (!entry->rule) {
212 		err = -ENOMEM;
213 		goto err_pre_ct_act;
214 	}
215 	entry->rule->match.dissector = flow->rule->match.dissector;
216 	entry->rule->match.mask = flow->rule->match.mask;
217 	entry->rule->match.key = flow->rule->match.key;
218 	entry->chain_index = flow->common.chain_index;
219 	entry->tun_offset = NFP_FL_CT_NO_TUN;
220 
221 	/* Copy over action data. Unfortunately we do not get a handle to the
222 	 * original tcf_action data, and the flow objects gets destroyed, so we
223 	 * cannot just save a pointer to this either, so need to copy over the
224 	 * data unfortunately.
225 	 */
226 	entry->rule->action.num_entries = flow->rule->action.num_entries;
227 	flow_action_for_each(i, act, &flow->rule->action) {
228 		struct flow_action_entry *new_act;
229 
230 		new_act = &entry->rule->action.entries[i];
231 		memcpy(new_act, act, sizeof(struct flow_action_entry));
232 		/* Entunnel is a special case, need to allocate and copy
233 		 * tunnel info.
234 		 */
235 		if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
236 			struct ip_tunnel_info *tun = act->tunnel;
237 			size_t tun_size = sizeof(*tun) + tun->options_len;
238 
239 			new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
240 			if (!new_act->tunnel) {
241 				err = -ENOMEM;
242 				goto err_pre_ct_tun_cp;
243 			}
244 			entry->tun_offset = i;
245 		}
246 	}
247 
248 	INIT_LIST_HEAD(&entry->children);
249 
250 	/* Now add a ct map entry to flower-priv */
251 	map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
252 			    nfp_ct_map_params, sizeof(*map));
253 	if (IS_ERR(map)) {
254 		NL_SET_ERR_MSG_MOD(extack,
255 				   "offload error: ct map entry creation failed");
256 		err = -ENOMEM;
257 		goto err_ct_flow_insert;
258 	}
259 	map->cookie = flow->cookie;
260 	map->ct_entry = entry;
261 	err = rhashtable_insert_fast(&zt->priv->ct_map_table,
262 				     &map->hash_node,
263 				     nfp_ct_map_params);
264 	if (err) {
265 		NL_SET_ERR_MSG_MOD(extack,
266 				   "offload error: ct map entry table add failed");
267 		goto err_map_insert;
268 	}
269 
270 	return entry;
271 
272 err_map_insert:
273 	kfree(map);
274 err_ct_flow_insert:
275 	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
276 		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
277 err_pre_ct_tun_cp:
278 	kfree(entry->rule);
279 err_pre_ct_act:
280 	kfree(entry);
281 	return ERR_PTR(err);
282 }
283 
/* Placeholder for freeing nft merge entries hanging off @entry; the
 * nft merge path is not implemented yet, so this is a no-op.
 * @is_nft_flow distinguishes whether @entry is an NFT flow entry or a
 * tc merge entry once implemented.
 */
static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
{
}
287 
/* Tear down one tc merge entry: remove it from its zone's merge table,
 * unlink it from both parent flow entries' children lists, release any
 * nft-level children, and free it. Removal failure is only warned
 * about; the entry is still unlinked and freed (best effort).
 */
static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_ent->zt;
	err = rhashtable_remove_fast(&zt->tc_merge_tb,
				     &m_ent->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		pr_warn("WARNING: could not remove merge_entry from hashtable\n");
	zt->tc_merge_count--;
	list_del(&m_ent->post_ct_list);
	list_del(&m_ent->pre_ct_list);

	if (!list_empty(&m_ent->children))
		nfp_free_nft_merge_children(m_ent, false);
	kfree(m_ent);
}
307 
308 static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
309 {
310 	struct nfp_fl_ct_tc_merge *m_ent, *tmp;
311 
312 	switch (entry->type) {
313 	case CT_TYPE_PRE_CT:
314 		list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
315 			nfp_del_tc_merge_entry(m_ent);
316 		}
317 		break;
318 	case CT_TYPE_POST_CT:
319 		list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
320 			nfp_del_tc_merge_entry(m_ent);
321 		}
322 		break;
323 	default:
324 		break;
325 	}
326 }
327 
328 void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
329 {
330 	list_del(&entry->list_node);
331 
332 	if (!list_empty(&entry->children)) {
333 		if (entry->type == CT_TYPE_NFT)
334 			nfp_free_nft_merge_children(entry, true);
335 		else
336 			nfp_free_tc_merge_children(entry);
337 	}
338 
339 	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
340 		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
341 	kfree(entry->rule);
342 	kfree(entry);
343 }
344 
345 static struct flow_action_entry *get_flow_act(struct flow_cls_offload *flow,
346 					      enum flow_action_id act_id)
347 {
348 	struct flow_action_entry *act = NULL;
349 	int i;
350 
351 	flow_action_for_each(i, act, &flow->rule->action) {
352 		if (act->id == act_id)
353 			return act;
354 	}
355 	return NULL;
356 }
357 
358 static void
359 nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
360 			struct nfp_fl_ct_zone_entry *zt_src,
361 			struct nfp_fl_ct_zone_entry *zt_dst)
362 {
363 	struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
364 	struct list_head *ct_list;
365 
366 	if (ct_entry1->type == CT_TYPE_PRE_CT)
367 		ct_list = &zt_src->post_ct_list;
368 	else if (ct_entry1->type == CT_TYPE_POST_CT)
369 		ct_list = &zt_src->pre_ct_list;
370 	else
371 		return;
372 
373 	list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
374 				 list_node) {
375 		nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
376 	}
377 }
378 
/* Handle offload of a pre_ct TC flow: validate it carries both a CT
 * action and a GOTO action, attach it to the zone table entry for the
 * CT action's zone, and try to merge it with existing post_ct entries
 * (including those in the wildcard zone). Currently always ends in
 * -EOPNOTSUPP since full offload of the merged result is not yet
 * implemented.
 */
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack)
{
	struct flow_action_entry *ct_act, *ct_goto;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;

	ct_act = get_flow_act(flow, FLOW_ACTION_CT);
	if (!ct_act) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack action empty in conntrack offload");
		return -EOPNOTSUPP;
	}

	ct_goto = get_flow_act(flow, FLOW_ACTION_GOTO);
	if (!ct_goto) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack requires ACTION_GOTO");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	/* Remember the nft flow table of the first pre_ct flow in the zone */
	if (!zt->nft)
		zt->nft = ct_act->ct.flow_table;

	/* Add entry to pre_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);
	ct_entry->type = CT_TYPE_PRE_CT;
	/* The GOTO target chain is where the matching post_ct filter lives */
	ct_entry->chain_index = ct_goto->chain_index;
	list_add(&ct_entry->list_node, &zt->pre_ct_list);
	zt->pre_ct_count++;

	nfp_ct_merge_tc_entries(ct_entry, zt, zt);

	/* Need to check and merge with tables in the wc_zone as well */
	if (priv->ct_zone_wc)
		nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);

	/* Offload of the merged flow is not implemented yet, so reject */
	NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack action not supported");
	return -EOPNOTSUPP;
}
430 
/* Handle offload of a post_ct TC flow: determine its zone (exact or
 * fully wildcarded — partial zone masks are rejected), attach it to the
 * matching zone entry, and try to merge it with existing pre_ct
 * entries. A wildcarded flow is merged against every zone table.
 * Currently always ends in -EOPNOTSUPP since full offload of the
 * merged result is not yet implemented.
 */
int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
			     struct net_device *netdev,
			     struct flow_cls_offload *flow,
			     struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	bool wildcarded = false;
	struct flow_match_ct ct;

	flow_rule_match_ct(rule, &ct);
	if (!ct.mask->ct_zone) {
		/* Zone fully masked out: flow applies to all zones */
		wildcarded = true;
	} else if (ct.mask->ct_zone != U16_MAX) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: partially wildcarded ct_zone is not supported");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	/* Add entry to post_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);

	ct_entry->type = CT_TYPE_POST_CT;
	ct_entry->chain_index = flow->common.chain_index;
	list_add(&ct_entry->list_node, &zt->post_ct_list);
	zt->post_ct_count++;

	if (wildcarded) {
		/* Iterate through all zone tables if not empty, look for merges with
		 * pre_ct entries and merge them.
		 */
		struct rhashtable_iter iter;
		struct nfp_fl_ct_zone_entry *zone_table;

		rhashtable_walk_enter(&priv->ct_zone_table, &iter);
		rhashtable_walk_start(&iter);
		while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
			/* Walk may yield ERR_PTR (e.g. -EAGAIN on resize); skip */
			if (IS_ERR(zone_table))
				continue;
			/* Stop the walk while merging: merge may sleep/alloc,
			 * which is not allowed inside an active walk section.
			 */
			rhashtable_walk_stop(&iter);
			nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
			rhashtable_walk_start(&iter);
		}
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	} else {
		nfp_ct_merge_tc_entries(ct_entry, zt, zt);
	}

	/* Offload of the merged flow is not implemented yet, so reject */
	NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack match not supported");
	return -EOPNOTSUPP;
}
493