// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

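/* Overlay of all the match fields that are relevant when merging flows.
 * The anonymous union maps the named fields onto vals[], allowing
 * nfp_flower_can_merge() to run plain bitmap operations (e.g.
 * bitmap_andnot()) across every field at once.
 */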
struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};

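/* Layout of the control message built below:
 * | nfp_fl_rule_metadata | unmasked key | mask | action list |
 * The lengths held in the metadata are converted to long words for the
 * firmware and restored to bytes once the message has been filled in.
 */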
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
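	/* NFP_FL_LW_SIZ is the log2 of the long word size (2 in cmsg.h,
	 * i.e. 4 byte long words), so a 40 byte key_len, for example,
	 * goes on the wire as 10.
	 */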

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

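/* Matches above L2 (IP addresses, L4 ports, ICMP) are only well defined
 * when the lower layer protocol is pinned down; the helpers below detect
 * such matches so callers can reject otherwise ambiguous rules.
 */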
static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

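/* UDP tunnels are identified by their well-known destination port:
 * IANA_VXLAN_UDP_PORT (4789) selects VXLAN, GENEVE_UDP_PORT (6081)
 * selects Geneve and any other port is rejected as an unknown tunnel.
 */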
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (!enc_op)
			break;
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
						key_size, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}

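/* Walk the dissector keys used by @flow and work out which NFP key
 * layers the rule requires, accumulating the layer bitmaps and the total
 * size its firmware representation will occupy.
 */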
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_cls_offload *flow,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}
		if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
			return -EOPNOTSUPP;
		}

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* check if GRE, which has no enc_ports */
			if (netif_is_gretap(netdev)) {
				*tun_type = NFP_FL_TUNNEL_GRE;
				key_layer |= NFP_FLOWER_LAYER_EXT_META;
				key_size += sizeof(struct nfp_flower_ext_meta);
				key_layer_two |= NFP_FLOWER_LAYER2_GRE;
				key_size +=
					sizeof(struct nfp_flower_ipv4_gre_tun);

				if (enc_op.key) {
					NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
					return -EOPNOTSUPP;
				}
			} else {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG are only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

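/* OR the mask of every field rewritten by @flow's actions into @merge.
 * A field set by an action counts the same as a matched field when
 * deciding whether two flows may be merged.
 */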
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			memset(&merge->ipv4, 0xff,
			       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}

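/* Pre-actions (PRE_TUNNEL/PRE_LAG) must sit at the head of an action
 * list. Copy any leading pre-actions from @act_src to @act_dst and
 * return the number of bytes copied; *tunnel_act is set if a pre-tunnel
 * action was seen.
 */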
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			/* fall through */
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];
		if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	return 0;
}

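/* Build the merged action list: sub_flow1's actions minus its final
 * output action, followed by sub_flow2's actions, with the pre-actions
 * of both hoisted to the front of the list.
 */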
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, sub_flow 2 can only have output actions for a valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
		if (err)
			return err;
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct flow_cls_offload merge_tc_off = {};
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	/* Merges are driver initiated; there is no TC request and therefore
	 * no extack to attach error messages to.
	 */
	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev, extack);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* The flow payload is now owned by the flow table and is freed when
	 * the flower rule is destroyed; only the temporary key layer info
	 * needs to be released here.
	 */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

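/* Tear down a merge flow because @del_sub_flow is being removed. If the
 * deleted sub_flow is not the origin flow that the merge overwrote in
 * hardware, the origin rule is restored instead of plainly deleted.
 */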
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

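/* Merge flows own a host context of their own; credit any stats that
 * have accumulated there to every subflow forming the merge, then zero
 * the merge counters so nothing is counted twice.
 */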
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct flow_cls_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case FLOW_CLS_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case FLOW_CLS_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

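/* Indirect block offload lets rules installed on netdevs the driver
 * does not own (e.g. tunnel devices accepted by
 * nfp_fl_is_netdev_to_offload()) still be offloaded; the cb_priv list
 * below tracks the per-netdev state for those blocks.
 */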
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct flow_cls_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	list_del(&priv->list);
	kfree(priv);
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct flow_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	      nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
					       cb_priv, cb_priv,
					       nfp_flower_setup_indr_tc_release);
		if (IS_ERR(block_cb)) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_indr_block_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}